File: run-all-perf.sh

Package: llama.cpp 5882+dfsg-3 (Debian sid)
#!/usr/bin/env bash

# default quantization types to benchmark and default llama-bench arguments
qnt=(f16 q8_0 q6_k q5_k q5_1 q5_0 q4_k q4_1 q4_0 q3_k q2_k)
args="-ngl 999 -n 64 -p 512"

if [ -z "$1" ]; then
    echo "usage: $0 <model> [qnt] [args]"
    echo "default: $0 <model> \"${qnt[@]}\" \"${args}\""
    exit 1
fi

if [ ! -z "$2" ]; then
    qnt=($2)
fi

if [ ! -z "$3" ]; then
    args="$3"
fi

model="$1"
out="../tmp/results-${model}"

# abort if any command (including any stage of a pipeline) fails
set -o pipefail
set -e

mkdir -p "${out}"

mstr=""

# build one -m argument per quantization type
for q in "${qnt[@]}"; do
    mstr="${mstr} -m ../models/${model}/ggml-model-${q}.gguf"
done

# benchmark all requested models in a single llama-bench run; stderr is discarded to keep the output table clean
./bin/llama-bench ${mstr} ${args} 2> /dev/null
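
Example invocation, as a sketch (not part of the packaged file): the script expects quantized GGUF files at ../models/<model>/ggml-model-<qnt>.gguf and a bin/llama-bench binary relative to the current directory, so it is meant to be run from a build directory; the script path and the "llama-7b" model directory below are assumptions for illustration. Benchmarking only two quantization levels with custom llama-bench arguments would look like:

    ./run-all-perf.sh llama-7b "q8_0 q4_0" "-ngl 999 -n 128 -p 1024"

With no second or third argument, all eleven default quantization types are benchmarked with "-ngl 999 -n 64 -p 512".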