File: quantize.sh

#!/usr/bin/env bash
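# Usage: quantize.sh [CONVERTED_MODEL] [QUANTIZED_TYPE] [TOKEN_EMBD_TYPE] [OUTPUT_TYPE]
# Each argument may also be supplied via the environment variable of the same
# name; a positional argument takes precedence.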

set -e

CONVERTED_MODEL="${1:-"$CONVERTED_MODEL"}"
QUANTIZED_TYPE="${2:-"$QUANTIZED_TYPE"}"
TOKEN_EMBD_TYPE="${3:-"${TOKEN_EMBD_TYPE}"}"
OUTPUT_TYPE="${4:-"${OUTPUT_TYPE}"}"
# Derive the quantized model path from the converted model path
QUANTIZED_MODEL="$CONVERTED_MODEL"

# Final check if we have a model path
if [ -z "$CONVERTED_MODEL" ]; then
    echo "Error: Model path must be provided either as:" >&2
    echo "  1. Command line argument" >&2
    echo "  2. CONVERTED_MODEL environment variable" >&2
    exit 1
fi

if [ -z "$QUANTIZED_TYPE" ]; then
    echo "Error: QUANTIZED_TYPE is required" >&2
    exit 1
fi

echo "Converted model: $CONVERTED_MODEL"

# Process the quantized model filename
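# (hypothetical example: llama-2-7b-f16.gguf with QUANTIZED_TYPE=Q4_K_M
# becomes llama-2-7b-f16-Q4_K_M.gguf)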
if [[ "$QUANTIZED_MODEL" == *.gguf ]]; then
    # Remove .gguf suffix, add quantized type, then add .gguf back
    BASE_NAME="${QUANTIZED_MODEL%.gguf}"
    QUANTIZED_MODEL="${BASE_NAME}-${QUANTIZED_TYPE}.gguf"
else
    echo "Error: QUANTIZED_MODEL must end with .gguf extension" >&2
    exit 1
fi

# Build the llama-quantize tool (the script assumes the llama.cpp build
# directory sits two levels up)
cmake --build ../../build --target llama-quantize -j8

echo "Token embedding type: $TOKEN_EMBD_TYPE"
echo "Output tensor type: $OUTPUT_TYPE"

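# Assemble the command as an array so the optional per-tensor type
# overrides are passed only when they were provided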
CMD_ARGS=("../../build/bin/llama-quantize")
[[ -n "$TOKEN_EMBD_TYPE" ]] && CMD_ARGS+=("--token-embedding-type" "$TOKEN_EMBD_TYPE")
[[ -n "$OUTPUT_TYPE" ]]     && CMD_ARGS+=("--output-tensor-type" "$OUTPUT_TYPE")
CMD_ARGS+=("$CONVERTED_MODEL" "$QUANTIZED_MODEL" "$QUANTIZED_TYPE")

"${CMD_ARGS[@]}"

echo "Quantized model saved to: $QUANTIZED_MODEL"