File: test-model-load-cancel.cpp

#include "llama.h"
#include "get-model.h"

#include <cstdio>
#include <cstdlib>

int main(int argc, char * argv[]) {
    auto * model_path = get_model_or_exit(argc, argv);
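    // sanity-check that the model file exists and is readable before loading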
    auto * file = fopen(model_path, "r");
    if (file == nullptr) {
        fprintf(stderr, "no model at '%s' found\n", model_path);
        return EXIT_FAILURE;
    }

    fprintf(stderr, "using '%s'\n", model_path);
    fclose(file);

    llama_backend_init();
    auto params = llama_model_params{};
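    // disable mmap so the loader reads tensor data incrementally and
    // reports progress through the callback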
    params.use_mmap = false;
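    // the callback is invoked repeatedly during loading; returning false
    // aborts the load, so this cancels while progress is still at or below 50%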
    params.progress_callback = [](float progress, void * ctx){
        (void) ctx;
        return progress > 0.50;
    };
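    // a cancelled load returns nullptr, which is the success condition here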
    auto * model = llama_model_load_from_file(model_path, params);
    llama_backend_free();
    return model == nullptr ? EXIT_SUCCESS : EXIT_FAILURE;
}
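
For comparison, here is a minimal sketch of the non-cancelling path, assuming the same llama.cpp C API used above together with its llama_model_default_params, progress_callback_user_data, and llama_model_free entry points; the helper name load_cancellable is hypothetical. It routes an external cancellation flag through progress_callback_user_data, so the load runs to completion unless another thread sets the flag.

#include "llama.h"

#include <atomic>
#include <cstdio>
#include <cstdlib>

// hypothetical helper: load a model that can be cancelled from another
// thread via an atomic flag passed through progress_callback_user_data
static llama_model * load_cancellable(const char * path, std::atomic<bool> * cancel) {
    auto params = llama_model_default_params();
    params.use_mmap = false; // force incremental reads so the callback fires
    params.progress_callback = [](float progress, void * ctx) {
        auto * flag = static_cast<std::atomic<bool> *>(ctx);
        fprintf(stderr, "load progress: %.0f%%\n", progress * 100.0f);
        return !flag->load(); // returning false aborts the load
    };
    params.progress_callback_user_data = cancel;
    return llama_model_load_from_file(path, params);
}

int main(int argc, char * argv[]) {
    if (argc < 2) {
        fprintf(stderr, "usage: %s <model.gguf>\n", argv[0]);
        return EXIT_FAILURE;
    }
    std::atomic<bool> cancel{false}; // set to true elsewhere to cancel the load

    llama_backend_init();
    auto * model = load_cancellable(argv[1], &cancel);
    if (model != nullptr) {
        llama_model_free(model); // the load ran to completion
    }
    llama_backend_free();
    return EXIT_SUCCESS;
}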