File: test-autorelease.cpp

// ref: https://github.com/ggml-org/llama.cpp/issues/4952#issuecomment-1892864763

#include <cstdio>
#include <string>
#include <thread>

#include "llama.h"
#include "get-model.h"

// This creates a new context inside a std::thread and then tries to exit cleanly.
int main(int argc, char ** argv) {
    auto * model_path = get_model_or_exit(argc, argv);

    std::thread([&model_path]() {
        llama_backend_init();
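        // load the model and create a context inside the worker thread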
        auto * model = llama_model_load_from_file(model_path, llama_model_default_params());
        auto * ctx = llama_init_from_model(model, llama_context_default_params());
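        // tear everything down again before the thread exits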
        llama_free(ctx);
        llama_model_free(model);
        llama_backend_free();
    }).join();

    return 0;
}
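
The test above does not check the return values of llama_model_load_from_file or llama_init_from_model, so a bad model path is only caught indirectly. Below is a defensive variant of the same test; this is a sketch, not the upstream code, and it assumes (per llama.h) that both calls return nullptr on failure. The error messages and exit codes are illustrative only.

// Sketch: same autorelease test, with null checks on model and context creation.
#include <cstdio>
#include <thread>

#include "llama.h"
#include "get-model.h"

int main(int argc, char ** argv) {
    auto * model_path = get_model_or_exit(argc, argv);

    int rc = 0;
    std::thread([model_path, &rc]() {
        llama_backend_init();

        auto * model = llama_model_load_from_file(model_path, llama_model_default_params());
        if (model == nullptr) {
            fprintf(stderr, "failed to load model: %s\n", model_path);
            llama_backend_free();
            rc = 1;
            return;
        }

        auto * ctx = llama_init_from_model(model, llama_context_default_params());
        if (ctx == nullptr) {
            fprintf(stderr, "failed to create context\n");
            llama_model_free(model);
            llama_backend_free();
            rc = 1;
            return;
        }

        llama_free(ctx);
        llama_model_free(model);
        llama_backend_free();
    }).join();

    return rc;
}

In the llama.cpp test harness, get_model_or_exit (from get-model.h, included above) takes the model path from argv[1] or, if I recall the helper correctly, from the LLAMACPP_TEST_MODELFILE environment variable, skipping the test when neither is set.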