File: rules

Package: llama.cpp 5882+dfsg-3

#!/usr/bin/make -f
#export DH_VERBOSE = 1

export DEB_BUILD_MAINT_OPTIONS = hardening=+all

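# pybuild settings for the gguf Python package (sources under gguf-py/)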
export PYBUILD_NAME=gguf
export PYBUILD_DIR=gguf-py

# Both llama.cpp and the ggml library it depends on ship their shared
# libraries in private directories for now, hence the explicit RPATH.
RPATH=/usr/lib/$(DEB_HOST_MULTIARCH)/ggml:/usr/lib/$(DEB_HOST_MULTIARCH)/llama

# Included for DEB_VERSION_UPSTREAM (used for LLAMA_BUILD_NUMBER and man pages)
include /usr/share/dpkg/pkg-info.mk

CMAKE_FLAGS = -DCMAKE_INSTALL_RPATH=$(RPATH) \
              -DCMAKE_INSTALL_RPATH_USE_LINK_PATH=OFF \
              -DCMAKE_PREFIX_PATH=/usr/lib/$(DEB_HOST_MULTIARCH)/ggml/cmake-private \
              -DCMAKE_BUILD_TYPE=RelWithDebInfo \
              -DLLAMA_BUILD_NUMBER=$(subst +dfsg,,$(DEB_VERSION_UPSTREAM)) \
              -DLLAMA_BUILD_COMMIT=Debian \
              -DLLAMA_USE_SYSTEM_GGML=ON

%:
	dh $@ --buildsystem=cmake

override_dh_auto_configure-arch:
	dh_auto_configure -- $(CMAKE_FLAGS)

override_dh_auto_configure-indep:
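	# The arch-independent build is the gguf Python package; use pybuild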
	dh_auto_configure --buildsystem=pybuild

execute_after_dh_auto_build-arch:
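	# Generate a man page with help2man for each built llama-* tool,
	# skipping llama-gen-docs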
	mkdir -p man/man1
	for progname in obj-*/bin/llama-*; do \
		progname_base=$$(basename $$progname); \
		[ "$$progname_base" != "llama-gen-docs" ] || continue ; \
		help2man \
			--source=debian \
			--version-string=$(DEB_VERSION_UPSTREAM) \
			--name=$$progname_base \
			--section=1 \
			--no-info \
			--no-discard-stderr \
			$$progname > man/man1/$$progname_base.1; \
	done

execute_after_dh_auto_install-arch:
	# Bash completion file
	mkdir -p completions
	LD_LIBRARY_PATH=debian/tmp/usr/lib/$(DEB_HOST_MULTIARCH)/llama \
		debian/tmp/usr/bin/llama-cli --device none --completion-bash \
		> completions/llama-cli

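	# All other llama-* tools reuse llama-cli's completions via symlinks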
	for progname in debian/tmp/usr/bin/llama-*; do \
		progname_base=$$(basename $$progname); \
		[ "$$progname_base" != "llama-cli" ] || continue ; \
		ln -r -s completions/llama-cli completions/$$progname_base; \
	done

override_dh_auto_build-indep:
	dh_auto_build --buildsystem=pybuild

# No tests for now: many of them need a model file that we don't have
override_dh_auto_test:
	:

override_dh_auto_install-indep:
	dh_auto_install --buildsystem=pybuild
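	# Generate man pages for the installed gguf-* scripts, skipping gguf-editor-gui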
	mkdir -p man/man1
	for progname in debian/python3-gguf/usr/bin/gguf-*; do \
		progname_base=$$(basename $$progname); \
		[ "$$progname_base" != "gguf-editor-gui" ] || continue ; \
		PYTHONPATH=gguf-py help2man \
			--source=debian \
			--version-string=$(DEB_VERSION_UPSTREAM) \
			--name=$$progname_base \
			--section=1 \
			--no-info \
			--no-discard-stderr \
			$$progname > man/man1/$$progname_base.1; \
	done

override_dh_auto_clean-indep:
	dh_auto_clean --buildsystem=pybuild

execute_after_dh_auto_clean:
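	# common/build-info.cpp is generated during the build; remove it again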
	rm -f common/build-info.cpp

override_dh_compress:
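	# Do not compress installed .py files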
	dh_compress -X.py