File: fdo-cache

Package: mesa 25.3.2-2
proxy_cache_path /var/cache/nginx/ levels=1:2 keys_zone=my_cache:10m max_size=50g inactive=2w use_temp_path=off;
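# A 50 GiB on-disk cache; keys_zone=my_cache:10m holds the cache keys in
# shared memory (roughly 8k keys per MB according to the nginx docs), and
# entries not accessed for two weeks (inactive=2w) are evicted.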

server {
	listen 10.42.0.1:80 default_server;
	listen 127.0.0.1:80 default_server;
	listen [::]:80 default_server;
	resolver 8.8.8.8;
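	# A resolver is needed because the upstream to proxy to is only known at
	# request time (it is taken from the "uri" query argument), so nginx has
	# to resolve host names at runtime rather than at configuration load.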

	root /var/www/html;

	# Add index.php to the list if you are using PHP
	index index.html index.htm index.nginx-debian.html;

	server_name _;

	location / {
		# First attempt to serve request as file, then
		# as directory, then fall back to displaying a 404.
		try_files $uri $uri/ =404;
	}

	location /tmp {
		# Serve LAVA dispatcher HTTP artifacts to the clients, e.g. for the deploy action
		alias /var/lib/lava/dispatcher/tmp;
	}

	proxy_cache my_cache;

	# Wait for cache creation when multiple queries are made for the same file
	proxy_cache_lock on;
	proxy_cache_lock_age 30m;
	proxy_cache_lock_timeout 1h;
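	# Only one request at a time populates a new cache entry; if it hasn't
	# finished after proxy_cache_lock_age, one more request is let through to
	# the upstream, and requests still waiting after proxy_cache_lock_timeout
	# are passed upstream without their response being cached.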

	location /force_cache {
		internal;
		# On some setups the cache headers tell nginx that the artifacts
		# shouldn't be cached. However, when we know that isn't valid for
		# LAVA usage, this endpoint allows caching to be forced instead.
		proxy_cache_valid 200 48h;
		proxy_ignore_headers Cache-Control Set-Cookie expires;
		include snippets/uri-caching.conf;
	}

	location /fdo_cache {
		internal;
		# As the auth information in the query is dropped, use the
		# minimal possible cache validity so that in practice every
		# request gets revalidated. This avoids unauthenticated
		# downloads from our cache, as the cache key doesn't include
		# the auth info.
		proxy_cache_valid 200 1s;
		proxy_cache_revalidate on;
		proxy_ignore_headers Cache-Control Set-Cookie expires;
		set_by_lua_block $cache_key {
			-- Set the cache key to the uri with the query stripped
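			-- e.g. a hypothetical uri argument of
			--   https%3A%2F%2Fmesa.your-objectstorage.com%2Ffdo-opa%2Fkernel.tar.zst%3FX-Amz-Signature%3Dabc
			-- unescapes to https://mesa.your-objectstorage.com/fdo-opa/kernel.tar.zst?X-Amz-Signature=abc
			-- and the cache key becomes https://mesa.your-objectstorage.com/fdo-opa/kernel.tar.zst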
			local unescaped = ngx.unescape_uri(ngx.var.arg_uri);
			local it, err = ngx.re.match(unescaped, "([^?]*).*")
			if not it then
				-- Fallback on the full uri as key if the regexp fails
				return ngx.var.arg_uri;
			end
			return it[1]
		}
		proxy_cache_key $cache_key;
		include snippets/uri-caching.conf;
	}

	location /cache {
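		# Clients pass the upstream URL to fetch (and cache) in the "uri"
		# query argument, e.g. (hypothetical request)
		#   /cache?uri=https://gitlab.freedesktop.org/mesa/mesa/-/jobs/123/artifacts/raw/results.tar
		# and the rules below dispatch it to the matching internal location.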
		# GitLab's HTTP server marks everything as no-cache even though
		# the artifact URLs don't change.
		if ($arg_uri ~*  /.*gitlab.*artifacts(\/|%2F)raw/ ) {
			rewrite ^ /force_cache;
		}

		# fd.o's object storage embeds an authentication signature in its
		# query string, so use an adjusted cache key without the query.
		if ($arg_uri ~*  .*your-objectstorage.com(\/|%2F)fdo-opa(\/|%2F)) {
			rewrite ^ /fdo_cache;
		}

		# Set a really low validity together with cache revalidation; our
		# goal for caching isn't to lower the number of HTTP requests but
		# to lower the amount of data transfer. Also, for some test
		# scenarios (typically manual tests) the file at a given URL might
		# get modified, so avoid confusion by ensuring revalidation
		# happens often.
		proxy_cache_valid 200 10s;
		proxy_cache_revalidate on;
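		# proxy_cache_revalidate refreshes expired entries with conditional
		# requests (If-Modified-Since / If-None-Match), so an unchanged file
		# only costs a 304 instead of a full transfer.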
		include snippets/uri-caching.conf;
	}
}
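
# Note: snippets/uri-caching.conf is not part of this file. Judging from the
# locations above, it presumably performs the actual proxying of the URL given
# in the "uri" query argument, along the lines of this minimal, hypothetical
# sketch (names are assumptions, not the real snippet's contents):
#
#	set_by_lua_block $proxied_uri { return ngx.unescape_uri(ngx.var.arg_uri) }
#	add_header X-Cache-Status $upstream_cache_status;
#	proxy_pass $proxied_uri;
#
# i.e. the surrounding location's proxy_cache_* settings apply while the
# request itself is forwarded to the requested upstream.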