-- Redis script: cache a computed value under `key` and register its
-- invalidation dependencies (a DNF of field conjunctions per db table).
-- Script inputs
local prefix = KEYS[1]
local key = KEYS[2]
local precall_key = KEYS[3]
local data = ARGV[1]
local dnfs = cjson.decode(ARGV[2])
local timeout = tonumber(ARGV[3])

-- The precall marker guards against caching stale data: if it vanished while
-- the wrapped function ran, an invalidation happened mid-call and the result
-- must not be written. A precall_key equal to the bare prefix means the
-- marker mechanism is not in use, so the write is always allowed.
local safe_to_cache = precall_key == prefix
    or redis.call('exists', precall_key) == 1
if not safe_to_cache then
    return
end

-- Store the cached payload with its expiry
redis.call('setex', key, timeout, data)
-- A pair of helpers
-- NOTE: we depend here on keys order being stable

-- Comma-joined list of a conjunction's field names (e.g. "age,name"),
-- recorded as a "scheme" so invalidation knows which field sets exist.
local conj_schema = function (conj)
    local fields = {}
    for field in pairs(conj) do
        fields[#fields + 1] = field
    end
    return table.concat(fields, ',')
end
-- Redis key of the invalidator set for one conjunction, of the form
-- "<prefix>conj:<db_table>:field1=val1&field2=val2". That set collects every
-- cache key depending on this conjunction, for later bulk invalidation.
local conj_cache_key = function (db_table, conj)
    local pieces = {}
    for field, val in pairs(conj) do
        pieces[#pieces + 1] = field .. '=' .. tostring(val)
    end
    return prefix .. 'conj:' .. db_table .. ':' .. table.concat(pieces, '&')
end
-- Register schemes and invalidators for every conjunction in the DNF.
-- Invalidator ttl carries a margin over the data key's life (2x plus a few
-- extra seconds) so we refresh it rarely and stay safe near the boundary.
local invalidator_ttl = timeout * 2 + 10
for db_table, disj in pairs(dnfs) do
    for _, conj in ipairs(disj) do
        -- Record which field combination this conjunction uses
        redis.call('sadd', prefix .. 'schemes:' .. db_table, conj_schema(conj))

        -- Attach the new cache key to this conjunction's invalidator set
        local conj_key = conj_cache_key(db_table, conj)
        redis.call('sadd', conj_key, key)

        -- NOTE: an invalidator should live longer than any key it references,
        -- so its ttl is bumped whenever it drops below the new key's timeout.
        -- NOTE: "EXPIRE conj_key timeout GT" is not usable here because it
        -- has no effect on newly created and thus involatile conj keys.
        if redis.call('ttl', conj_key) < timeout then
            redis.call('expire', conj_key, invalidator_ttl)
        end
    end
end