File: proxylimits.lua

Package: memcached 1.6.39-2

-- Use a global counter so the reload count isn't lost when the config reloads.
-- Not really sure this will work forever, but even if it doesn't, the proxy
-- should allow some method of persisting data across reloads.
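-- (A reload re-executes this file in the same configuration-thread Lua state,
-- so globals assigned here appear to persist across reloads; this is relied on
-- here rather than formally guaranteed.)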
if reload_count == nil then
    reload_count = 0
end

function mcp_config_pools(old)
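    -- Each reload bumps reload_count, and each branch below reconfigures one
    -- proxy limit for the test harness to exercise:
    --   1: set a very low active request limit (roughly, a cap on how many
    --      proxied requests may be outstanding at once)
    --   2: remove the request limit
    --   3: set a tiny buffer memory limit (a cap on memory used to buffer
    --      request/response data); 4: raise it, but keep it limited
    --   5: remove the buffer memory limit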
    mcp.backend_read_timeout(4)
    mcp.backend_connect_timeout(5)
    reload_count = reload_count + 1

    if reload_count == 1 then
        -- set a low request limit.
        mcp.active_req_limit(4)
        local b1 = mcp.backend('b1', '127.0.0.1', 11711)
        local b2 = mcp.backend('b2', '127.0.0.1', 11712)
        local b3 = mcp.backend('b3', '127.0.0.1', 11713)

        -- Direct all traffic at a single backend to simplify the test.
        local pools = {
            test = mcp.pool({b1}),
            hold = mcp.pool({b2, b3})
        }
        return pools
    elseif reload_count == 2 then
        -- removing the request limit.
        mcp.active_req_limit(0)
        local b1 = mcp.backend('b1', '127.0.0.1', 11711)
        local b2 = mcp.backend('b2', '127.0.0.1', 11712)
        local b3 = mcp.backend('b3', '127.0.0.1', 11713)

        -- Direct all traffic at a single backend to simplify the test.
        local pools = {
            test = mcp.pool({b1}),
            hold = mcp.pool({b2, b3})
        }
        return pools
    elseif reload_count == 3 or reload_count == 4 then
        -- adding the buffer memory limit (absurdly low).
        mcp.buffer_memory_limit(20)
        if reload_count == 4 then
            -- raise it a bit, but keep it limited.
            mcp.buffer_memory_limit(200)
        end
        local b1 = mcp.backend('b1', '127.0.0.1', 11711)
        local b2 = mcp.backend('b2', '127.0.0.1', 11712)
        local b3 = mcp.backend('b3', '127.0.0.1', 11713)

        -- Direct all traffic at a single backend to simplify the test.
        local pools = {
            test = mcp.pool({b1}),
            hold = mcp.pool({b2, b3})
        }
        return pools
    elseif reload_count == 5 then
        -- remove the buffer limit entirely.
        mcp.buffer_memory_limit(0)
        local b1 = mcp.backend('b1', '127.0.0.1', 11711)
        local b2 = mcp.backend('b2', '127.0.0.1', 11712)
        local b3 = mcp.backend('b3', '127.0.0.1', 11713)

        -- Direct all traffic at a single backend to simplify the test.
        local pools = {
            test = mcp.pool({b1}),
            hold = mcp.pool({b2, b3})
        }
        return pools
    end
end
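
-- The stages above construct identical backends and pools; only the limit
-- calls differ. A small local helper along these lines could factor out the
-- repetition if more stages are added (make_pools is illustrative only and is
-- not called above):
local function make_pools()
    local b1 = mcp.backend('b1', '127.0.0.1', 11711)
    local b2 = mcp.backend('b2', '127.0.0.1', 11712)
    local b3 = mcp.backend('b3', '127.0.0.1', 11713)
    return {
        test = mcp.pool({b1}),
        hold = mcp.pool({b2, b3})
    }
end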

-- At least to start we don't need to test every command, but we should run
-- some tests against the two broad types of commands: gets, and sets with
-- payloads.
function mcp_config_routes(zones)
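    -- Build a single route: new_handle() registers the "test" pool with the
    -- function generator, and fg:ready() supplies a generator whose returned
    -- function handles each request by forwarding it to that handle with
    -- enqueue_and_wait() and returning the backend's response. (A brief
    -- summary of the proxy route API as used here.)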
    local fg = mcp.funcgen_new()
    local h = fg:new_handle(zones["test"])
    fg:ready({
        f = function(rctx)
            return function(r)
                return rctx:enqueue_and_wait(r, h)
            end
        end
    })
    mcp.attach(mcp.CMD_MG, fg)
    mcp.attach(mcp.CMD_MS, fg)
    mcp.attach(mcp.CMD_GET, fg)
    mcp.attach(mcp.CMD_SET, fg)
end