File: test_04_stuttered.py

package info (click to toggle)
curl 8.19.0-1
  • links: PTS, VCS
  • area: main
  • in suites: forky, sid
  • size: 31,884 kB
  • sloc: ansic: 200,254; perl: 21,116; python: 10,390; sh: 6,691; makefile: 1,507; pascal: 240; cpp: 196
file content (127 lines) | stat: -rw-r--r-- 5,919 bytes parent folder | download | duplicates (4)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#***************************************************************************
#                                  _   _ ____  _
#  Project                     ___| | | |  _ \| |
#                             / __| | | | |_) | |
#                            | (__| |_| |  _ <| |___
#                             \___|\___/|_| \_\_____|
#
# Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://curl.se/docs/copyright.html.
#
# You may opt to use, copy, modify, merge, publish, distribute and/or sell
# copies of the Software, and permit persons to whom the Software is
# furnished to do so, under the terms of the COPYING file.
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
# KIND, either express or implied.
#
# SPDX-License-Identifier: curl
#
###########################################################################
#
import logging
from typing import Tuple, List, Dict
import pytest

from testenv import Env, CurlClient


log = logging.getLogger(__name__)


@pytest.mark.skipif(condition=Env().slow_network, reason="not suitable for slow network tests")
@pytest.mark.skipif(condition=Env().ci_run, reason="not suitable for CI runs")
class TestStuttered:
    """Downloads against a server endpoint that 'stutters' its response:
    the tweak handler emits `chunks` chunks of `chunk_size` bytes with a
    `chunk_delay` pause between them. Checks that delayed responses work
    and that parallel transfers over one connection stay reasonably even."""

    # download 1 file, check that delayed response works in general
    @pytest.mark.parametrize("proto", Env.http_protos())
    def test_04_01_download_1(self, env: Env, httpd, nghttpx, proto):
        count = 1
        curl = CurlClient(env=env)
        urln = f'https://{env.authority_for(env.domain1, proto)}' \
            f'/curltest/tweak?id=[0-{count - 1}]'\
            '&chunks=100&chunk_size=100&chunk_delay=10ms'
        r = curl.http_download(urls=[urln], alpn_proto=proto)
        r.check_response(count=1, http_status=200)

    # download 50 files in 100 chunks a 100 bytes with 10ms delay between
    # prepend 100 file requests to warm up connection processing limits
    # (Apache2 increases # of parallel processed requests after successes)
    @pytest.mark.parametrize("proto", Env.http_mplx_protos())
    def test_04_02_100_100_10(self, env: Env, httpd, nghttpx, proto):
        count = 50
        warmups = 100
        curl = CurlClient(env=env)
        url1 = f'https://{env.authority_for(env.domain1, proto)}/data.json?[0-{warmups-1}]'
        urln = f'https://{env.authority_for(env.domain1, proto)}' \
            f'/curltest/tweak?id=[0-{count-1}]'\
            '&chunks=100&chunk_size=100&chunk_delay=10ms'
        r = curl.http_download(urls=[url1, urln], alpn_proto=proto,
                               extra_args=['--parallel'])
        r.check_response(count=warmups+count, http_status=200)
        # all transfers must have multiplexed over a single connection
        assert r.total_connects == 1
        # spread over the stuttered transfers only (skip the warmup stats)
        t_avg, i_min, t_min, i_max, t_max = self.stats_spread(r.stats[warmups:], 'time_total')
        # NOTE(review): this warns when the spread is SMALL (t_max < 5*t_min)
        # — looks inverted (one would expect a warning on a LARGE spread);
        # behavior kept as-is, confirm intent upstream.
        if t_max < (5 * t_min) and t_min < 2:
            log.warning(f'avg time of transfer: {t_avg} [{i_min}={t_min}, {i_max}={t_max}]')

    # download 50 files in 1000 chunks a 10 bytes with 1ms delay between
    # prepend 100 file requests to warm up connection processing limits
    # (Apache2 increases # of parallel processed requests after successes)
    @pytest.mark.parametrize("proto", Env.http_mplx_protos())
    def test_04_03_1000_10_1(self, env: Env, httpd, nghttpx, proto):
        count = 50
        warmups = 100
        curl = CurlClient(env=env)
        url1 = f'https://{env.authority_for(env.domain1, proto)}/data.json?[0-{warmups-1}]'
        urln = f'https://{env.authority_for(env.domain1, proto)}' \
            f'/curltest/tweak?id=[0-{count - 1}]'\
            '&chunks=1000&chunk_size=10&chunk_delay=100us'
        r = curl.http_download(urls=[url1, urln], alpn_proto=proto,
                               extra_args=['--parallel'])
        r.check_response(count=warmups+count, http_status=200)
        # all transfers must have multiplexed over a single connection
        assert r.total_connects == 1
        t_avg, i_min, t_min, i_max, t_max = self.stats_spread(r.stats[warmups:], 'time_total')
        # NOTE(review): condition looks inverted (see test_04_02); kept as-is.
        if t_max < (5 * t_min):
            log.warning(f'avg time of transfer: {t_avg} [{i_min}={t_min}, {i_max}={t_max}]')

    # download 50 files in 10000 chunks a 1 byte with 10us delay between
    # prepend 100 file requests to warm up connection processing limits
    # (Apache2 increases # of parallel processed requests after successes)
    @pytest.mark.parametrize("proto", Env.http_mplx_protos())
    def test_04_04_1000_10_1(self, env: Env, httpd, nghttpx, proto):
        count = 50
        warmups = 100
        curl = CurlClient(env=env)
        url1 = f'https://{env.authority_for(env.domain1, proto)}/data.json?[0-{warmups-1}]'
        urln = f'https://{env.authority_for(env.domain1, proto)}' \
            f'/curltest/tweak?id=[0-{count - 1}]'\
            '&chunks=10000&chunk_size=1&chunk_delay=50us'
        r = curl.http_download(urls=[url1, urln], alpn_proto=proto,
                               extra_args=['--parallel'])
        r.check_response(count=warmups+count, http_status=200)
        # all transfers must have multiplexed over a single connection
        assert r.total_connects == 1
        t_avg, i_min, t_min, i_max, t_max = self.stats_spread(r.stats[warmups:], 'time_total')
        # NOTE(review): condition looks inverted (see test_04_02); kept as-is.
        if t_max < (5 * t_min):
            log.warning(f'avg time of transfer: {t_avg} [{i_min}={t_min}, {i_max}={t_max}]')

    def stats_spread(self, stats: List[Dict], key: str) -> Tuple[float, int, float, int, float]:
        """Return the spread of float values found under `key` in `stats`.

        :param stats: list of per-transfer stat dicts (e.g. curl's writeout JSON)
        :param key: dict key whose values are read via float()
        :return: tuple (average, index_of_min, min, index_of_max, max);
                 for an empty `stats` list returns (0.0, -1, 0.0, -1, 0.0)
                 instead of raising ZeroDivisionError.
        """
        if not stats:
            return 0.0, -1, 0.0, -1, 0.0
        stotals = 0.0
        # use +/-inf sentinels: the previous literals (100.0 / 0.0) failed
        # to track the minimum when every value exceeded 100 seconds,
        # leaving i_min == -1 and s_min == 100.0
        s_min = float('inf')
        i_min = -1
        s_max = float('-inf')
        i_max = -1
        for idx, s in enumerate(stats):
            val = float(s[key])
            stotals += val
            if val > s_max:
                s_max = val
                i_max = idx
            if val < s_min:
                s_min = val
                i_min = idx
        return stotals/len(stats), i_min, s_min, i_max, s_max