File: s3_client_test.c

/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */

#include "aws/s3/private/s3_client_impl.h"
#include "aws/s3/private/s3_request.h"
#include "aws/s3/private/s3_util.h"
#include "s3_tester.h"

#include <aws/common/clock.h>
#include <aws/testing/aws_test_harness.h>

#define TEST_CASE(NAME)                                                                                                \
    AWS_TEST_CASE(NAME, s_test_##NAME);                                                                                \
    static int s_test_##NAME(struct aws_allocator *allocator, void *ctx)
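
/* TEST_CASE(foo) registers the test with the harness via AWS_TEST_CASE and opens the definition of s_test_foo, so the
 * block that follows each TEST_CASE(...) invocation below is the test body. */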

#define DEFINE_HEADER(NAME, VALUE)                                                                                     \
    { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(NAME), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(VALUE), }

static void s_init_mock_s3_request_upload_part_timeout(
    struct aws_s3_request *mock_request,
    uint64_t original_upload_timeout_ms,
    uint64_t request_time_ns,
    uint64_t response_to_first_byte_time_ns) {
    mock_request->upload_timeout_ms = (size_t)original_upload_timeout_ms;
    struct aws_s3_request_metrics *metrics = mock_request->send_data.metrics;

    metrics->time_metrics.send_start_timestamp_ns = 0;
    metrics->time_metrics.send_end_timestamp_ns = 0;
    metrics->time_metrics.receive_end_timestamp_ns = request_time_ns;
    metrics->time_metrics.receive_start_timestamp_ns = response_to_first_byte_time_ns;
}
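
/* Note on the mock above: with both send timestamps pinned to 0, the derived request time
 * (receive_end - send_start) equals request_time_ns and the derived response-to-first-byte time
 * (receive_start - send_end) equals response_to_first_byte_time_ns, which is presumably how the client reads these
 * metrics back. */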

static int s_starts_upload_retry(struct aws_s3_client *client, struct aws_s3_request *mock_request) {
    uint64_t average_time_ns = aws_timestamp_convert(
        300, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); /* 0.3 secs, the average time to upload a part */
    AWS_ZERO_STRUCT(client->synced_data.upload_part_stats);

    s_init_mock_s3_request_upload_part_timeout(mock_request, 0, average_time_ns, average_time_ns);
    for (size_t i = 0; i < 10; i++) {
        /* Mock a number of requests completing with the average request time */
        aws_s3_client_update_upload_part_timeout(client, mock_request, AWS_ERROR_SUCCESS);
    }

    /* Check that the timeout mechanism is still active (not turned off) */
    ASSERT_FALSE(client->synced_data.upload_part_stats.stop_timeout);
    size_t current_timeout_ms = aws_atomic_load_int(&client->upload_timeout_ms);
    /* We start the retry with a default 1 sec timeout */
    ASSERT_UINT_EQUALS(1000, current_timeout_ms);
    return AWS_OP_SUCCESS;
}
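
/*
 * For reference, a minimal sketch of the adaptive-timeout policy exercised below, reconstructed from the numbered step
 * comments in this file. It is illustrative only (the name and constants are stand-ins, not the library
 * implementation), so it is kept out of the build:
 */
#if 0
static uint64_t s_sketch_next_timeout_ms(uint64_t current_ms, uint64_t expected_ms, double timeout_rate) {
    if (current_ms > 5000) {
        return 0; /* 4.3: past the 5-sec cap, stop applying an upload timeout at all */
    }
    if (timeout_rate > 0.01) {
        return current_ms + 1000; /* 4.2: more than 1% of requests timed out, jump by 1 sec */
    }
    if (timeout_rate > 0.001) {
        return current_ms + 100; /* 4.1: more than 0.1% timed out, nudge up by 100 ms */
    }
    /* 3.1: on success, drift toward the expected timeout */
    return (uint64_t)(0.99 * (double)current_ms + 0.01 * (double)expected_ms);
}
#endif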

/* Test that aws_s3_client_update_upload_part_timeout works as expected */
TEST_CASE(client_update_upload_part_timeout) {
    (void)ctx;
    struct aws_s3_tester tester;
    AWS_ZERO_STRUCT(tester);
    ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester));
    struct aws_s3_client *client = NULL;
    struct aws_s3_tester_client_options client_options = {
        .part_size = MB_TO_BYTES(8),
        .tls_usage = AWS_S3_TLS_DISABLED,
    };
    ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client));
    struct aws_s3_request mock_request;
    struct aws_s3_request_metrics metrics;
    AWS_ZERO_STRUCT(mock_request);
    AWS_ZERO_STRUCT(metrics);
    mock_request.send_data.metrics = &metrics;

    uint64_t large_time_ns =
        aws_timestamp_convert(5500, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); /* 5.5 secs, larger than 5 secs */

    uint64_t average_time_ns = aws_timestamp_convert(
        250, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); /* 0.25 secs, close to the average upload time */

    size_t init_count = 10;
    {
        /* 1. If the request time is larger than 5 secs, we don't do the timeout retry */
        AWS_ZERO_STRUCT(client->synced_data.upload_part_stats);
        s_init_mock_s3_request_upload_part_timeout(&mock_request, 0, large_time_ns, average_time_ns);

        /* If a request timeout happened before the retry started, it has no effect. */
        aws_s3_client_update_upload_part_timeout(client, &mock_request, AWS_ERROR_HTTP_RESPONSE_FIRST_BYTE_TIMEOUT);
        for (size_t i = 0; i < init_count; i++) {
            /* Mock a number of requests completing with the large request time */
            aws_s3_client_update_upload_part_timeout(client, &mock_request, AWS_ERROR_SUCCESS);
        }

        /* Check that the timeout mechanism has been turned off */
        ASSERT_TRUE(client->synced_data.upload_part_stats.stop_timeout);
        size_t current_timeout_ms = aws_atomic_load_int(&client->upload_timeout_ms);
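        /* A stored timeout of 0 presumably means no first-byte timeout is applied to subsequent upload parts. */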
        ASSERT_UINT_EQUALS(0, current_timeout_ms);
    }

    {
        ASSERT_SUCCESS(s_starts_upload_retry(client, &mock_request));
        /**
         * 3. Once a request finishes without timeout, use the average response_to_first_byte_time +
         *      g_expect_timeout_offset_ms as our expected timeout. (TODO: The real expected timeout should be a P99 of
         *      all the requests.)
         *  3.1 Adjust the current timeout against the expected timeout, via 0.99 * <current timeout> + 0.01 * <expected
         *      timeout> to get closer to the expected timeout.
         */
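        /*
         * Convergence sketch (illustrative arithmetic, not from the source): with
         * timeout' = 0.99 * timeout + 0.01 * expected, the gap to the expected value shrinks by a factor of 0.99 per
         * run, so after 1000 runs it is scaled by 0.99^1000 ~= 4.3e-5. With an initial gap under 1 sec, the remaining
         * error is far below 1 ms, which is why the assertions below can expect an exact match in whole milliseconds.
         */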
        s_init_mock_s3_request_upload_part_timeout(
            &mock_request,
            aws_timestamp_convert(1, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_MILLIS, NULL),
            average_time_ns,
            average_time_ns);

        /* After 1000 runs, the timeout converges to the "expected" value (average time + g_expect_timeout_offset_ms)
         */
        for (size_t i = 0; i < 1000; i++) {
            /* Mock a number of requests completing with the average request time */
            aws_s3_client_update_upload_part_timeout(client, &mock_request, AWS_ERROR_SUCCESS);
        }
        size_t current_timeout_ms = aws_atomic_load_int(&client->upload_timeout_ms);
        ASSERT_UINT_EQUALS(
            aws_timestamp_convert(average_time_ns, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_MILLIS, NULL) +
                g_expect_timeout_offset_ms,
            current_timeout_ms);

        /* The timeout will not change after another 1k runs */
        for (size_t i = 0; i < 1000; i++) {
            /* Mock a number of requests completing with the average request time */
            aws_s3_client_update_upload_part_timeout(client, &mock_request, AWS_ERROR_SUCCESS);
        }
        ASSERT_FALSE(client->synced_data.upload_part_stats.stop_timeout);
        current_timeout_ms = aws_atomic_load_int(&client->upload_timeout_ms);
        /* The timeout still matches the "expected" value (average time + g_expect_timeout_offset_ms)
         */
        ASSERT_UINT_EQUALS(
            aws_timestamp_convert(average_time_ns, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_MILLIS, NULL) +
                g_expect_timeout_offset_ms,
            current_timeout_ms);
    }

    {
        ASSERT_SUCCESS(s_starts_upload_retry(client, &mock_request));
        /**
         *  4.1 If the timeout rate is larger than 0.1%, we increase the timeout by 100ms (check the timeout the
         *      request was made with; if the updated timeout is larger than expected, skip the update).
         */
        /* Set the current timeout rate to just above 0.1% (1 failure against 800 completions) */
        client->synced_data.upload_part_stats.timeout_rate_tracking.num_completed = 800;
        client->synced_data.upload_part_stats.timeout_rate_tracking.num_failed = 1;

        /* Update the timeout as the rate is larger than 0.1% */
        s_init_mock_s3_request_upload_part_timeout(&mock_request, 1000 /*original_upload_timeout_ms*/, 0, 0);
        aws_s3_client_update_upload_part_timeout(client, &mock_request, AWS_ERROR_HTTP_RESPONSE_FIRST_BYTE_TIMEOUT);
        size_t current_timeout_ms = aws_atomic_load_int(&client->upload_timeout_ms);
        /* 1.1 secs */
        ASSERT_UINT_EQUALS(1100, current_timeout_ms);
        /* The same original timeout applies to multiple requests made earlier; a timeout that arrives right after we
         * already bumped the value will not bump it again. */
        aws_s3_client_update_upload_part_timeout(client, &mock_request, AWS_ERROR_HTTP_RESPONSE_FIRST_BYTE_TIMEOUT);
        ASSERT_FALSE(client->synced_data.upload_part_stats.stop_timeout);
        current_timeout_ms = aws_atomic_load_int(&client->upload_timeout_ms);
        /* 1.1 secs, still */
        ASSERT_UINT_EQUALS(1100, current_timeout_ms);
    }

    {
        ASSERT_SUCCESS(s_starts_upload_retry(client, &mock_request));
        /**
         * 4.2 If the timeout rate is larger than 1%, we increase the timeout by 1 sec (if needed), and clear the
         *      tracked rate so the exact rate is measured against the new timeout.
         */

        /* Assume our first batch of requests all failed with the 1 sec timeout, as each request takes around 3 secs to
         * complete */

        uint64_t real_response_time_ns =
            aws_timestamp_convert(3000 - g_expect_timeout_offset_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL);
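        /* Subtracting g_expect_timeout_offset_ms makes the expected timeout (average response time + offset) land
         * exactly on 3000 ms, which is where the 1-sec escalations below stop. */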
        s_init_mock_s3_request_upload_part_timeout(
            &mock_request, 1000 /*original_upload_timeout_ms*/, real_response_time_ns, real_response_time_ns);

        /* The first failure will not change the timeout, as we use the ceiling of the 1% rate */
        aws_s3_client_update_upload_part_timeout(client, &mock_request, AWS_ERROR_HTTP_RESPONSE_FIRST_BYTE_TIMEOUT);
        size_t current_timeout_ms = aws_atomic_load_int(&client->upload_timeout_ms);
        ASSERT_UINT_EQUALS(1000, current_timeout_ms);

        /* Updated at the second timeout */
        aws_s3_client_update_upload_part_timeout(client, &mock_request, AWS_ERROR_HTTP_RESPONSE_FIRST_BYTE_TIMEOUT);
        current_timeout_ms = aws_atomic_load_int(&client->upload_timeout_ms);
        ASSERT_UINT_EQUALS(2000, current_timeout_ms);
        /* The rest of the batch failure will not affect the timeout */
        for (size_t i = 0; i < 10; i++) {
            aws_s3_client_update_upload_part_timeout(client, &mock_request, AWS_ERROR_HTTP_RESPONSE_FIRST_BYTE_TIMEOUT);
        }
        current_timeout_ms = aws_atomic_load_int(&client->upload_timeout_ms);
        /* still 2 secs */
        ASSERT_UINT_EQUALS(2000, current_timeout_ms);

        /* The 2-sec timeout will still fail the whole batch */
        s_init_mock_s3_request_upload_part_timeout(
            &mock_request,
            current_timeout_ms /*original_upload_timeout_ms*/,
            real_response_time_ns,
            real_response_time_ns);
        for (size_t i = 0; i < 10; i++) {
            aws_s3_client_update_upload_part_timeout(client, &mock_request, AWS_ERROR_HTTP_RESPONSE_FIRST_BYTE_TIMEOUT);
        }
        current_timeout_ms = aws_atomic_load_int(&client->upload_timeout_ms);
        /* 3 secs now */
        ASSERT_UINT_EQUALS(3000, current_timeout_ms);

        /* A 3-sec timeout results in around a 0.1% failure rate, which we tolerate */
        s_init_mock_s3_request_upload_part_timeout(
            &mock_request,
            current_timeout_ms /*original_upload_timeout_ms*/,
            real_response_time_ns,
            real_response_time_ns);
        /* 1 failure, while all the others succeed */
        aws_s3_client_update_upload_part_timeout(client, &mock_request, AWS_ERROR_HTTP_RESPONSE_FIRST_BYTE_TIMEOUT);
        for (size_t i = 0; i < 10; i++) {
            aws_s3_client_update_upload_part_timeout(client, &mock_request, AWS_ERROR_SUCCESS);
        }
        /* still 3 secs */
        current_timeout_ms = aws_atomic_load_int(&client->upload_timeout_ms);
        ASSERT_UINT_EQUALS(3000, current_timeout_ms);
        ASSERT_FALSE(client->synced_data.upload_part_stats.stop_timeout);
    }

    {
        ASSERT_SUCCESS(s_starts_upload_retry(client, &mock_request));
        /* 4.3 Once the timeout is larger than 5 secs, we stop the process. */
        s_init_mock_s3_request_upload_part_timeout(&mock_request, 1000 /*original_upload_timeout_ms*/, 0, 0);
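        /* Per rule 4.2, each pair of consecutive timeouts below should bump the timeout by 1 sec; after enough
         * iterations it passes the 5-sec cap and the mechanism turns itself off. */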

        for (size_t i = 0; i < 10; i++) {
            /* Make two consecutive timed-out requests against the updated timeout */
            aws_s3_client_update_upload_part_timeout(client, &mock_request, AWS_ERROR_HTTP_RESPONSE_FIRST_BYTE_TIMEOUT);
            aws_s3_client_update_upload_part_timeout(client, &mock_request, AWS_ERROR_HTTP_RESPONSE_FIRST_BYTE_TIMEOUT);
            size_t current_timeout_ms = aws_atomic_load_int(&client->upload_timeout_ms);
            s_init_mock_s3_request_upload_part_timeout(
                &mock_request, current_timeout_ms /*original_upload_timeout_ms*/, 0, 0);
        }
        /* Timeout stopped */
        size_t current_timeout_ms = aws_atomic_load_int(&client->upload_timeout_ms);
        ASSERT_UINT_EQUALS(0, current_timeout_ms);
        ASSERT_TRUE(client->synced_data.upload_part_stats.stop_timeout);
    }

    aws_s3_client_release(client);
    aws_s3_tester_clean_up(&tester);
    return AWS_OP_SUCCESS;
}

/* Test that a meta request can override the part size as expected */
TEST_CASE(client_meta_request_override_part_size) {
    (void)ctx;
    struct aws_s3_tester tester;
    AWS_ZERO_STRUCT(tester);
    ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester));
    struct aws_s3_client *client = NULL;
    struct aws_s3_tester_client_options client_options = {
        .part_size = MB_TO_BYTES(8),
        .tls_usage = AWS_S3_TLS_DISABLED,
    };
    ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client));

    struct aws_string *host_name =
        aws_s3_tester_build_endpoint_string(allocator, &g_test_bucket_name, &g_test_s3_region);
    struct aws_byte_cursor host_cur = aws_byte_cursor_from_string(host_name);
    struct aws_byte_cursor test_object_path = aws_byte_cursor_from_c_str("/mytest");

    size_t override_part_size = MB_TO_BYTES(10);
    size_t content_length =
        MB_TO_BYTES(20); /* Make the content length larger than the override part size so that MPU is used */

    /* MPU put object */
    struct aws_input_stream_tester_options stream_options = {
        .autogen_length = content_length,
    };
    struct aws_input_stream *input_stream = aws_input_stream_new_tester(allocator, &stream_options);

    struct aws_http_message *put_messages = aws_s3_test_put_object_request_new(
        allocator, &host_cur, g_test_body_content_type, test_object_path, input_stream, 0 /*flags*/);

    struct aws_s3_meta_request_options meta_request_options = {
        .message = put_messages,
        .type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT,
        .part_size = override_part_size,
    };
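    /* Calling the vtable factory directly constructs the meta request without dispatching it, so the overridden
     * part_size can be inspected without any network traffic. */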
    struct aws_s3_meta_request *put_meta_request = client->vtable->meta_request_factory(client, &meta_request_options);
    ASSERT_UINT_EQUALS(put_meta_request->part_size, override_part_size);

    /* auto ranged Get Object */
    struct aws_http_message *get_message = aws_s3_test_get_object_request_new(
        allocator, aws_byte_cursor_from_string(host_name), g_pre_existing_object_1MB);

    struct aws_s3_meta_request_options get_meta_request_options = {
        .message = get_message,
        .type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT,
        .part_size = override_part_size,
    };

    struct aws_s3_meta_request *get_meta_request =
        client->vtable->meta_request_factory(client, &get_meta_request_options);
    ASSERT_UINT_EQUALS(get_meta_request->part_size, override_part_size);

    aws_http_message_release(put_messages);
    aws_s3_meta_request_release(put_meta_request);
    aws_http_message_release(get_message);
    aws_s3_meta_request_release(get_meta_request);
    aws_string_destroy(host_name);
    aws_s3_client_release(client);
    aws_input_stream_release(input_stream);
    aws_s3_tester_clean_up(&tester);

    return AWS_OP_SUCCESS;
}

/* Test that a meta request can override the multipart upload threshold as expected */
TEST_CASE(client_meta_request_override_multipart_upload_threshold) {
    (void)ctx;
    struct aws_s3_tester tester;
    ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester));

    struct aws_s3_client_config client_config = {
        .part_size = MB_TO_BYTES(8),
        .multipart_upload_threshold = MB_TO_BYTES(15),
    };

    ASSERT_SUCCESS(aws_s3_tester_bind_client(
        &tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION | AWS_S3_TESTER_BIND_CLIENT_SIGNING));

    struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config);

    ASSERT_TRUE(client != NULL);

    struct aws_string *host_name =
        aws_s3_tester_build_endpoint_string(allocator, &g_test_bucket_name, &g_test_s3_region);
    struct aws_byte_cursor host_cur = aws_byte_cursor_from_string(host_name);
    struct aws_byte_cursor test_object_path = aws_byte_cursor_from_c_str("/mytest");

    size_t override_multipart_upload_threshold = MB_TO_BYTES(20);
    size_t content_length =
        MB_TO_BYTES(20); /* Content length does not exceed the override threshold, so no MPU is expected */

    /* MPU put object */
    struct aws_input_stream_tester_options stream_options = {
        .autogen_length = content_length,
    };
    struct aws_input_stream *input_stream = aws_input_stream_new_tester(allocator, &stream_options);

    struct aws_http_message *put_messages = aws_s3_test_put_object_request_new(
        allocator, &host_cur, g_test_body_content_type, test_object_path, input_stream, 0 /*flags*/);

    {
        /* Content length does not exceed the override multipart_upload_threshold */
        struct aws_s3_meta_request_options meta_request_options = {
            .message = put_messages,
            .type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT,
            .multipart_upload_threshold = override_multipart_upload_threshold,
        };
        struct aws_s3_meta_request *put_meta_request =
            client->vtable->meta_request_factory(client, &meta_request_options);

        /* Part size will be 0, as we don't use MPU */
        ASSERT_UINT_EQUALS(put_meta_request->part_size, 0);
        aws_s3_meta_request_release(put_meta_request);
    }

    {
        /* The meta request overrides the part size, so the override part size is used as the multipart upload
         * threshold */
        struct aws_s3_meta_request_options meta_request_options = {
            .message = put_messages,
            .type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT,
            .part_size = override_multipart_upload_threshold,
        };
        struct aws_s3_meta_request *put_meta_request =
            client->vtable->meta_request_factory(client, &meta_request_options);

        /* Part size will be 0, as we don't use MPU */
        ASSERT_UINT_EQUALS(put_meta_request->part_size, 0);
        aws_s3_meta_request_release(put_meta_request);
    }

    aws_http_message_release(put_messages);
    aws_string_destroy(host_name);
    aws_s3_client_release(client);
    aws_input_stream_release(input_stream);
    aws_s3_tester_clean_up(&tester);

    return AWS_OP_SUCCESS;
}