#!/usr/bin/env python
#
# Public Domain 2014-2019 MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# Generated from runner/read_write_heavy.wtperf originally, then hand edited.
# A short run demonstrating how read or write threads can be synchronized.
import sys
from runner import *
from wiredtiger import *
from workgen import *
context = Context()
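# Connection configuration: a 2GB cache with up to 8 eviction threads,
# logging enabled (required by the log-like table created below), room for
# 250 sessions, and fast statistics written to a JSON log every second.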
conn_config = ""
conn_config += ",cache_size=2GB,eviction=(threads_max=8),log=(enabled=true),session_max=250,statistics=(fast),statistics_log=(wait=1,json)" # explicitly added
conn = wiredtiger_open("WT_TEST", "create," + conn_config)
s = conn.open_session("")
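# Table configuration. The config fragments below are concatenated; when
# the same key appears more than once (e.g. leaf_page_max, split_pct,
# block_compressor), the last occurrence wins.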
wtperf_table_config = "key_format=S,value_format=S," +\
    "exclusive=true,allocation_size=4kb," +\
    "internal_page_max=64kb,leaf_page_max=4kb,split_pct=100,"
compress_table_config = "block_compressor=snappy,"
table_config = "memory_page_max=10m,leaf_value_max=64MB,checksum=on,split_pct=90,type=file,log=(enabled=false),leaf_page_max=32k,block_compressor=snappy"
tables = []
table_count = 100
for i in range(0, table_count):
    tname = "table:test" + str(i)
    table = Table(tname)
    s.create(tname, wtperf_table_config +\
             compress_table_config + table_config)
    table.options.key_size = 20
    table.options.value_size = 7000
    tables.append(table)
populate_threads = 4
icount = 4000000
# There are multiple tables to be filled during populate;
# op_multi_table spreads the inserts across all of them, so the
# icount is split between them.
pop_ops = Operation(Operation.OP_INSERT, tables[0])
pop_ops = op_multi_table(pop_ops, tables)
nops_per_thread = icount // (populate_threads * table_count)
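# With icount=4000000, populate_threads=4 and table_count=100, each
# populate thread performs 10000 inserts into each table.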
pop_thread = Thread(pop_ops * nops_per_thread)
pop_workload = Workload(context, populate_threads * pop_thread)
pop_workload.run(conn)
print('populate complete')
# Log-like table; requires that logging be enabled in the connection config.
log_name = "table:log"
s.create(log_name, wtperf_table_config + "key_format=S,value_format=S," +\
    compress_table_config + table_config + ",log=(enabled=true)")
log_table = Table(log_name)
ops = Operation(Operation.OP_UPDATE, tables[0])
ops = op_multi_table(ops, tables, False)
ops = op_log_like(ops, log_table, 0)
thread0 = Thread(ops)
# These operations include log_like operations, which double the number of
# insert/update operations performed. This may push the actual operation
# rate above the throttle.
thread0.options.throttle = 11
thread0.options.throttle_burst = 0
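# throttle is the target rate in operations per second for the thread;
# throttle_burst=0 spreads the operations evenly across each second
# instead of issuing them in bursts.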
ops = Operation(Operation.OP_SEARCH, tables[0])
ops = op_multi_table(ops, tables, False)
ops = op_log_like(ops, log_table, 0)
thread1 = Thread(ops)
thread1.options.throttle = 60
thread1.options.throttle_burst = 0
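# A dedicated thread that checkpoints every 60 seconds.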
ops = Operation(Operation.OP_SLEEP, "60") + \
      Operation(Operation.OP_CHECKPOINT, "")
checkpoint_thread = Thread(ops)
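# A dedicated thread that flushes the log every 0.1 seconds.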
ops = Operation(Operation.OP_SLEEP, "0.1") + \
      Operation(Operation.OP_LOG_FLUSH, "")
logging_thread = Thread(ops)
############################################################################
# This part was added to the generated file.
# Add threads that each do a burst of operations and then sleep, in a loop.
# The write threads are in two groups that synchronize with each other
# every 20 seconds. The read threads are in two groups that synchronize
# with each other every 16 seconds. There is a collective synchronization
# every 80 seconds.
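# The cycle arithmetic: thread_big_10 works 5s and sleeps 5s (a 10s
# cycle), thread_big_20 works 5s and sleeps 15s (a 20s cycle), so the
# write groups realign every lcm(10, 20) = 20 seconds. The read groups
# cycle every 8s and 16s, realigning every lcm(8, 16) = 16 seconds, and
# all four groups coincide every lcm(20, 16) = 80 seconds.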
ops = Operation(Operation.OP_UPDATE, tables[0])
ops = op_multi_table(ops, tables, False)
ops = op_log_like(ops, log_table, 0)
ops = timed(5.0, ops) + sleep(5.0)
thread_big_10 = Thread(ops)
thread_big_10.synchronized = True
ops = Operation(Operation.OP_UPDATE, tables[0])
ops = op_multi_table(ops, tables, False)
ops = op_log_like(ops, log_table, 0)
ops = timed(5.0, ops) + sleep(15.0)
thread_big_20 = Thread(ops)
thread_big_20.synchronized = True
ops = Operation(Operation.OP_SEARCH, tables[0])
ops = op_multi_table(ops, tables, False)
ops = op_log_like(ops, log_table, 0)
ops = timed(4.0, ops) + sleep(4.0)
thread_bigread_8 = Thread(ops)
thread_bigread_8.synchronized = True
ops = Operation(Operation.OP_SEARCH, tables[0])
ops = op_multi_table(ops, tables, False)
ops = op_log_like(ops, log_table, 0)
ops = timed(4.0, ops) + sleep(12.0)
thread_bigread_16 = Thread(ops)
thread_bigread_16.synchronized = True
# End of added section.
# The new threads will also be added to the workload below.
############################################################################
workload = Workload(context, 20 * thread0 + 20 * thread1 +\
    checkpoint_thread + logging_thread +\
    50 * thread_big_10 + 50 * thread_big_20 +\
    50 * thread_bigread_8 + 50 * thread_bigread_16)
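# Run for 15 minutes (900s) with no warmup, reporting progress every
# second and sampling operation latencies at one-second intervals.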
workload.options.report_interval = 1
workload.options.run_time = 900
workload.options.sample_rate = 1
workload.options.warmup = 0
workload.options.sample_interval_ms = 1000
workload.run(conn)
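# Write out the per-operation latency statistics gathered during the run.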
latency_filename = "WT_TEST/latency.out"
latency.workload_latency(workload, latency_filename)