# This test verifies that NDB binlog injector events which overflow the
# cache and spill to disk are instrumented in the performance_schema
# tables. It also verifies that a warning is logged when an NDB binary
# log transaction has events that spill to disk.
-- source include/have_ndb.inc
-- source include/have_binlog_format_mixed_or_row.inc
# Save original value to allow restoring config before test end
SET @save_ndb_log_cache_size = @@global.ndb_log_cache_size;
SELECT @save_ndb_log_cache_size;
# Configure lowest possible ndb binlog injector cache size to make it possible
# to exceed the size and trigger a "disk spill" further down
SET @@global.ndb_log_cache_size=4096;
# Show that ndb_log_cache_size is a global variable
--error ER_GLOBAL_VARIABLE
SET SESSION ndb_log_cache_size=32768;
-- let $cache_qry = SELECT COUNT_WRITE FROM performance_schema.file_summary_by_event_name WHERE event_name LIKE "%io_cache%"
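# The query above counts write operations recorded for files instrumented
# as "wait/io/file/sql/io_cache", i.e. writes to the temporary file backing
# an IO_CACHE. An increase between two samples indicates a disk spill.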
# Check configured values required by test
SELECT @@global.ndb_log_cache_size;
SELECT @@global.binlog_cache_size;
SELECT @@global.binlog_stmt_cache_size;
SELECT NAME, ENABLED FROM performance_schema.setup_instruments WHERE name LIKE "%file/sql/io_cache%";
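# The io_cache file instrument must be ENABLED = YES for the COUNT_WRITE
# statistics used below to be collected.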
CREATE TABLE t1 (a INT PRIMARY KEY, b BLOB, lb LONGBLOB) ENGINE = NDB;
-- echo # Insert small transactions that should NOT
-- echo # trigger an IO_CACHE overflow
-- echo # Insert ~2KiB of data per transaction
-- let $before_writes = `$cache_qry`
INSERT INTO t1 VALUES (1, repeat(0x41, 1024), repeat(0x42, 1024));
INSERT INTO t1 VALUES (2, repeat(0x41, 1024), repeat(0x42, 1024));
save_master_pos;
-- let $after_writes= `$cache_qry`
-- echo
-- echo ## Verify there were no IO_CACHE write events
-- echo
-- let $assert_text= There were 0 writes to IO_CACHE
-- let $assert_cond= $before_writes = $after_writes
-- source include/assert.inc
-- echo
-- echo # Generate larger transactions to force an IO_CACHE
-- echo # overflow and subsequent write to a temporary file.
-- echo # Insert ~8KiB of data per transaction.
-- let $before_writes = $after_writes
SET SESSION AUTOCOMMIT=OFF;
BEGIN;
INSERT INTO t1 VALUES (3, repeat(0x41, 1024), repeat(0x42, 1024));
INSERT INTO t1 VALUES (4, repeat(0x41, 1024), repeat(0x42, 1024));
INSERT INTO t1 VALUES (5, repeat(0x41, 1024), repeat(0x42, 1024));
INSERT INTO t1 VALUES (6, repeat(0x41, 1024), repeat(0x42, 1024));
COMMIT;
SET SESSION AUTOCOMMIT=ON;
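# A single autocommit statement writing ~8KiB of blob data also exceeds
# the 4096 byte cache and is expected to spill to disk on its own.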
INSERT INTO t1 VALUES (7, repeat(0x41, 4096), repeat(0x42, 4096));
save_master_pos;
-- let $after_writes = `$cache_qry`
-- echo
-- echo ## Verify IO_CACHE write events and that warnings were logged
-- echo
-- let $assert_text= There were >0 writes to IO_CACHE
-- let $assert_cond= $before_writes < $after_writes
-- source include/assert.inc
--echo # Check that error log contains message indicating cache spill
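# Note: assert.inc replaces a [statement, column, row] expression in
# $assert_cond with the value of that column in the given row of the
# statement's result.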
-- let $qry= SELECT count(DATA) as warnings FROM performance_schema.error_log WHERE SUBSYSTEM = "NDB" AND DATA REGEXP "Binary log cache data overflowed to disk"
-- let $assert_text= Binary log cache data overflowed to disk
-- let $assert_cond= [$qry, warnings, 1] >= 1
-- source include/assert.inc
# The temporary file used for extending the cache in the above case can be
# seen in performance_schema by querying the file_instances table; the file
# has "ML" as file name prefix:
# SELECT * FROM performance_schema.file_instances
# WHERE EVENT_NAME = "wait/io/file/sql/io_cache";
# FILE_NAME EVENT_NAME OPEN_COUNT
# /tmp/mysqld.1.1/MLfd=66 wait/io/file/sql/io_cache 1
--let $assert_cond = [SELECT OPEN_COUNT FROM performance_schema.file_instances WHERE EVENT_NAME = "wait/io/file/sql/io_cache", OPEN_COUNT, 1] >= 1
--let $assert_text = The IO_CACHE should spill to disk
--source include/assert.inc
# Suppress the provoked message so the error log check at test end does not fail
-- disable_query_log ONCE
call mtr.add_suppression(".*Binary log cache data overflowed to disk.*");
--echo #
--echo # Check that ndb_log_cache_size can be reconfigured at runtime and that
--echo # a large write which previously caused a cache spill now fits. This is
--echo # done by increasing the cache size and doing a large write again.
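# The transaction below writes ~16KiB of blob data, well below the new
# 1MiB cache, so no additional IO_CACHE writes are expected.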
SET @@global.ndb_log_cache_size=1024*1024;
BEGIN;
INSERT INTO t1 VALUES (8, repeat(0x41, 1024), repeat(0x42, 1024));
INSERT INTO t1 VALUES (9, repeat(0x41, 1024), repeat(0x42, 1024));
INSERT INTO t1 VALUES (10, repeat(0x41, 1024), repeat(0x42, 1024));
INSERT INTO t1 VALUES (11, repeat(0x41, 1024), repeat(0x42, 1024));
INSERT INTO t1 VALUES (12, repeat(0x41, 1024), repeat(0x42, 1024));
INSERT INTO t1 VALUES (13, repeat(0x41, 1024), repeat(0x42, 1024));
INSERT INTO t1 VALUES (14, repeat(0x41, 1024), repeat(0x42, 1024));
INSERT INTO t1 VALUES (15, repeat(0x41, 1024), repeat(0x42, 1024));
COMMIT;
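# Wait until the binlog injector has handled this connection's changes so
# that the counters sampled below include any writes for the transaction above.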
--source suite/ndb/include/ndb_binlog_wait_own_changes.inc
-- let $after_writes_with_large_cache = `$cache_qry`
#echo after_writes_with_large_cache: $after_writes_with_large_cache;
-- let $assert_text= Write with large cache didn't trigger cache overflow
-- let $assert_cond= $after_writes = $after_writes_with_large_cache
-- source include/assert.inc
# Restore changed config to original value
SET @@global.ndb_log_cache_size=@save_ndb_log_cache_size;
-- echo
DROP TABLE t1;