################################################
# Purpose: To test advanced DD and replication #
################################################
#### Include Section ####
--source include/have_ndb.inc
--source include/have_binlog_format_mixed_or_row.inc
--source include/ndb_default_cluster.inc
--source include/not_embedded.inc
--source include/ndb_master-slave.inc
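# The includes above are expected to verify that NDB is available as the
# default cluster, require a row-capable binlog format, skip embedded-server
# builds, and set up the master/slave connections used by the rest of this test.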
#########################################################
# Requirement: Cluster DD and replication must be able  #
# to handle ALTER TABLE and index operations and must   #
# replicate them to the slave correctly                 #
#########################################################
## Test #1 replication of CDD and Alter Tables #####
--echo ***** Test 1 RPL of CDD and Alter *****
--echo ***** Test 1 setup *****
CREATE LOGFILE GROUP lg1
ADD UNDOFILE 'undofile.dat'
INITIAL_SIZE 16M
UNDO_BUFFER_SIZE = 1M
ENGINE=NDB;
ALTER LOGFILE GROUP lg1
ADD UNDOFILE 'undofile02.dat'
INITIAL_SIZE 4M
ENGINE=NDB;
CREATE TABLESPACE ts1
ADD DATAFILE 'datafile.dat'
USE LOGFILE GROUP lg1
INITIAL_SIZE 12M
ENGINE=NDB;
ALTER TABLESPACE ts1
ADD DATAFILE 'datafile02.dat'
INITIAL_SIZE 4M
ENGINE=NDB;
CREATE TABLE t1
(c1 INT NOT NULL PRIMARY KEY,
c2 INT NOT NULL,
c3 INT NOT NULL)
TABLESPACE ts1 STORAGE DISK
ENGINE=NDB;
--echo ***** insert some data *****
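# Insert 900 rows of the form (c1, c1*2, c1+3) with the query log disabled
# so the individual INSERT statements do not flood the result file.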
let $j= 900;
--disable_query_log
while ($j)
{
eval INSERT INTO t1 VALUES($j,$j*2,$j+3);
dec $j;
}
--enable_query_log
--echo ***** Select from Master *****
SELECT * FROM t1 ORDER BY c1 LIMIT 5;
--echo ***** Select from Slave *****
--sync_slave_with_master
connection slave;
SELECT * FROM t1 ORDER BY c1 LIMIT 5;
#####################################
# Just do a few File Schema checks  #
#####################################
--disable_query_log
SELECT DISTINCT FILE_NAME, FILE_TYPE, TABLESPACE_NAME, LOGFILE_GROUP_NAME
FROM INFORMATION_SCHEMA.FILES
WHERE ENGINE="ndbcluster" ORDER BY FILE_NAME;
--enable_query_log
--echo **** Do First Set of ALTERs in the master table ****
###################################################
# On this first set of alters I expect:
# 1. To be able to create an index on 2 columns
# 2. To be able to create a unique index
# 3. To be able to add two columns and have
#    it all replicated correctly to the slave cluster.
###################################################
connection master;
CREATE INDEX t1_i ON t1(c2, c3);
# Bug 18039
CREATE UNIQUE INDEX t1_i2 ON t1(c2);
ALTER TABLE t1 ADD c4 TIMESTAMP;
ALTER TABLE t1 ADD c5 DOUBLE;
ALTER TABLE t1 ADD INDEX (c5);
SHOW CREATE TABLE t1;
--echo **** Show first set of ALTERs on SLAVE ****
--sync_slave_with_master
connection slave;
SHOW CREATE TABLE t1;
--echo **** Second set of alters test 1 ****
############################################
# With this next set of alters we have had
# some issues with renames of tables. So this
# test renames our main table, drops an index
# from it, creates another table with the name
# of the original table, inserts a row, drops
# that table and renames the original table back.
# I want to make sure that 1) the cluster handles
# this okay and 2) that it is replicated
# correctly.
#############################################
connection master;
ALTER TABLE t1 RENAME t2;
ALTER TABLE t2 DROP INDEX c5;
CREATE TABLE t1 (c1 INT) ENGINE=NDB;
INSERT INTO t1 VALUES(1);
DROP TABLE t1;
ALTER TABLE t2 RENAME t1;
--echo **** Show second set of ALTERs on MASTER ****
SHOW CREATE TABLE t1;
--echo **** Show second set of ALTERs on SLAVE ****
--sync_slave_with_master
connection slave;
SHOW CREATE TABLE t1;
--echo **** Third and last set of alters for test1 ****
#########################################################
# In this last set of alters, we are testing the
# cluster's ability to rebuild indexes, drop a column that
# makes up an index with another column, and change types
# several times in a row. I have chosen BLOB as it seems to
# have had many issues in this release. I want to make sure
# that the cluster deals with these radical changes and that
# the replication to the slave cluster is done correctly.
###########################################################
connection master;
ALTER TABLE t1 CHANGE c1 c1 DOUBLE;
ALTER TABLE t1 CHANGE c2 c2 DECIMAL(10,2);
ALTER TABLE t1 DROP COLUMN c3;
ALTER TABLE t1 CHANGE c4 c4 TEXT CHARACTER SET utf8;
ALTER TABLE t1 CHANGE c4 c4 BLOB;
ALTER TABLE t1 CHANGE c4 c3 BLOB;
set @b1 = 'b1';
set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
UPDATE t1 SET c3=@b1 where c1 = 1;
UPDATE t1 SET c3=@b1 where c1 = 2;
--echo **** Show last set of ALTERs on MASTER ****
SHOW CREATE TABLE t1;
SELECT * FROM t1 ORDER BY c1 LIMIT 5;
--echo **** Show last set of ALTERs on SLAVE ****
--sync_slave_with_master
connection slave;
SHOW CREATE TABLE t1;
# Bug 18094
SELECT * FROM t1 ORDER BY c1 LIMIT 5;
SELECT * FROM t1 where c1 = 1;
connection master;
DROP TABLE t1;
--sync_slave_with_master
connection slave;
STOP SLAVE;
RESET SLAVE;
connection master;
RESET MASTER;
connection slave;
START SLAVE;
################### TEST 2 TPCB for disk data #############################
# Requirement: To have stored procedures and functions that are used to   #
# populate and post transactions to the database, using CDD that spans    #
# two tablespaces as well as memory-only cluster tables. In addition,     #
# the slave is to be stopped, cleaned, restored and synced with the       #
# master cluster.                                                          #
############################################################################
--echo ******** Create additional TABLESPACE test 2 **************
connection master;
CREATE TABLESPACE ts2
ADD DATAFILE 'datafile03.dat'
USE LOGFILE GROUP lg1
INITIAL_SIZE 10M
ENGINE=NDB;
ALTER TABLESPACE ts2
ADD DATAFILE 'datafile04.dat'
INITIAL_SIZE 5M
ENGINE=NDB;
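# The variables below parameterize include/tpcb_disk_data.inc, which is
# expected to create the tpcb database (account, teller, branch, history)
# and the load() and trans() procedures on the given engine and tablespace.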
let $engine_type=NDBCLUSTER;
let $table_space=ts2;
let $format='RBR';
--source include/tpcb_disk_data.inc
--echo ****** TEST 2 test time *********************************
USE tpcb;
--echo *********** Load up the database ******************
CALL tpcb.load();
--echo ********** Check load master and slave **************
SELECT COUNT(*) FROM account;
--sync_slave_with_master
connection slave;
USE tpcb;
SELECT COUNT(*) FROM account;
--echo ******** Run in some transactions ***************
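# Run 100 tpcb.trans() calls on the master; the query log is disabled so
# the repeated CALL statements do not appear in the result file.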
connection master;
let $j= 100;
--disable_query_log
while ($j)
{
eval CALL tpcb.trans($format);
dec $j;
}
--enable_query_log
--echo ***** Time to try slave sync ***********
--echo **** Must make sure slave is clean *****
--connection slave
STOP SLAVE;
RESET SLAVE;
DROP PROCEDURE IF EXISTS tpcb.load;
DROP PROCEDURE IF EXISTS tpcb.trans;
DROP TABLE IF EXISTS tpcb.account;
DROP TABLE IF EXISTS tpcb.teller;
DROP TABLE IF EXISTS tpcb.branch;
DROP TABLE IF EXISTS tpcb.history;
DROP DATABASE tpcb;
ALTER TABLESPACE ts1
DROP DATAFILE 'datafile.dat'
ENGINE=NDB;
ALTER TABLESPACE ts1
DROP DATAFILE 'datafile02.dat'
ENGINE=NDB;
DROP TABLESPACE ts1 ENGINE=NDB;
ALTER TABLESPACE ts2
DROP DATAFILE 'datafile03.dat'
ENGINE=NDB;
ALTER TABLESPACE ts2
DROP DATAFILE 'datafile04.dat'
ENGINE=NDB;
DROP TABLESPACE ts2 ENGINE=NDB;
DROP LOGFILE GROUP lg1 ENGINE=NDB;
--echo ********** Take a backup of the Master *************
connection master;
SELECT COUNT(*) FROM history;
let $j= 100;
--disable_query_log
while ($j)
{
eval CALL tpcb.trans($format);
dec $j;
}
--enable_query_log
SELECT COUNT(*) FROM history;
--source include/ndb_backup.inc
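# include/ndb_backup.inc is expected to start a native NDB backup through
# the management server and record the resulting backup id for the restore
# step below.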
--echo ************ Restore the slave ************************
connection slave;
CREATE DATABASE tpcb;
--source include/ndb_restore_slave_eoption.inc
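# This include is assumed to run ndb_restore against the slave cluster with
# the epoch option, so the restored position is recorded for the slave sync
# performed later by include/ndb_setup_slave.inc.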
--echo ***** Check a few slave restore values ***************
connection slave;
USE tpcb;
SELECT COUNT(*) FROM account;
--echo ***** Add some more records to master *********
connection master;
let $j= 100;
--disable_query_log
while ($j)
{
eval CALL tpcb.trans($format);
dec $j;
}
--enable_query_log
#
# Now set up replication to continue from the last epoch:
# 1. get apply_status epoch from slave
# 2. get corresponding _next_ binlog position from master
# 3. change master on slave
# 4. add some transactions for the slave to process
# 5. start replication
--echo ***** Finish the slave sync process *******
--disable_query_log
# 1. 2. 3.
--sync_slave_with_master
--source include/ndb_setup_slave.inc
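# include/ndb_setup_slave.inc is assumed to cover steps 1-3 above: read the
# latest applied epoch from the slave's apply status, look up the next binlog
# file/position on the master, and issue the corresponding CHANGE MASTER.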
--enable_query_log
# 4.
--echo * 4. *
connection master;
let $j= 100;
--disable_query_log
while ($j)
{
eval CALL tpcb.trans($format);
dec $j;
}
--enable_query_log
# 5.
--echo * 5. *
connection slave;
START SLAVE;
--echo **** We should be ready to continue on *************
connection master;
--echo ****** Let's make sure we match *******
--echo ***** MASTER *******
USE tpcb;
SELECT COUNT(*) FROM history;
--echo ****** SLAVE ********
--sync_slave_with_master
connection slave;
USE tpcb;
SELECT COUNT(*) FROM history;
--echo *** DUMP MASTER & SLAVE FOR COMPARE ********
--exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert tpcb account teller branch history > $MYSQLTEST_VARDIR/tmp/RPL_DD_ADV_M.sql
--exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert tpcb account teller branch history > $MYSQLTEST_VARDIR/tmp/RPL_DD_ADV_S.sql
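# The two dump files are compared with diff_files at the end of the test,
# after both clusters have been cleaned up.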
--echo *************** TEST 2 CLEANUP SECTION ********************
connection master;
DROP PROCEDURE IF EXISTS tpcb.load;
DROP PROCEDURE IF EXISTS tpcb.trans;
DROP TABLE tpcb.account;
DROP TABLE tpcb.teller;
DROP TABLE tpcb.branch;
DROP TABLE tpcb.history;
DROP DATABASE tpcb;
ALTER TABLESPACE ts1
DROP DATAFILE 'datafile.dat'
ENGINE=NDB;
ALTER TABLESPACE ts1
DROP DATAFILE 'datafile02.dat'
ENGINE=NDB;
DROP TABLESPACE ts1 ENGINE=NDB;
ALTER TABLESPACE ts2
DROP DATAFILE 'datafile03.dat'
ENGINE=NDB;
ALTER TABLESPACE ts2
DROP DATAFILE 'datafile04.dat'
ENGINE=NDB;
DROP TABLESPACE ts2 ENGINE=NDB;
DROP LOGFILE GROUP lg1 ENGINE=NDB;
--sync_slave_with_master
connection master;
--echo ****** Do dumps compare ************
diff_files $MYSQLTEST_VARDIR/tmp/RPL_DD_ADV_M.sql $MYSQLTEST_VARDIR/tmp/RPL_DD_ADV_S.sql;
# Note: These files should only get removed if the above diff succeeds.
remove_file $MYSQLTEST_VARDIR/tmp/RPL_DD_ADV_M.sql;
remove_file $MYSQLTEST_VARDIR/tmp/RPL_DD_ADV_S.sql;
# End 5.1 test case
--source include/rpl_end.inc