include/group_replication.inc
Warnings:
Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the connection metadata repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START REPLICA; see the 'START REPLICA Syntax' in the MySQL Manual for more information.
[connection server1]
# 1. Install clone plugin on server1.
[connection server1]
INSTALL PLUGIN clone SONAME 'CLONE_PLUGIN';
# 2. Start server 1 and bootstrap group
include/start_and_bootstrap_group_replication.inc
# 3. Create some transactions to send to the joiner
CREATE TABLE t1 (a INT NOT NULL AUTO_INCREMENT PRIMARY KEY , b INT);
INSERT INTO test.t1 (b) VALUES (1);
INSERT INTO test.t1 (b) VALUES (2);
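# (Sketch, not recorded output; assumes a client session on the donor:) the
# rows above create a GTID gap the joiner must recover, and the donor's
# executed set can be inspected with:
SELECT @@GLOBAL.gtid_executed;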
# 4. Change recovery settings to speed up the expected failure
[connection server2]
include/spawn_monitoring_process.inc
SET GLOBAL group_replication_recovery_reconnect_interval= 1;
SET GLOBAL group_replication_recovery_retry_count= 1;
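# (Sketch, not recorded output:) the reduced retry settings can be read back
# on server2 before recovery starts, e.g.:
SELECT @@GLOBAL.group_replication_recovery_retry_count,
       @@GLOBAL.group_replication_recovery_reconnect_interval;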
# 5. Set up the server so group replication starts on boot
# Install the Clone plugin
INSTALL PLUGIN clone SONAME 'CLONE_PLUGIN';
# 6. Ensure clone is used on recovery
SET GLOBAL group_replication_clone_threshold= 1;
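# (Sketch, not recorded output:) with the threshold at 1, any missing
# transaction makes clone the chosen recovery method; on the recipient, clone
# progress and errors can be observed through the clone plugin's Performance
# Schema table:
SELECT STATE, ERROR_NO, ERROR_MESSAGE FROM performance_schema.clone_status;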
# 7. Activate debug point that will simulate invalid recovery endpoints
# from donor
SET @debug_save= @@GLOBAL.DEBUG;
SET @@GLOBAL.DEBUG='+d,gr_recovery_endpoints_invalid_donor';
# 8. Recovery will fail and the member state will change to ERROR
include/start_group_replication.inc
# 9. Clone and incremental distributed recovery will fail to connect to the donor
include/assert_grep.inc [recovery channel received an invalid recovery endpoints configuration from donor]
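# (Sketch, not recorded output:) the failure is also visible in the membership
# view; with no valid donor the joiner is expected to end up in the ERROR state:
SELECT MEMBER_HOST, MEMBER_PORT, MEMBER_STATE
  FROM performance_schema.replication_group_members;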
# 10. Disable the debug point and the server will be able to join
SET @@GLOBAL.DEBUG= @debug_save;
# 11. Stop server 2 and start it again; the information received
# from the donor will now be valid
[connection server2]
include/stop_group_replication.inc
# 12. Clear the debug point to allow recovery to be successful
START GROUP_REPLICATION;
include/rpl_reconnect.inc
include/gr_wait_for_member_state.inc
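# (Sketch of what the wait amounts to; the include script adds polling and a
# timeout:) check that this member reports ONLINE.
SELECT MEMBER_STATE FROM performance_schema.replication_group_members
  WHERE MEMBER_ID = @@GLOBAL.server_uuid;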
# 13. Cleanup
DROP TABLE test.t1;
include/rpl_sync.inc
set session sql_log_bin=0;
call mtr.add_suppression("Maximum number of retries when*");
call mtr.add_suppression("Fatal error during the incremental recovery process of Group Replication.*");
call mtr.add_suppression("Skipping leave operation: concurrent attempt to leave the group is on-going.");
call mtr.add_suppression("The server was automatically set into read only mode after an error was detected.");
call mtr.add_suppression("Due to the number of missing transactions being higher than the configured threshold of*");
call mtr.add_suppression("Clone removing all user data for provisioning: Started");
call mtr.add_suppression("Clone removing all user data for provisioning: Finished");
call mtr.add_suppression("Due to some issue on the previous step distributed recovery is now executing: Incremental Recovery.");
call mtr.add_suppression("Failed to shutdown components infrastructure.");
call mtr.add_suppression("Received invalid recovery endpoints configuration from donor. This member is not a valid donor for recovery, so it will be skipped.");
set session sql_log_bin=1;
SET GLOBAL group_replication_clone_threshold= 9223372036854775807;
RESET PERSIST IF EXISTS group_replication_group_name;
RESET PERSIST IF EXISTS group_replication_local_address;
RESET PERSIST IF EXISTS group_replication_group_seeds;
RESET PERSIST IF EXISTS group_replication_start_on_boot;
RESET PERSIST IF EXISTS group_replication_communication_stack;
SET GLOBAL group_replication_start_on_boot= START_ON_BOOT_VALUE;
include/clean_monitoring_process.inc
UNINSTALL PLUGIN clone;
[connection server1]
UNINSTALL PLUGIN clone;
include/group_replication_end.inc