# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996, 2013 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
# TEST recd005
# TEST Verify reuse of file ids works on catastrophic recovery.
# TEST
# TEST Make sure that we can do catastrophic recovery even if we open
# TEST files using the same log file id.

proc recd005 { method args } {
	source ./include.tcl
	global rand_init
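
	# If -zero_log was specified, move it from the method args to the
	# environment args.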
	set envargs ""
	set zero_idx [lsearch -exact $args "-zero_log"]
	if { $zero_idx != -1 } {
		set args [lreplace $args $zero_idx $zero_idx]
		set envargs "-zero_log"
	}

	set args [convert_args $method $args]
	set omethod [convert_method $method]

	puts "Recd005: $method catastrophic recovery ($envargs)"

	berkdb srand $rand_init

	set testfile1 recd005.1.db
	set testfile2 recd005.2.db
	set max_locks 2000
	set eflags "-create -txn wrnosync -lock_max_locks $max_locks \
	    -lock_max_objects $max_locks -home $testdir $envargs"

	set tnum 0
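
	# Run through all combinations of database sizes and abort/commit
	# operations on the two files.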
	foreach sizes "{1000 10} {10 1000}" {
		foreach ops "{abort abort} {abort commit} {commit abort} \
		    {commit commit}" {
			env_cleanup $testdir
			incr tnum

			set s1 [lindex $sizes 0]
			set s2 [lindex $sizes 1]
			set op1 [lindex $ops 0]
			set op2 [lindex $ops 1]
			puts "\tRecd005.$tnum: $s1 $s2 $op1 $op2"

			puts "\tRecd005.$tnum.a: creating environment"
			set env_cmd "berkdb_env $eflags"
			set dbenv [eval $env_cmd]
			error_check_bad dbenv $dbenv NULL

			# Create the two databases.
			set oflags "-create \
			    -auto_commit -mode 0644 -env $dbenv $args $omethod"
			set db1 [eval {berkdb_open} $oflags $testfile1]
			error_check_bad db_open $db1 NULL
			error_check_good db_open [is_substr $db1 db] 1
			error_check_good db_close [$db1 close] 0
			set db2 [eval {berkdb_open} $oflags $testfile2]
			error_check_bad db_open $db2 NULL
			error_check_good db_open [is_substr $db2 db] 1
			error_check_good db_close [$db2 close] 0
			$dbenv close
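
			# Close and reopen the environment: when the databases
			# are reopened below, their log file ids can be reused,
			# which is what this test exercises.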
			set dbenv [eval $env_cmd]

			puts "\tRecd005.$tnum.b: Populating databases"
			eval {do_one_file $testdir \
			    $method $dbenv $env_cmd $testfile1 $s1 $op1 } $args
			eval {do_one_file $testdir \
			    $method $dbenv $env_cmd $testfile2 $s2 $op2 } $args

			puts "\tRecd005.$tnum.c: Verifying initial population"
			eval {check_file \
			    $testdir $env_cmd $testfile1 $op1 } $args
			eval {check_file \
			    $testdir $env_cmd $testfile2 $op2 } $args

			# Now, close the environment (so that recovery will work
			# on NT which won't allow delete of an open file).
			reset_env $dbenv

			berkdb debug_check
			puts -nonewline \
			    "\tRecd005.$tnum.d: About to run recovery ... "
			flush stdout

			set stat [catch \
			    {exec $util_path/db_recover -h $testdir -c} \
			    result]
			if { $stat == 1 } {
				error "Recovery error: $result."
			}
			puts "complete"

			# Substitute a file that will need recovery and try
			# running recovery again.
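			# If the operation was an abort, the afterop copy still
			# contains the aborted updates; if it was a commit, the
			# init copy is missing the committed updates.  Either
			# way, the substituted file needs recovery.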
			if { $op1 == "abort" } {
				file copy -force $testdir/$testfile1.afterop \
				    $testdir/$testfile1
				move_file_extent $testdir $testfile1 \
				    afterop copy
			} else {
				file copy -force $testdir/$testfile1.init \
				    $testdir/$testfile1
				move_file_extent $testdir $testfile1 init copy
			}
			if { $op2 == "abort" } {
				file copy -force $testdir/$testfile2.afterop \
				    $testdir/$testfile2
				move_file_extent $testdir $testfile2 \
				    afterop copy
			} else {
				file copy -force $testdir/$testfile2.init \
				    $testdir/$testfile2
				move_file_extent $testdir $testfile2 init copy
			}

			berkdb debug_check
			puts -nonewline "\tRecd005.$tnum.e:\
			    About to run recovery on pre-op database ... "
			flush stdout

			set stat \
			    [catch {exec $util_path/db_recover \
			    -h $testdir -c} result]
			if { $stat == 1 } {
				error "Recovery error: $result."
			}
			puts "complete"
			set dbenv [eval $env_cmd]
			eval {check_file \
			    $testdir $env_cmd $testfile1 $op1 } $args
			eval {check_file \
			    $testdir $env_cmd $testfile2 $op2 } $args
			reset_env $dbenv

			puts "\tRecd005.$tnum.f:\
			    Verify db_printlog can read logfile"
			set tmpfile $testdir/printlog.out
			set stat [catch \
			    {exec $util_path/db_printlog -h $testdir \
			    > $tmpfile} ret]
			error_check_good db_printlog $stat 0
			fileremove $tmpfile
		}
	}
}
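
# do_one_file --
#	Populate a single database inside one transaction, saving "init",
#	"afterop" and "final" copies of the file so that recovery results
#	can be checked later.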
proc do_one_file { dir method env env_cmd filename num op args} {
	source ./include.tcl

	set init_file $dir/$filename.t1
	set afterop_file $dir/$filename.t2
	set final_file $dir/$filename.t3

	# Save the initial file and open the environment and the first file
	file copy -force $dir/$filename $dir/$filename.init
	copy_extent_file $dir $filename init

	# If method is heap, copy other files
	if { [is_heap $method] == 1 } {
		append filename1 $filename "1"
		file copy -force $dir/$filename1 $dir/$filename.init1
		copy_extent_file $dir $filename1 init

		append filename2 $filename "2"
		file copy -force $dir/$filename2 $dir/$filename.init2
		copy_extent_file $dir $filename2 init
	}
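
	# Reopen the database with -unknown so the access method is taken
	# from the existing file.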
	set oflags "-auto_commit -unknown -env $env"
	set db [eval {berkdb_open} $oflags $args $filename]

	# Dump out file contents for initial case
	eval open_and_dump_file $filename $env $init_file nop \
	    dump_file_direction "-first" "-next" $args

	set txn [$env txn]
	error_check_bad txn_begin $txn NULL
	error_check_good txn_begin [is_substr $txn $env] 1

	# Now populate the database within the transaction.
	populate $db $method $txn $num 0 0

	# Sync the file so that we can capture a snapshot to test
	# recovery.
	error_check_good sync:$db [$db sync] 0

	file copy -force $dir/$filename $dir/$filename.afterop
	copy_extent_file $dir $filename afterop
	# If we are doing heap, we have more files to copy.
	if { [is_heap $method] == 1 } {
		file copy -force $dir/$filename1 $dir/$filename.afterop1
		copy_extent_file $dir $filename1 afterop

		file copy -force $dir/$filename2 $dir/$filename.afterop2
		copy_extent_file $dir $filename2 afterop
	}
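
	# Dump the afterop image outside the environment so we capture exactly
	# what is on disk, then resolve the transaction.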
	eval open_and_dump_file $testdir/$filename.afterop NULL \
	    $afterop_file nop dump_file_direction "-first" "-next" $args
	error_check_good txn_$op:$txn [$txn $op] 0

	if { $op == "commit" } {
		puts "\t\tFile $filename executed and committed."
	} else {
		puts "\t\tFile $filename executed and aborted."
	}

	# Dump out file and save a copy.
	error_check_good sync:$db [$db sync] 0
	eval open_and_dump_file $testdir/$filename NULL $final_file nop \
	    dump_file_direction "-first" "-next" $args
	file copy -force $dir/$filename $dir/$filename.final
	copy_extent_file $dir $filename final
	# If we are doing heap, we have more files to copy.
	if { [is_heap $method] == 1 } {
		file copy -force $dir/$filename1 $dir/$filename.final1
		copy_extent_file $dir $filename1 final

		file copy -force $dir/$filename2 $dir/$filename.final2
		copy_extent_file $dir $filename2 final
	}
	# If this is an abort, it should match the original file.
	# If this was a commit, then this file should match the
	# afterop file.
	if { $op == "abort" } {
		filesort $init_file $init_file.sort
		filesort $final_file $final_file.sort
		error_check_good \
		    diff(initial,post-$op):diff($init_file,$final_file) \
		    [filecmp $init_file.sort $final_file.sort] 0
	} else {
		filesort $afterop_file $afterop_file.sort
		filesort $final_file $final_file.sort
		error_check_good \
		    diff(post-$op,pre-commit):diff($afterop_file,$final_file) \
		    [filecmp $afterop_file.sort $final_file.sort] 0
	}

	error_check_good close:$db [$db close] 0
}
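
# check_file --
#	Verify that the on-disk file matches the expected image: the initial
#	dump for an aborted transaction, the afterop dump for a committed one.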
proc check_file { dir env_cmd filename op args} {
	source ./include.tcl

	set init_file $dir/$filename.t1
	set afterop_file $dir/$filename.t2
	set final_file $dir/$filename.t3

	eval open_and_dump_file $testdir/$filename NULL $final_file nop \
	    dump_file_direction "-first" "-next" $args
	if { $op == "abort" } {
		filesort $init_file $init_file.sort
		filesort $final_file $final_file.sort
		error_check_good \
		    diff(initial,post-$op):diff($init_file,$final_file) \
		    [filecmp $init_file.sort $final_file.sort] 0
	} else {
		filesort $afterop_file $afterop_file.sort
		filesort $final_file $final_file.sort
		error_check_good \
		    diff(pre-commit,post-$op):diff($afterop_file,$final_file) \
		    [filecmp $afterop_file.sort $final_file.sort] 0
	}
}