File: recd025.tcl

# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996, 2013 Oracle and/or its affiliates.  All rights reserved.
#
# $Id$
#
# TEST	recd025
# TEST	Basic tests for transaction bulk loading and recovery.
# TEST	In particular, verify that the tricky hot backup protocol works.

# These tests check the following conditions:

# 1. We can abort a TXN_BULK transaction, both online and during recovery.
# 2. The tricky hot backup protocol works correctly:
#     - start a bulk txn; populate, adding pages
#     - set hotbackup_in_progress (forces a checkpoint, as a bulk txn is in progress)
#     - copy the database file
#     - populate more, adding more pages (with the bulk optimization disabled)
#     - commit
#     - move the copy back into the test dir
#     - roll forward; verify the rolled-forward backup matches the committed database

# A more straightforward test of bulk transactions and hot backup is
# in the backup test.

proc recd025 { method args } {
	global fixed_len
	global is_hp_test
	source ./include.tcl

	# Skip test for specified page sizes -- we want to
	# specify our own page size.
	set pgindex [lsearch -exact $args "-pagesize"]
	if { $pgindex != -1 } {
		puts "Recd025: Skipping for specific pagesizes"
		return
	}

	# Skip test for heap, as heap does not have BULK ops
	if { [is_heap $method] == 1 } {
		puts "Recd025 skipping for heap method."
		return
	}

	# Increase size of fixed-length records to match other methods.
	# We pick an arbitrarily larger record size to ensure that we
	# allocate several pages.
	set orig_fixed_len $fixed_len
	set fixed_len 53
	set opts [convert_args $method $args]
	set omethod [convert_method $method]
	
	puts "Recd025: TXN_BULK page allocation and recovery"

	# Create the database and environment.
	env_cleanup $testdir
	set testfile recd025.db

	puts "\tRecd025.1a: creating environment"
	set env_cmd "berkdb_env -create -txn -home $testdir"
	set dbenv [eval $env_cmd]
	error_check_good dbenv [is_valid_env $dbenv] TRUE

	# Open database with small pages.
	puts "\tRecd025.1b: creating and populating database with small pages"
	set pagesize 512
	set oflags "-create $omethod -mode 0644 -pagesize $pagesize \
	  -env $dbenv -auto_commit $opts $testfile"
	set db [eval {berkdb_open} $oflags]

	error_check_good db_open [is_valid_db $db] TRUE

	set batchsize 20
	set lim 0
	set iter 1
	set datasize 53
	set data [repeat "a" $datasize]

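	# Populate within an ordinary transaction to establish the baseline
	# contents.  Keys are consecutive integers; each load loop extends
	# the target count ($lim) by one batch before inserting.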
	set t [$dbenv txn]

	for {set lim [expr $lim + $batchsize]} {$iter <= $lim } {incr iter} {
		eval {$db put} -txn $t $iter $data
	}
	error_check_good txn_commit [$t commit] 0

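	# Flush the database file so the copy taken below reflects all the data.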
	error_check_good sync:$db [$db sync] 0

	# Make a copy of the database now, for comparison

	catch {
	    file copy -force $testdir/$testfile $testdir/$testfile.orig
	} res
	copy_extent_file $testdir $testfile orig
	eval open_and_dump_file $testdir/$testfile.orig NULL \
	  $testdir/dump.orig nop dump_file_direction "-first" "-next" $opts

	puts "\tRecd025.1c: start bulk transaction, put data, allocating pages"
	
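	# A -txn_bulk transaction enables the bulk-load optimization, which
	# avoids logging the contents of inserts on newly allocated pages;
	# aborting it must undo both the puts and the page allocations.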
	set t [$dbenv txn -txn_bulk]

	for {set lim [expr $lim + $batchsize]} {$iter <= $lim } {incr iter} {
		eval {$db put} -txn $t $iter $data
	}

	# Take a copy before aborting; recovery is run against it later.
	error_check_good sync:$db [$db sync] 0
	catch {
		file copy -force $testdir/$testfile $testdir/$testfile.preabort
	} res
	copy_extent_file $testdir $testfile preabort

	puts "\tRecd025.1d: abort bulk transaction; verify undo of puts"

	error_check_good txn_abort [$t abort] 0

	error_check_good sync:$db [$db sync] 0

	eval open_and_dump_file $testdir/$testfile NULL \
	  $testdir/dump.postabort nop dump_file_direction "-first" "-next" $opts

	filesort $testdir/dump.orig $testdir/dump.orig.sort
	filesort $testdir/dump.postabort $testdir/dump.postabort.sort

	error_check_good verify_abort_diff \
	  [filecmp $testdir/dump.orig.sort $testdir/dump.postabort.sort] 0

	error_check_good db_close [$db close] 0
	reset_env $dbenv

	puts "\tRecd025.1e: recovery with allocations rolled back"

	# Move the preabort file into place, and run recovery

	catch {
		file copy -force $testdir/$testfile.preabort $testdir/$testfile
	} res

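	# db_recover -c performs catastrophic recovery, replaying the log
	# from the beginning; the aborted bulk transaction's page
	# allocations should be rolled back.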
	set stat [catch {eval exec $util_path/db_recover -h $testdir -c } res]
	if { $stat == 1 } {
		error "FAIL: Recovery error: $res."
	}

	eval open_and_dump_file $testdir/$testfile NULL \
	  $testdir/dump.postrecovery nop dump_file_direction "-first" "-next" $opts
	filesort $testdir/dump.postrecovery $testdir/dump.postrecovery.sort

	error_check_good verify_recovery_diff \
	  [filecmp $testdir/dump.orig.sort $testdir/dump.postrecovery.sort] 0


	# Now for the really tricky hot backup test.

	puts "\tRecd025.3a: opening environment"
	set env_cmd "berkdb_env -create -txn -home $testdir"
	set dbenv [eval $env_cmd]
	error_check_good dbenv [is_valid_env $dbenv] TRUE

	# Open database
	puts "\tRecd025.3b: opening database with small pages"
	set oflags "$omethod -pagesize $pagesize \
	  -env $dbenv -auto_commit $opts $testfile"
	set db [eval {berkdb_open} $oflags]

	error_check_good db_open [is_valid_db $db] TRUE
	
	puts "\tRecd025.3c: start bulk transaction and add pages"
	set t [$dbenv txn -txn_bulk]

	for {set lim [expr $lim + $batchsize]} {$iter <= $lim } {incr iter} {
		eval {$db put} -txn $t $iter $data
	}
	
	puts "\tRecd025.3d: Set hotbackup_in_progress, and copy the database"

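	# Turning hotbackup_in_progress on forces a checkpoint (a bulk txn
	# is active) and disables the bulk optimization from here on, so
	# the copy taken below can be rolled forward from the log.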
	$dbenv set_flags -hotbackup_in_progress on

	catch {
		file copy -force $testdir/$testfile $testdir/$testfile.hotcopy
	} res

	puts "\tRecd025.3e: add more pages and commit"


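	# With hotbackup_in_progress on, these puts are logged normally
	# (the bulk optimization is disabled), so the roll-forward below
	# can recover them into the hot copy.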
	for {set lim [expr $lim + $batchsize] } {$iter <= $lim } {incr iter} {
		eval {$db put} -txn $t $iter $data
	}

	error_check_good txn_commit [$t commit] 0

	$dbenv set_flags -hotbackup_in_progress off

	error_check_good db_close [$db close] 0
	reset_env $dbenv

	# Dump the finished product for comparison after recovery.

	eval open_and_dump_file $testdir/$testfile NULL \
	  $testdir/dump.final nop dump_file_direction "-first" "-next" $opts

	filesort $testdir/dump.final $testdir/dump.final.sort

	puts "\tRecd025.3f: roll forward the hot copy and compare"

	catch {
		file copy -force $testdir/$testfile.hotcopy $testdir/$testfile
	} res

	# Perform catastrophic recovery, as restoring a hot backup would.
	set stat [catch {eval exec $util_path/db_recover -h $testdir -c } res]
	if { $stat == 1 } {
		error "FAIL: Recovery error: $res."
	}

	eval open_and_dump_file $testdir/$testfile NULL \
	  $testdir/dump.recovered_copy nop dump_file_direction "-first" "-next" $opts
	filesort $testdir/dump.recovered_copy $testdir/dump.recovered_copy.sort

	error_check_good verify_rollforward_diff \
	  [filecmp $testdir/dump.final.sort $testdir/dump.recovered_copy.sort] 0

	# Set fixed_len back to the global value so we don't
	# mess up other tests.
	set fixed_len $orig_fixed_len
	return
}