File: mu_swap_root.c

package info (click to toggle)
fis-gtm 7.0-005-1
  • links: PTS, VCS
  • area: main
  • in suites: bookworm
  • size: 32,264 kB
  • sloc: ansic: 336,687; asm: 5,184; csh: 4,823; sh: 1,945; awk: 291; makefile: 72; sed: 13
file content (492 lines) | stat: -rw-r--r-- 19,079 bytes parent folder | download
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
/****************************************************************
 *								*
 * Copyright (c) 2012-2021 Fidelity National Information	*
 * Services, Inc. and/or its subsidiaries. All rights reserved.	*
 *								*
 *	This source code contains the intellectual property	*
 *	of its copyright holder(s), and is made available	*
 *	under a license.  If you do not know the terms of	*
 *	the license, please stop and do not read further.	*
 *								*
 ****************************************************************/

#include "mdef.h"

#include "gtm_string.h"
#include "cdb_sc.h"
#include "gdsroot.h"
#include "gdsblk.h"
#include "gtm_facility.h"
#include "fileinfo.h"
#include "gdsbt.h"
#include "gdsfhead.h"
#include "filestruct.h"
#include "jnl.h"
#include "gdsblkops.h"
#include "gdskill.h"
#include "gdscc.h"
#include "copy.h"
#include "interlock.h"
#include "muextr.h"
#include "mu_reorg.h"
/* Include prototypes */
#include "t_end.h"
#include "t_retry.h"
#include "mupip_reorg.h"
#include "util.h"
#include "t_begin.h"
#include "op.h"
#include "gvcst_protos.h"	/* for gvcst_rtsib,gvcst_search prototype */
#include "gvcst_bmp_mark_free.h"
#include "gvcst_kill_sort.h"
#include "gtmmsg.h"
#include "add_inter.h"
#include "t_abort.h"
#include "sleep_cnt.h"
#include "wcs_sleep.h"
#include "memcoherency.h"
#include "gdsbml.h"
#include "jnl_get_checksum.h"
#include "t_qread.h"
#include "t_create.h"
#include "t_write_map.h"
#include "t_write.h"
#include "change_reg.h"

GBLREF	boolean_t		mu_reorg_process;
GBLREF	boolean_t		need_kip_incr;
GBLREF	char			*update_array, *update_array_ptr;
GBLREF	cw_set_element		cw_set[];
GBLREF	gd_region		*gv_cur_region;
GBLREF	gv_key			*gv_altkey;
GBLREF	gv_key			*gv_currkey, *gv_altkey;
GBLREF	gv_namehead		*gv_target;
GBLREF	gv_namehead		*reorg_gv_target;
GBLREF	inctn_opcode_t		inctn_opcode;
GBLREF	inctn_opcode_t		inctn_opcode;
GBLREF	kill_set		*kill_set_tail;
GBLREF	sgmnt_addrs		*cs_addrs;
GBLREF	sgmnt_addrs		*kip_csa;
GBLREF	sgmnt_data_ptr_t	cs_data;
GBLREF	uint4			t_err;
GBLREF	uint4			update_trans;
GBLREF	uint4			update_array_size;
GBLREF	unsigned char		cw_map_depth;
GBLREF	unsigned char		cw_set_depth;
GBLREF	unsigned char		rdfail_detail;
GBLREF	unsigned int		t_tries;

error_def(ERR_DBRDONLY);
error_def(ERR_GBLNOEXIST);
error_def(ERR_MAXBTLEVEL);
error_def(ERR_MUREORGFAIL);
error_def(ERR_MUTRUNCNOTBG);

#define RETRY_SWAP		(0)
#define ABORT_SWAP		(-1)

/* Swap the root block of the global variable tree (GVT) identified by gl_ptr with a free/recycled block
 * closer to the start of the database file, and then (for the normal REORG path) do the same for each
 * block on the directory tree branch leading to that global's entry.  Each successful root swap bumps
 * *root_swap_statistic_ptr.
 *
 * upg_mv_block: 0 on the MUPIP REORG path (destination chosen purely by bmm_find_free); non-zero when a
 * specific target block is requested (upgrade-driven move), in which case the directory tree branch pass
 * is skipped.  Returns early (with a GBLNOEXIST message) if the global no longer exists, or silently if
 * the region is read-only or the access method is MM.
 */
void	mu_swap_root(glist *gl_ptr, int *root_swap_statistic_ptr, block_id upg_mv_block)
{
	block_id		child_blk_id, free_blk_id;
	block_id		save_root;
	boolean_t		killed_global;
	enum cdb_sc		status;
	gv_namehead		*save_targ;
	int			level;
	kill_set		kill_set_list;
	node_local_ptr_t	cnl;
	sgmnt_data_ptr_t	csd;
	sgmnt_addrs		*csa;
	sm_uc_ptr_t		child_blk_ptr;
	srch_hist		*dir_hist_ptr, *gvt_hist_ptr;
	trans_num		curr_tn, ret_tn;
	unsigned int		lcl_t_tries;
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	assert(mu_reorg_process);	/* TODO: use upg_mv_block? */
	gv_target = gl_ptr->gvt;
	gv_target->clue.end = 0;		/* reset clue since reorg action on later globals might have invalidated it */
	reorg_gv_target->gvname.var_name = gv_target->gvname.var_name;	/* needed by SAVE_ROOTSRCH_ENTRY_STATE */
	dir_hist_ptr = gv_target->alt_hist;
	gvt_hist_ptr = &(gv_target->hist);
	inctn_opcode = inctn_invalid_op;
	if (0 == upg_mv_block)
	{	/* set gv_target/gv_currkey/gv_cur_region/cs_addrs/cs_data to correspond to <globalname,reg> in gl_ptr */
		gv_target->root = 0;		/* reset root so we recompute it in DO_OP_GVNAME below */
		DO_OP_GVNAME(gl_ptr);
	}
	csa = cs_addrs;
	cnl = csa->nl;
	csd = cs_data;	/* keep csd up to date; with MM, cs_data can change, and, dereferencing an older copy, cause SIG-11 */
	if (gv_cur_region->read_only)			/* TODO: ensure such a check exists early on in upgrade logic */
		return;					/* Cannot proceed for read-only data files */
	killed_global = FALSE;
	while (0 == gv_target->root)
	{	/* Global does not "exist" */
		if ((0 != upg_mv_block) && (gv_target->hist.h[1].blk_num == gv_target->hist.h[0].blk_num))
		{	/* upgrade-driven move of a killed global: treat the lone history block as the root */
			gv_target->root = gv_target->hist.h[0].blk_num;
			killed_global = TRUE;
			break;
		}
		gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_GBLNOEXIST, 2, GNAME(gl_ptr).len, GNAME(gl_ptr).addr);
		return;					/* must have been an online rollback - no problem, a message suffices */
	}
	if ((dba_mm == csd->acc_meth) && (0 == upg_mv_block))
		 /* return for now without doing any swapping operation because later mu_truncate
		 * is going to issue the MUTRUNCNOTBG message.
		 */
		return;
	SET_GV_ALTKEY_TO_GBLNAME_FROM_GV_CURRKEY;		/* set up gv_altkey to be just the gblname */
	/* ------------ Swap root block of global variable tree --------- */
	t_begin(ERR_MUREORGFAIL, UPDTRNS_DB_UPDATED_MASK);
	for (;;)
	{
		curr_tn = csa->ti->curr_tn;
		kill_set_list.used = 0;
		if (!killed_global)
		{	/* Build both histories: directory tree path to the global's entry and the GVT path */
			save_root = gv_target->root;
			gv_target->root = csa->dir_tree->root;
			gv_target->clue.end = 0;
			if (cdb_sc_normal != (status = gvcst_search(gv_altkey, dir_hist_ptr)))
			{	/* Assign directory tree path to dir_hist_ptr */
				assert(t_tries < CDB_STAGNATE);
				gv_target->root = save_root;
				t_retry(status);
				continue;
			}
			gv_target->root = save_root;
			gv_target->clue.end = 0;
			/* BUGFIX: assign "status" from this search; previously t_retry() below was invoked with the
			 * stale (cdb_sc_normal) status left over from the directory tree search above.
			 */
			if (cdb_sc_normal != (status = gvcst_search(gv_currkey, NULL)))
			{	/* Assign global variable tree path to gvt_hist_ptr */
				assert(t_tries < CDB_STAGNATE);
				t_retry(status);
				continue;
			}
		}
		/* we've already searched the directory tree in op_gvname/t_retry and obtained gv_target->root.
		 * Should restart with gvtrootmod2 if they don't agree. gvcst_root_search is the final arbiter.
		 * Really need that for debug info and also should assert(gv_currkey is global name).
		 */
		free_blk_id = mu_swap_root_blk(gl_ptr, gvt_hist_ptr, dir_hist_ptr, &kill_set_list, curr_tn, upg_mv_block);
		if (RETRY_SWAP == free_blk_id)
			continue;
		else if (ABORT_SWAP == free_blk_id)
			break;
		DECR_KIP(csd, csa, kip_csa);
		*root_swap_statistic_ptr += 1;
		break;
	}
	if (DIR_ROOT == upg_mv_block)
		return;
	/* ------------ Swap blocks in branch of directory tree --------- */
	for (level = 0; (0 == upg_mv_block) && (level <= MAX_BT_DEPTH); level++)
	{
		t_begin(ERR_MUREORGFAIL, UPDTRNS_DB_UPDATED_MASK);
		for (;;)
		{
			curr_tn = csa->ti->curr_tn;
			kill_set_list.used = 0;
			save_root = gv_target->root;
			gv_target->root = csa->dir_tree->root;
			gv_target->clue.end = 0;
			if (cdb_sc_normal != (status = gvcst_search(gv_altkey, dir_hist_ptr)))
			{	/* assign branch path of directory tree into dir_hist_ptr */
				assert(t_tries < CDB_STAGNATE);
				gv_target->root = save_root;
				t_retry(status);
				continue;
			}
			gv_target->root = save_root;
			gv_target->clue.end = 0;
			if (level >= dir_hist_ptr->depth)
			{	/* done */
				t_abort(gv_cur_region, csa);
				return;
			}
			child_blk_ptr = dir_hist_ptr->h[level].buffaddr;
			child_blk_id = dir_hist_ptr->h[level].blk_num;
			assert(csa->dir_tree->root != child_blk_id);
			free_blk_id = swap_root_or_directory_block(level + 1, level, dir_hist_ptr, child_blk_id,
					child_blk_ptr, &kill_set_list, curr_tn, 0);
			if (level == 0)
				/* set level as 1 to mark this kill set is for level-0 block in directory tree.
				 * The kill-set level later will be used in gvcst_bmp_markfree to assign a special value to
				 * cw_set_element, which will be eventually used by t_end to write the block to snapshot
				 */
				kill_set_list.blk[kill_set_list.used - 1].level = 1;
			if (RETRY_SWAP == free_blk_id)
				continue;
			else if (ABORT_SWAP == free_blk_id)
				break;
			update_trans = UPDTRNS_DB_UPDATED_MASK;
			inctn_opcode = inctn_mu_reorg;
			assert(1 == kill_set_list.used);
			need_kip_incr = TRUE;
			if (!csa->now_crit)
				WAIT_ON_INHIBIT_KILLS(cnl, MAXWAIT2KILL);
			DEBUG_ONLY(lcl_t_tries = t_tries);
			TREF(in_mu_swap_root_state) = MUSWP_DIRECTORY_SWAP;
			if ((trans_num)0 == (ret_tn = t_end(dir_hist_ptr, NULL, TN_NOT_SPECIFIED)))
			{	/* commit failed; t_end already set up the restart - go around again */
				TREF(in_mu_swap_root_state) = MUSWP_NONE;
				need_kip_incr = FALSE;
				assert(NULL == kip_csa);
				continue;
			}
			TREF(in_mu_swap_root_state) = MUSWP_NONE;
			gvcst_kill_sort(&kill_set_list);
			TREF(in_mu_swap_root_state) = MUSWP_FREE_BLK;
			GVCST_BMP_MARK_FREE(&kill_set_list, ret_tn, inctn_mu_reorg, inctn_bmp_mark_free_mu_reorg,
					inctn_opcode, csa);
			TREF(in_mu_swap_root_state) = MUSWP_NONE;
			DECR_KIP(csd, csa, kip_csa);
			break;
		}
	}
	DEFERRED_EXIT_REORG_CHECK;	/* a single directory tree has to be quick, so check at end, rather than each DECR_KIP  */
	return;
}

/* Perform the actual GVT root block swap for the global named by gl_ptr: move the root block (the deepest
 * entry in gvt_hist_ptr) into a free/recycled block chosen by swap_root_or_directory_block(), commit via
 * t_end(), and then mark the old root block free via GVCST_BMP_MARK_FREE.
 *
 * Returns:
 *	- the new (free) block id on success,
 *	- RETRY_SWAP (0) when the transaction must be restarted by the caller,
 *	- ABORT_SWAP (-1) when the swap should be abandoned (propagated from swap_root_or_directory_block),
 *	- curr_tn when the global no longer exists after an aborted transaction (online rollback case).
 *
 * kill_set_list accumulates the old root block so it can be freed post-commit.  upg_mv_block is 0 for the
 * REORG path; non-zero when a specific destination block was requested (upgrade-driven move).
 */
block_id mu_swap_root_blk(glist *gl_ptr, srch_hist *gvt_hist_ptr, srch_hist *dir_hist_ptr, kill_set *kill_set_list,
		trans_num curr_tn, block_id upg_mv_block)
{
	block_id		free_blk_id, root_blk_id;
	boolean_t		tn_aborted;
	int			root_blk_lvl;
	sm_uc_ptr_t		root_blk_ptr;
	trans_num		ret_tn;
	unsigned int		lcl_t_tries;
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	/* The root block is the deepest (highest level) block in the GVT search history */
	root_blk_lvl = gvt_hist_ptr->depth;
	assert(0 < root_blk_lvl);
	root_blk_ptr = gvt_hist_ptr->h[root_blk_lvl].buffaddr;
	root_blk_id = gvt_hist_ptr->h[root_blk_lvl].blk_num;
	assert((CDB_STAGNATE > t_tries) || (gv_target->root == gvt_hist_ptr->h[root_blk_lvl].blk_num));
	/* parent_blk_lvl of 0 means: the parent record to repoint lives in the directory tree leaf */
	free_blk_id = swap_root_or_directory_block(0, root_blk_lvl, dir_hist_ptr, root_blk_id,
						   root_blk_ptr, kill_set_list, curr_tn, upg_mv_block);
	if ((RETRY_SWAP == free_blk_id) || (ABORT_SWAP == free_blk_id))
		return free_blk_id;
	assert(ABORT_SWAP < free_blk_id);
	update_trans = UPDTRNS_DB_UPDATED_MASK;
	inctn_opcode = inctn_mu_reorg;
	assert((1 == kill_set_list->used) || ((DIR_ROOT == gv_target->root) && (1 == gv_target->hist.h[1].blk_num)));
	need_kip_incr = TRUE;
	if (!cs_addrs->now_crit)
		WAIT_ON_INHIBIT_KILLS(cs_addrs->nl, MAXWAIT2KILL);
	DEBUG_ONLY(lcl_t_tries = t_tries);
	TREF(in_mu_swap_root_state) = MUSWP_INCR_ROOT_CYCLE;
	assert(!TREF(in_gvcst_redo_root_search));
	/* Only pass the directory tree history to t_end when the swapped block is not the DT root itself */
	if ((trans_num)0 == (ret_tn = t_end(gvt_hist_ptr, (DIR_ROOT != root_blk_id) ? dir_hist_ptr : NULL, TN_NOT_SPECIFIED)))
	{	/* commit did not complete; distinguish "global disappeared" from an ordinary restart */
		TREF(in_mu_swap_root_state) = MUSWP_NONE;
		need_kip_incr = FALSE;
		assert(NULL == kip_csa);
		ABORT_TRANS_IF_GBL_EXIST_NOMORE(lcl_t_tries, tn_aborted);
		if (tn_aborted)
		{	/* It is not an error if the global (that once existed) doesn't exist anymore (due to ROLLBACK) */
			gtm_putmsg_csa(CSA_ARG(cs_addrs) VARLSTCNT(4) ERR_GBLNOEXIST, 2, GNAME(gl_ptr).len, GNAME(gl_ptr).addr);
			return curr_tn;
		}
		return RETRY_SWAP;
	}
	TREF(in_mu_swap_root_state) = MUSWP_NONE;
	/* Note that this particular process's csa->root_search_cycle is now behind cnl->root_search_cycle.
	 * This forces a cdb_sc_gvtrootmod2 restart in gvcst_bmp_mark_free below. TODO: how to handle after refactor???
	 */
	assert(cs_addrs->nl->root_search_cycle > cs_addrs->root_search_cycle);
	if (0 != upg_mv_block)
		cs_addrs->root_search_cycle = cs_addrs->nl->root_search_cycle;
	gvcst_kill_sort(kill_set_list);
	/* Free the old root block (added to kill_set_list by swap_root_or_directory_block) */
	GVCST_BMP_MARK_FREE(kill_set_list, ret_tn, inctn_mu_reorg, inctn_bmp_mark_free_mu_reorg, inctn_opcode, cs_addrs);
	return free_blk_id;
}

/* Finds a free block and adds information to update array and cw_set */
/* Finds a free block and adds information to update array and cw_set.
 *
 * Stages (but does not commit - the caller runs t_end) the swap of one block (child_blk_id at level
 * child_blk_lvl, contents at child_blk_ptr) into a free or recycled block:
 *	1. the free block acquires the child's contents (t_create),
 *	2. the parent record at dir_hist_ptr->h[parent_blk_lvl] is rewritten to point at the new block
 *	   (t_write) - skipped when the child is the directory tree root itself,
 *	3. the free block's local bitmap is updated (t_write_map),
 *	4. the child block is appended to kill_set_list so the caller can mark it free post-commit.
 *
 * upg_mv_block: 0 lets bmm_find_free pick any block (and aborts the swap if no lower-numbered block is
 * available); non-zero requests the block right after upg_mv_block (see the hint_blk_num/hint_bit use).
 *
 * Returns the new block id on success, RETRY_SWAP (0) after a t_retry has been signalled, or
 * ABORT_SWAP (-1) when the swap is not worthwhile/possible (t_abort already done).
 */
block_id swap_root_or_directory_block(int parent_blk_lvl, int child_blk_lvl, srch_hist *dir_hist_ptr, block_id child_blk_id,
		sm_uc_ptr_t child_blk_ptr, kill_set *kill_set_list, trans_num curr_tn, block_id upg_mv_block)
{
	blk_segment		*bs1, *bs_ptr;
	block_id		hint_blk_num, free_blk_id, parent_blk_id, total_blks, num_local_maps, master_bit,
				free_bit, temp_blk;
	boolean_t		free_blk_recycled, child_long_blk_id, parent_long_blk_id;
	cw_set_element		*tmpcse;
	int			blk_seg_cnt, blk_size;
	int			parent_blk_size, child_blk_size, bsiz;
	int			rec_size1, curr_offset, bpntr_end, hdr_len;
	int			tmp_cmpc, child_blk_id_sz, parent_blk_id_sz;
	int4			hint_bit, maxbitsthismap;
	jnl_buffer_ptr_t	jbbp; /* jbbp is non-NULL only if before-image journaling */
	node_local_ptr_t	cnl;
	sgmnt_data_ptr_t	csd;
	sgmnt_addrs		*csa;
	sm_uc_ptr_t		parent_blk_ptr, bn_ptr, saved_blk;
	srch_blk_status		bmlhist, freeblkhist;
	unsigned char		save_cw_set_depth;
	unsigned short		temp_ushort;
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	csd = cs_data;
	csa = cs_addrs;
	cnl = csa->nl;
	blk_size = csd->blk_size;
	/* block ids may be 32- or 64-bit depending on the block's format version */
	child_long_blk_id = IS_64_BLK_ID(child_blk_ptr);
	child_blk_id_sz = SIZEOF_BLK_ID(child_long_blk_id);
	/* Find a free/recycled block for new block location. */
	hint_blk_num = upg_mv_block;
	total_blks = csa->ti->total_blks;
	num_local_maps = DIVIDE_ROUND_UP(total_blks, BLKS_PER_LMAP);
	/* First locate a local map with free space, starting at the hint's local map */
	master_bit = bmm_find_free((hint_blk_num / BLKS_PER_LMAP), csa->bmm, num_local_maps);
	if ((NO_FREE_SPACE == master_bit))
	{
		assert(0 == upg_mv_block);
		t_abort(gv_cur_region, csa);
		return ABORT_SWAP;
	}
	bmlhist.blk_num = master_bit * BLKS_PER_LMAP;
	if (NULL == (bmlhist.buffaddr = t_qread(bmlhist.blk_num, (sm_int_ptr_t)&bmlhist.cycle, &bmlhist.cr)))
	{	/* WARNING: assignment above */
		assert(0 == upg_mv_block);
		assert(t_tries < CDB_STAGNATE);
		t_retry((enum cdb_sc)rdfail_detail);
		return RETRY_SWAP;
	}
	/* With a non-zero upg_mv_block, bias the bitmap scan so the block just after upg_mv_block is chosen */
	hint_bit = (0 != upg_mv_block) ? ((upg_mv_block % BLKS_PER_LMAP) - 1) : 0;
	/* (total_blks - bmlhist.blk_num) can be cast because it should never be larger then BLKS_PER_LMAP */
	assert(((master_bit + 1) < num_local_maps) || (BLKS_PER_LMAP >= (total_blks - bmlhist.blk_num)));
	maxbitsthismap = (master_bit != (num_local_maps - 1)) ? BLKS_PER_LMAP : (int4)(total_blks - bmlhist.blk_num);
	free_bit = bm_find_blk(hint_bit, bmlhist.buffaddr + SIZEOF(blk_hdr), maxbitsthismap, &free_blk_recycled);
	free_blk_id = bmlhist.blk_num + free_bit;
	assert((0 == upg_mv_block) || (upg_mv_block + 1 == free_blk_id));
	if (DIR_ROOT >= free_blk_id)
	{	/* Bitmap block 0 and directory tree root block 1 should always be marked busy. */
		assert(0 == upg_mv_block);
		assert(t_tries < CDB_STAGNATE);
		t_retry(cdb_sc_badbitmap);
		return RETRY_SWAP;
	}
	if ((child_blk_id <= free_blk_id) && (0 == upg_mv_block))
	{	/* stop swapping root or DT blocks once the database is truncated well enough. A good heuristic for this is
		* to check if the block is to be swapped into a higher block number and if so do not swap
		*/
		t_abort(gv_cur_region, csa);
		return ABORT_SWAP;
	}
	/* ====== begin update array ======
	 * Four blocks get changed.
	 * 	1. Free block becomes busy and gains the contents of child (root block/directory tree block)
	 * 	2. Parent block in directory tree remains busy, but points to new root block location.
	 *	3. Free block's corresponding bitmap reflects above change.
	 * 	4. Child block gets marked recycled in bitmap. (GVCST_BMP_MARK_FREE)
	 */
	CHECK_AND_RESET_UPDATE_ARRAY;
	if (free_blk_recycled)
	{	/* Otherwise, it's a completely free block, in which case no need to read. */
		freeblkhist.blk_num = free_blk_id;
		if (NULL == (freeblkhist.buffaddr = t_qread(free_blk_id, (sm_int_ptr_t)&freeblkhist.cycle, &freeblkhist.cr)))
		{
			assert(t_tries < CDB_STAGNATE);
			t_retry((enum cdb_sc)rdfail_detail);
			return RETRY_SWAP;
		}
	}
	/* 1. Free block becomes busy and gains the contents of the child block */
	child_blk_size = ((blk_hdr_ptr_t)child_blk_ptr)->bsiz;
	BLK_INIT(bs_ptr, bs1);
	BLK_ADDR(saved_blk, child_blk_size, unsigned char);
	memcpy(saved_blk, child_blk_ptr, child_blk_size);
	BLK_SEG(bs_ptr, saved_blk + SIZEOF(blk_hdr), child_blk_size - SIZEOF(blk_hdr));
	assert(blk_seg_cnt == child_blk_size);
	if (!BLK_FINI(bs_ptr, bs1))
	{
		assert(t_tries < CDB_STAGNATE);
		t_retry(cdb_sc_blkmod);
		return RETRY_SWAP;
	}
	tmpcse = &cw_set[cw_set_depth];
	(free_blk_recycled) ? BIT_SET_RECYCLED_AND_CLEAR_FREE(tmpcse->blk_prior_state)
			    : BIT_CLEAR_RECYCLED_AND_SET_FREE(tmpcse->blk_prior_state);
	t_create(free_blk_id, (unsigned char *)bs1, 0, 0, child_blk_lvl);
	tmpcse->mode = gds_t_acquired;
	if (!free_blk_recycled || !cs_data->db_got_to_v5_once)
		tmpcse->old_block = NULL;
	else
	{	/* recycled block needs a before-image if before-image journaling is on and the epoch precedes it */
		tmpcse->old_block = freeblkhist.buffaddr;
		tmpcse->cr = freeblkhist.cr;
		tmpcse->cycle = freeblkhist.cycle;
		jbbp = (JNL_ENABLED(csa) && csa->jnl_before_image) ? csa->jnl->jnl_buff : NULL;
		if ((NULL != jbbp) && (((blk_hdr_ptr_t)tmpcse->old_block)->tn < jbbp->epoch_tn))
		{
			bsiz = ((blk_hdr_ptr_t)(tmpcse->old_block))->bsiz;
			if (bsiz > blk_size)
			{
				assert(CDB_STAGNATE > t_tries);
				t_retry(cdb_sc_lostbmlcr);
				return RETRY_SWAP;
			}
			JNL_GET_CHECKSUM_ACQUIRED_BLK(tmpcse, csd, csa, tmpcse->old_block, bsiz);
		}
	}
	if (DIR_ROOT != child_blk_id)
	{
		/* 2. Parent block in directory tree remains busy, but points to new child block location. */
		parent_blk_ptr = dir_hist_ptr->h[parent_blk_lvl].buffaddr; /* 0 == parent_blk_lvl if moving a gvt root block */
		parent_blk_id = dir_hist_ptr->h[parent_blk_lvl].blk_num;
		parent_long_blk_id = IS_64_BLK_ID(parent_blk_ptr);
		parent_blk_id_sz = SIZEOF_BLK_ID(parent_long_blk_id);
		curr_offset = dir_hist_ptr->h[parent_blk_lvl].curr_rec.offset;
		parent_blk_size = ((blk_hdr_ptr_t)parent_blk_ptr)->bsiz;
		GET_RSIZ(rec_size1, (parent_blk_ptr + curr_offset));
		if ((parent_blk_size < rec_size1 + curr_offset) || (bstar_rec_size(parent_long_blk_id) > rec_size1))
		{	/* concurrent modification made the record inconsistent - restart */
			assert(t_tries < CDB_STAGNATE);
			t_retry(cdb_sc_blkmod);
			return RETRY_SWAP;
		}
		/* Rebuild the parent block: bytes before the pointer, the new block id, bytes after the pointer */
		BLK_INIT(bs_ptr, bs1);
		if (0 == parent_blk_lvl)
			/* There can be collation stuff in the record value after the block pointer. See gvcst_root_search. */
			hdr_len = SIZEOF(rec_hdr) + gv_altkey->end + 1 - EVAL_CMPC((rec_hdr_ptr_t)(parent_blk_ptr + curr_offset));
		else
			hdr_len = rec_size1 - parent_blk_id_sz;
		bpntr_end = curr_offset + hdr_len + parent_blk_id_sz;
		BLK_SEG(bs_ptr, parent_blk_ptr + SIZEOF(blk_hdr), curr_offset + hdr_len - SIZEOF(blk_hdr));
		BLK_ADDR(bn_ptr, parent_blk_id_sz, unsigned char);
		WRITE_BLK_ID(parent_long_blk_id, free_blk_id, bn_ptr);
		BLK_SEG(bs_ptr, bn_ptr, parent_blk_id_sz);
		BLK_SEG(bs_ptr, parent_blk_ptr + bpntr_end, parent_blk_size - bpntr_end);
		assert(blk_seg_cnt == parent_blk_size);
		if (!BLK_FINI(bs_ptr, bs1))
		{
			assert(t_tries < CDB_STAGNATE);
			t_retry(cdb_sc_blkmod);
			return RETRY_SWAP;
		}
		t_write(&dir_hist_ptr->h[parent_blk_lvl], (unsigned char *)bs1, 0, 0, parent_blk_lvl, FALSE, TRUE,
			GDS_WRITE_KILLTN);
		/* To indicate later snapshot file writing process during fast_integ must write the block to snapshot file */
		BIT_SET_DIR_TREE(cw_set[cw_set_depth - 1].blk_prior_state);
	}
	/* 3. Free block's corresponding bitmap reflects above change. */
	PUT_BLK_ID(update_array_ptr, free_bit);
	save_cw_set_depth = cw_set_depth; /* Bit maps go on end of cw_set (more fake acquired) */
	assert(!cw_map_depth);
	t_write_map(&bmlhist, (uchar_ptr_t)update_array_ptr, curr_tn, 1);
	cw_map_depth = cw_set_depth;
	cw_set_depth = save_cw_set_depth;
	update_array_ptr += SIZEOF(block_id);
	temp_blk = 0;
	PUT_BLK_ID(update_array_ptr, temp_blk);
	update_array_ptr += SIZEOF(block_id);
	assert(1 == cw_set[cw_map_depth - 1]. reference_cnt);
	/* 4. Child block gets marked recycled in bitmap. (GVCST_BMP_MARK_FREE) */
	kill_set_list->blk[kill_set_list->used].flag = 0;
	kill_set_list->blk[kill_set_list->used].level = 0;
	kill_set_list->blk[kill_set_list->used++].block = child_blk_id;
	return free_blk_id;
}