File: ss_lock_facility.c

/****************************************************************
 *								*
 * Copyright (c) 2010-2019 Fidelity National Information	*
 * Services, Inc. and/or its subsidiaries. All rights reserved.	*
 *								*
 *	This source code contains the intellectual property	*
 *	of its copyright holder(s), and is made available	*
 *	under a license.  If you do not know the terms of	*
 *	the license, please stop and do not read further.	*
 *								*
 ****************************************************************/

#include "mdef.h"

#include "gtm_string.h"

#include "gdsroot.h"
#include "gdskill.h"
#include "gdsblk.h"
#include "gtm_facility.h"
#include "fileinfo.h"
#include "gdsbt.h"
#include "gdsfhead.h"
#include "filestruct.h"
#include "iosp.h"
#include "relqop.h"
#include "copy.h"
#include "wcs_sleep.h"
#include "caller_id.h"
#include "gtm_rel_quant.h"
#include "sleep_cnt.h"
#include "interlock.h"
#include "is_proc_alive.h"
#include "mupipbckup.h"
#include "send_msg.h"
#include "performcaslatchcheck.h"
#include "gdsbgtr.h"
#include "lockconst.h"
#include "memcoherency.h"
#include "ss_lock_facility.h"

GBLREF	volatile int4		fast_lock_count;
GBLREF	uint4			process_id;
GBLREF	uint4			image_count;
GBLREF	int			num_additional_processors;
GBLREF	node_local_ptr_t	locknl;
GBLREF	gd_region		*gv_cur_region;

/* The lock routines below are modeled on those in shmpool.c. A change in one should be reflected in the other as well */
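/* API summary (derived from the routines in this file):
 *	ss_get_lock(reg)	- blocking acquire: bounded spin, then sleep between retries,
 *				  salvaging the latch if its holder process has died
 *	ss_get_lock_nowait(reg)	- single acquire attempt; returns immediately either way
 *	ss_release_lock(reg)	- release; the caller must hold the latch
 *	ss_lock_held_by_us(reg)	- TRUE if this process currently holds the latch
 */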

boolean_t ss_get_lock(gd_region *reg)
{
	int			retries, spins, maxspins;
	int4			max_sleep_mask;
	node_local_ptr_t	cnl;
	sgmnt_addrs		*csa;
	sm_global_latch_ptr_t	latch;
	uint4			latch_pid;

	csa = &FILE_INFO(reg)->s_addrs;
	cnl = csa->nl;
	latch = &cnl->snapshot_crit_latch;
	max_sleep_mask = -1;	/* initialized to -1 to defer memory reference until needed */
	maxspins = num_additional_processors ? MAX_LOCK_SPINS(LOCK_SPINS, num_additional_processors) : 1;
	/* Since LOCK_TRIES represents approximately 50 seconds of retries, allow 4X that long since IO is involved */
	++fast_lock_count;			 /* Disable wcs_stale for duration */
	for (retries = (LOCK_TRIES * 4) - 1; 0 < retries; retries--)
	{	/* this should use a mutex rather than a spin lock */
		for (spins = maxspins; 0 < spins; spins--)
		{	/* We had better not already hold the latch if we are trying to get it */
			assert(latch->u.parts.latch_pid != process_id
			       VMS_ONLY(|| latch->u.parts.latch_image_count != image_count));
			if (GET_SWAPLOCK(latch))
			{
				DEBUG_ONLY(locknl = csa->nl);
				LOCK_HIST("OBTN", latch, process_id, retries);
				DEBUG_ONLY(locknl = NULL);
				/* Note that fast_lock_count is kept incremented for the duration that we hold the lock
				 * to prevent our dispatching an interrupt that could deadlock getting this lock.
				 */
				return TRUE;
			}
			if (!is_proc_alive(latch_pid = latch->u.parts.latch_pid, 0))	/* WARNING: assignment */
				COMPSWAP_UNLOCK(latch, latch_pid, 0, LOCK_AVAILABLE, 0);
		}
		REST_FOR_LATCH(latch, (-1 == max_sleep_mask) ? SPIN_SLEEP_MASK(csa->hdr) : max_sleep_mask, retries);
	}
	DUMP_LOCKHIST();
	--fast_lock_count;
	assert(0 <= fast_lock_count);
	assert(FALSE);
	return FALSE;
}
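
/* A minimal, self-contained sketch of the acquire loop above, written with C11
 * atomics for illustration only (it is not part of the build; the real latch is
 * the platform-specific GET_SWAPLOCK/COMPSWAP_UNLOCK machinery, and the
 * kill(pid, 0) liveness probe below is a simplification of is_proc_alive()):
 *
 *	#include <errno.h>
 *	#include <signal.h>
 *	#include <stdatomic.h>
 *	#include <stdbool.h>
 *	#include <unistd.h>
 *
 *	static bool sketch_get_lock(_Atomic pid_t *latch, int retries)
 *	{
 *		pid_t	expect;
 *
 *		for ( ; 0 < retries; retries--)
 *		{
 *			expect = 0;	// 0 plays the role of LOCK_AVAILABLE here
 *			if (atomic_compare_exchange_strong(latch, &expect, getpid()))
 *				return true;	// we own the latch
 *			// On CAS failure "expect" holds the current owner's pid. If that
 *			// process died without releasing, salvage the latch and retry.
 *			// (kill() fails with EPERM for a live process we do not own,
 *			// so check for ESRCH specifically.)
 *			if ((0 != expect) && (0 != kill(expect, 0)) && (ESRCH == errno))
 *				atomic_compare_exchange_strong(latch, &expect, 0);
 *			usleep(1000);	// stand-in for the REST_FOR_LATCH backoff
 *		}
 *		return false;	// retries exhausted, as in the loop above
 *	}
 */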

boolean_t ss_get_lock_nowait(gd_region *reg)
{
	sm_global_latch_ptr_t	latch;
	sgmnt_addrs		*csa;
	node_local_ptr_t	cnl;

	csa = &FILE_INFO(reg)->s_addrs;
	cnl = csa->nl;
	latch = &cnl->snapshot_crit_latch;
	++fast_lock_count;			/* Disable wcs_stale for duration */
	/* We had better not already hold the latch if we are trying to get it */
	assert(latch->u.parts.latch_pid != process_id VMS_ONLY(|| latch->u.parts.latch_image_count != image_count));
	if (GET_SWAPLOCK(latch))
	{
		DEBUG_ONLY(locknl = csa->nl);
		LOCK_HIST("OBTN", latch, process_id, -1);
		DEBUG_ONLY(locknl = NULL);
		/* Note that fast_lock_count is kept incremented for the duration that we hold the lock
		 * to prevent our dispatching an interrupt that could deadlock getting this lock
		 */
		return TRUE;
	}
	--fast_lock_count;
	assert(0 <= fast_lock_count);
	return FALSE;
}
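
/* A hedged usage sketch for the try-lock variant above; ss_do_snapshot_work()
 * is a hypothetical placeholder for the caller's critical section:
 *
 *	if (ss_get_lock_nowait(reg))
 *	{
 *		ss_do_snapshot_work(reg);
 *		ss_release_lock(reg);
 *	}
 *	// else: the latch is busy; the caller backs off or defers the operation
 */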

void ss_release_lock(gd_region *reg)
{
	sm_global_latch_ptr_t	latch;
	sgmnt_addrs		*csa;
	node_local_ptr_t	cnl;

	csa = &FILE_INFO(reg)->s_addrs;
	cnl = csa->nl;
	latch = &cnl->snapshot_crit_latch;
	assert(process_id == latch->u.parts.latch_pid VMS_ONLY(&& image_count == latch->u.parts.latch_image_count));
	DEBUG_ONLY(locknl = csa->nl);
	LOCK_HIST("RLSE", latch, process_id, 0);
	RELEASE_SWAPLOCK(latch);
	DEBUG_ONLY(locknl = NULL);
	--fast_lock_count;
	assert(0 <= fast_lock_count);
}

boolean_t ss_lock_held_by_us(gd_region *reg)
{
	sgmnt_addrs		*csa;
	node_local_ptr_t	cnl;

	csa = &FILE_INFO(reg)->s_addrs;
	cnl = csa->nl;
	return GLOBAL_LATCH_HELD_BY_US(&cnl->snapshot_crit_latch);
}
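
/* End-to-end sketch of the blocking variant (illustration only; assumes "reg"
 * is an already-open region):
 *
 *	if (ss_get_lock(reg))
 *	{
 *		assert(ss_lock_held_by_us(reg));	// sanity check: we own the latch
 *		// ... manipulate the snapshot state guarded by the latch ...
 *		ss_release_lock(reg);
 *	}
 *	// a FALSE return means all retries were exhausted; debug builds
 *	// assert(FALSE) before returning it
 */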