File: hash_stat.c (htdig 1:3.2.0b6-16)
/*-
 * See the file LICENSE for redistribution information.
 *
 * Copyright (c) 1996, 1997, 1998, 1999
 *	Sleepycat Software.  All rights reserved.
 */

#include "db_config.h"

#ifndef lint
static const char sccsid[] = "@(#)hash_stat.c	11.5 (Sleepycat) 9/10/99";
#endif /* not lint */

#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>

#include <errno.h>
#include <string.h>
#endif

#include "db_int.h"
#include "db_page.h"
#include "db_shash.h"
#include "hash.h"
#include "lock.h"

static int CDB___ham_stat_callback __P((DB *, PAGE *, void *, int *));

/*
 * CDB___ham_stat --
 *	Gather/print the hash statistics
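 *
 *	The statistics are returned in a DB_HASH_STAT structure allocated
 *	here via CDB___os_malloc with the caller-supplied db_malloc and
 *	handed back to the caller through spp.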
 *
 * PUBLIC: int CDB___ham_stat __P((DB *, void *, void *(*)(size_t), u_int32_t));
 */
int
CDB___ham_stat(dbp, spp, db_malloc, flags)
	DB *dbp;
	void *spp, *(*db_malloc) __P((size_t));
	u_int32_t flags;
{
	DB_HASH_STAT *sp;
	HASH_CURSOR *hcp;
	DBC *dbc;
	PAGE *h;
	db_pgno_t pgno;
	int ret;

	PANIC_CHECK(dbp->dbenv);
	DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->stat");

	sp = NULL;

	/* Check for invalid flags. */
	if ((ret = CDB___db_statchk(dbp, flags)) != 0)
		return (ret);

	if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
		return (ret);
	hcp = (HASH_CURSOR *)dbc->internal;

	if ((ret = CDB___ham_get_meta(dbc)) != 0)
		goto err;

	/* Allocate and clear the structure. */
	if ((ret = CDB___os_malloc(sizeof(*sp), db_malloc, &sp)) != 0)
		goto err;
	memset(sp, 0, sizeof(*sp));

	/* Copy the fields that we have. */
	sp->hash_pagesize = dbp->pgsize;
	sp->hash_buckets = hcp->hdr->max_bucket + 1;
	sp->hash_magic = hcp->hdr->dbmeta.magic;
	sp->hash_version = hcp->hdr->dbmeta.version;
	sp->hash_metaflags = hcp->hdr->dbmeta.flags;
	sp->hash_nelem = hcp->hdr->nelem;
	sp->hash_ffactor = hcp->hdr->ffactor;

	/* Walk the free list, counting pages. */
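	/*
	 * The chain starts at the meta page's free pointer, links through
	 * each free page's next_pgno, and ends at PGNO_INVALID.
	 */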
	for (sp->hash_free = 0, pgno = hcp->hdr->dbmeta.free;
	    pgno != PGNO_INVALID;) {
		++sp->hash_free;

		if ((ret = CDB_memp_fget(dbp->mpf, &pgno, 0, &h)) != 0)
			goto err;

		pgno = h->next_pgno;
		(void)CDB_memp_fput(dbp->mpf, h, 0);
	}

	/* Now traverse the rest of the table. */
	if ((ret = CDB___ham_traverse(dbp,
	    dbc, DB_LOCK_READ, CDB___ham_stat_callback, sp)) != 0)
		goto err;
	if ((ret = dbc->c_close(dbc)) != 0)
		goto err;
	if ((ret = CDB___ham_release_meta(dbc)) != 0)
		goto err;

	*(DB_HASH_STAT **)spp = sp;
	return (0);

err:	if (sp != NULL)
		CDB___os_free(sp, sizeof(*sp));
	if (hcp->hdr != NULL)
		(void)CDB___ham_release_meta(dbc);
	(void)dbc->c_close(dbc);
	return (ret);

}

/*
 * CDB___ham_traverse
 *	 Traverse an entire hash table.  We use the callback so that we
 * can use this both for stat collection and for deallocation.
 *
 * PUBLIC:  int CDB___ham_traverse __P((DB *, DBC *, db_lockmode_t,
 * PUBLIC:     int (*)(DB *, PAGE *, void *, int *), void *));
 */
int
CDB___ham_traverse(dbp, dbc, mode, callback, cookie)
	DB *dbp;
	DBC *dbc;
	db_lockmode_t mode;
	int (*callback) __P((DB *, PAGE *, void *, int *));
	void *cookie;
{
	HASH_CURSOR *hcp;
	HKEYDATA *hk;
	db_pgno_t pgno, opgno;
	u_int32_t bucket;
	int did_put, i, ret;

	hcp = (HASH_CURSOR *)dbc->internal;

	/*
	 * In a perfect world, we could simply read each page in the file
	 * and look at its page type to tally the information necessary.
	 * Unfortunately, the bucket locking that hash tables do to make
	 * locking easy makes this a pain in the butt.  We have to traverse
	 * duplicate, overflow and big pages from the bucket so that we
	 * don't access anything that isn't properly locked.
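	 * The per-bucket lock held in hcp->lock is dropped at the bottom
	 * of the outer loop, once the bucket's entire chain of pages has
	 * been visited (only when locking is enabled).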
	 */
	for (bucket = 0; bucket <= hcp->hdr->max_bucket; bucket++) {
		hcp->bucket = bucket;
		pgno = CDB___bucket_to_page(hcp, bucket);
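		/*
		 * Walk this bucket's chain of hash pages, following
		 * NEXT_PGNO until it is PGNO_INVALID.
		 */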
		for (ret = CDB___ham_get_cpage(dbc, mode); ret == 0;
		    ret = CDB___ham_next_cpage(dbc, pgno, 0, 0)) {
			pgno = NEXT_PGNO(hcp->pagep);

			/*
			 * Go through each item on the page checking for
			 * duplicates (in which case we have to count the
			 * duplicate pages) or big key/data items (in which
			 * case we have to count those pages).
			 */
			for (i = 0; i < NUM_ENT(hcp->pagep); i++) {
				hk = (HKEYDATA *)P_ENTRY(hcp->pagep, i);
				switch (HPAGE_PTYPE(hk)) {
				case H_OFFDUP:
					memcpy(&opgno, HOFFDUP_PGNO(hk),
					    sizeof(db_pgno_t));
					if ((ret = CDB___db_traverse_dup(dbp,
					    opgno, callback, cookie))
					    != 0)
						return (ret);
					break;
				case H_OFFPAGE:
					/*
					 * We are about to get a big page
					 * which will use the same spot that
					 * the current page uses, so we need
					 * to restore the current page before
					 * looking at it again.
					 */
					memcpy(&opgno, HOFFPAGE_PGNO(hk),
					    sizeof(db_pgno_t));
					ret = CDB___db_traverse_big(dbp,
					    opgno, callback, cookie);
					if (ret != 0)
						return (ret);
					break;
				case H_DUPLICATE:
				case H_KEYDATA:
					break;
				}
			}

			/* Call the callback on main pages. */
			if ((ret = callback(dbp,
			    hcp->pagep, cookie, &did_put)) != 0)
				return (ret);

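			/*
			 * If the callback released the page itself, clear the
			 * cursor's reference so it is not used again.
			 */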
			if (did_put)
				hcp->pagep = NULL;
			if (pgno == PGNO_INVALID)
				break;
		}
		if (ret != 0)
			return (ret);

		if (F_ISSET(dbp->dbenv, DB_ENV_LOCKING))
			(void)CDB_lock_put(dbp->dbenv, &hcp->lock);
	}

	return (0);
}

static int
CDB___ham_stat_callback(dbp, pagep, cookie, putp)
	DB *dbp;
	PAGE *pagep;
	void *cookie;
	int *putp;
{
	DB_HASH_STAT *sp;

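	/* The stat callback never releases the page itself. */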
	*putp = 0;
	sp = cookie;

	switch (pagep->type) {
	case P_DUPLICATE:
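		/* Page holding off-page duplicates. */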
		sp->hash_dup++;
		sp->hash_dup_free += P_FREESPACE(pagep);
		break;
	case P_OVERFLOW:
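		/* Page holding a big key or data item. */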
		sp->hash_bigpages++;
		sp->hash_big_bfree += P_OVFLSPACE(dbp->pgsize, pagep);
		break;
	case P_HASH:
		/*
		 * We count the buckets and the overflow pages
		 * separately and tally their bytes separately
		 * as well.  We need to figure out if this page
		 * is a bucket.
		 */
		if (PREV_PGNO(pagep) == PGNO_INVALID)
			sp->hash_bfree += P_FREESPACE(pagep);
		else {
			sp->hash_overflows++;
			sp->hash_ovfl_free += P_FREESPACE(pagep);
		}
		sp->hash_nrecs += H_NUMPAIRS(pagep);
		break;
	default:
		return (EINVAL);
	}

	return (0);
}