File: memory_map.h

package info (click to toggle)
scheme48 1.8%2Bdfsg-1
  • links: PTS, VCS
  • area: main
  • in suites: squeeze
  • size: 14,980 kB
  • ctags: 14,127
  • sloc: lisp: 76,272; ansic: 71,514; sh: 3,026; makefile: 637
file content (177 lines) | stat: -rw-r--r-- 4,971 bytes parent folder | download | duplicates (4)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
/* Copyright (c) 1993-2008 by Richard Kelsey and Jonathan Rees.
   See file COPYING. */

#ifndef __S48_MEMORY_MAP_H
#define __S48_MEMORY_MAP_H

#include "scheme48arch.h"

#ifdef HAVE_STDINT_H
#include <stdint.h>
#endif

#include "memory.h"
#include "areas.h"
#include "page_constants.h"

/* When a new page is allocated: */
extern void s48_memory_map_setB(s48_address address, Area* value);

/* When the Area structure of a page is needed: */
/* extern Area* s48_memory_map_ref(s48_address address);
   - defined inline below */

/* The following is only defined here to allow the inlining of
   s48_memory_map_ref, and not used elsewhere: */

#include <assert.h>

/* We need an integer type of the same size as pointers, and the bit
   length of addresses.

   NOTE: on 64-bit Windows the compiler predefines BOTH _WIN32 and
   _WIN64, so _WIN64 must be tested first — otherwise a Win64 build
   would wrongly get ADDRESS_LENGTH 32. */
#if defined(_WIN64)
#define ADDRESS_LENGTH 64
#elif defined(_WIN32)
#define ADDRESS_LENGTH 32
#else
#include <limits.h>
#define ADDRESS_LENGTH WORDSIZE

#endif

/*** CONFIGURATION ***/

/* The memory map stores and finds an Area structure for every memory
   page we have allocated for the heap.  Lookup must be very fast
   without consuming too much memory at the same time.

   For 32-bit systems this is easy: the fields below can cover the
   whole address space directly.  For 64-bit systems a chained hashing
   scheme is implemented; it is activated automatically whenever the
   defined page size and the statically allocated array below are too
   small to cover all potential addresses.

   The first relevant size is defined in page_constants.h:

   LOG_BYTES_PER_PAGE

   The next one defines the size of a statically allocated global
   array, which stores pointers to Metapage structures; its
   logarithmic (base-2) size is:
*/

#define LOG_TABLE_SIZE 10

/* Metapage structures are allocated on demand, and each consists of
   another array of pointers to Area structures.  Its logarithmic
   (base-2) size is:
*/

#define LOG_PAGES_PER_METAPAGE 10

/*** END OF CONFIGURATION ***/

/* With the usual sizes on a 32-bit system the three fields sum up to
   exactly 32 bits and the hashing algorithm is unnecessary.  Decide
   whether we need it: */

#define USED_ADDRESS_BITS \
  (LOG_BYTES_PER_PAGE + LOG_TABLE_SIZE + LOG_PAGES_PER_METAPAGE)

#define REMAINING_ADDRESS_BITS \
  (ADDRESS_LENGTH - USED_ADDRESS_BITS)

#if REMAINING_ADDRESS_BITS > 0
#define NEED_METAPAGE_HASHING
#elif REMAINING_ADDRESS_BITS == 0
#undef NEED_METAPAGE_HASHING
#else
/* The configured field widths claim more bits than an address has;
   shrink LOG_TABLE_SIZE and/or LOG_PAGES_PER_METAPAGE.
   (The original directive appended "##REMAINING_ADDRESS_BITS", but
   `##` is only meaningful inside a macro replacement list and #error
   never macro-expands its message, so it printed nothing useful.) */
#error "Misconfigured memory map: USED_ADDRESS_BITS exceeds ADDRESS_LENGTH."
#endif

/* For both direct access and hashed access, we split an address into
   the following fields:

   high                                                             low
   |   Rest   |  Metapage in Table  | Page in Metapage | Byte in Page |

   If the Rest has 0-length we don't need hashing.

 */

/* Some sizes: */
#define METAPAGES (((uintptr_t)1) << LOG_TABLE_SIZE)
#define PAGES_PER_METAPAGE (((uintptr_t)1) << LOG_PAGES_PER_METAPAGE)

/* Some accessors for the fields : */

#define ADDR_METAPAGE_INDEX(address) \
  ( ( ((uintptr_t)address) >> (LOG_BYTES_PER_PAGE + LOG_PAGES_PER_METAPAGE) ) \
    & (METAPAGES - 1) )

#define ADDR_PAGE_INDEX(address) \
  ( ( ((uintptr_t)address) >> LOG_BYTES_PER_PAGE ) \
    & (PAGES_PER_METAPAGE - 1) )

#ifdef NEED_METAPAGE_HASHING

#define ADDR_REST(address) \
  ( ((uintptr_t)address) >> USED_ADDRESS_BITS )

/* To identify the correct hash bucket, we need store the start
   address of all pages of a metapage. We use this macro to compare
   them: */
#define ADDR_REST_MASK \
  ( ((uintptr_t)-1) << USED_ADDRESS_BITS )
#define ADDR_REST_MASKED(address) ((void*)(ADDR_REST_MASK & ((uintptr_t)address)))
#define IS_CORRECT_METAPAGE(metapage, address) \
  ( ADDR_REST_MASKED(metapage->start_address) == ADDR_REST_MASKED(address) )

#endif

/* And now the structure we use; for hashing, we use a linked list of
   Metapages for pages which have the same ADDR_METAPAGE_INDEX, but
   different ADDR_REST parts:
*/

/* NOTE(review): the tag `_Metapage` (leading underscore followed by an
   uppercase letter) is reserved for the implementation by C99 7.1.3;
   consider renaming once all uses of the tag can be updated. */
typedef struct _Metapage {
#ifdef NEED_METAPAGE_HASHING
  s48_address start_address;   /* an address in the region this Metapage
                                  covers; only its Rest bits are compared
                                  (see IS_CORRECT_METAPAGE) */
  struct _Metapage* next;      /* next Metapage in this hash chain */
#endif
  Area* contents[PAGES_PER_METAPAGE];  /* one Area pointer per page */
} Metapage;

#define TABLE_SIZE (1L << LOG_TABLE_SIZE)

/* The global metapage table; the definition lives in the matching .c
   file (the commented-out `static` below is the retired alternative). */
/* static Metapage* s48_memory_table[TABLE_SIZE]; */
extern Metapage* s48_memory_table[TABLE_SIZE];

#ifdef NEED_METAPAGE_HASHING

/* Walk the hash chain hanging off the table slot for ADDRESS and
   return the location of the pointer to its Metapage.  If no matching
   Metapage exists yet, the returned location holds NULL and is exactly
   where a newly allocated Metapage should be stored. */
inline static Metapage** find_metapagep(s48_address address) {
  Metapage** linkp = &s48_memory_table[ADDR_METAPAGE_INDEX(address)];
  for (;;) {
    Metapage* candidate = *linkp;
    if (candidate == NULL)
      break;                    /* end of chain: insertion point */
    if (IS_CORRECT_METAPAGE(candidate, address))
      break;                    /* found the matching Metapage */
    linkp = &candidate->next;
  }
  assert(linkp != NULL);
  return linkp;
}

#else

/* Without hashing the table slot itself is the unique location. */
#define find_metapagep(address) (&s48_memory_table[ADDR_METAPAGE_INDEX(address)])

#endif

/* Look up the Area structure for the page containing ADDRESS; returns
   NULL if no Area has been registered for that page (via
   s48_memory_map_setB).
   Fix: dropped the stray `;` after the function body — an extra
   file-scope semicolon is a constraint violation in ISO C before C23. */
inline static Area* s48_memory_map_ref(s48_address address) {
  Metapage* metapage = *find_metapagep(address);
  if (metapage == NULL)
    return NULL;
  else
    return metapage->contents[ADDR_PAGE_INDEX(address)];
}

#endif