File: mempool2.c

Package: valgrind 1:3.12.0~svn20160714-1

// Simplified version of mempool.c, more oriented towards checking that
// the description of invalid addresses is correct.
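//
// The pool below exercises memcheck's mempool client requests:
// VALGRIND_CREATE_MEMPOOL registers an anchor address for a pool,
// VALGRIND_MEMPOOL_ALLOC and VALGRIND_MEMPOOL_FREE tell memcheck which
// byte ranges inside the superblock are live, and
// VALGRIND_DESTROY_MEMPOOL tears the pool down again.  Each pool can be
// backed by either malloc or mmap, so both kinds of address
// descriptions get exercised.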

#include <stdio.h>
#include <unistd.h>
#include "tests/sys_mman.h"
#include <assert.h>
#include <stdlib.h>

#include "../memcheck.h"

#define SUPERBLOCK_SIZE 100000   // bytes of backing store per pool
#define REDZONE_SIZE 8           // redzone on each side of every allocation

typedef struct _level_list
{
   struct _level_list *next;
   char *where;
   // Padding ensures the struct is the same size on 32-bit and 64-bit
   // machines.
   char padding[16 - 2*sizeof(char*)];
} level_list;

typedef struct _pool {
   char *mem;
   char *where; 
   level_list *levels;
   int size, left;
   // Padding ensures the struct is the same size on 32-bit and 64-bit
   // machines.
   char padding[24 - 3*sizeof(char*)];
} pool;

pool *make_pool( int use_mmap )
{
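   // Grab a superblock with mmap or malloc (depending on use_mmap) and
   // mark all of it NOACCESS, so that any byte not later opened up by
   // VALGRIND_MEMPOOL_ALLOC stays invalid to touch.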
   pool *p;

   if (use_mmap) {
      p = (pool *)mmap(0, sizeof(pool), PROT_READ|PROT_WRITE|PROT_EXEC,
                       MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
      p->where = p->mem = (char *)mmap(NULL, SUPERBLOCK_SIZE,
                                       PROT_READ|PROT_WRITE|PROT_EXEC,
                                       MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
   } else {
      p = (pool *)malloc(sizeof(pool));
      p->where = p->mem = (char *)malloc(SUPERBLOCK_SIZE);
   }

   p->size = p->left = SUPERBLOCK_SIZE;
   p->levels = NULL;
   (void) VALGRIND_MAKE_MEM_NOACCESS(p->where, SUPERBLOCK_SIZE);
   return p;
}

void push(pool *p, int use_mmap )
{
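   // Start a new level: remember the current allocation point and
   // register it with VALGRIND_CREATE_MEMPOOL as the anchor of a new
   // pool with REDZONE_SIZE redzones.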
   level_list *l;

   if (use_mmap)
      l = (level_list *)mmap(0, sizeof(level_list),
                             PROT_READ|PROT_WRITE|PROT_EXEC,
                             MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
   else
      l = (level_list *)malloc(sizeof(level_list));

   l->next = p->levels;
   l->where = p->where;
   VALGRIND_CREATE_MEMPOOL(l->where, REDZONE_SIZE, 0);
   p->levels = l;
}

void pop(pool *p, int use_mmap)
{
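   // Discard the most recent level: VALGRIND_DESTROY_MEMPOOL forgets
   // its chunks, and everything handed out since the matching push()
   // is made NOACCESS again.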
   level_list *l = p->levels;
   p->levels = l->next;
   VALGRIND_DESTROY_MEMPOOL(l->where);
   (void) VALGRIND_MAKE_MEM_NOACCESS(l->where, p->where-l->where);
   p->where = l->where;
   if (use_mmap)
      munmap(l, sizeof(level_list));
   else
      free(l);
}

void destroy_pool(pool *p, int use_mmap)
{
   // Pop any levels still outstanding; pop() advances p->levels.
   while (p->levels) {
      pop(p, use_mmap);
   }
   if (use_mmap) {
      munmap(p->mem, SUPERBLOCK_SIZE);
      munmap(p, sizeof(pool));
   } else {
      free(p->mem);
      free(p);
   }
}

char *allocate(pool *p, int size)
{
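   // Bump-allocate 'size' bytes from the current level, leaving a
   // redzone on each side.  VALGRIND_MEMPOOL_ALLOC makes just those
   // 'size' bytes addressable, so reads into the redzones stay invalid.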
   char *where;
   p->left -= size + (REDZONE_SIZE*2);
   where = p->where + REDZONE_SIZE;
   p->where += size + (REDZONE_SIZE*2);
   VALGRIND_MEMPOOL_ALLOC(p->levels->where, where, size);
   return where;
}

//-------------------------------------------------------------------------
// Rest
//-------------------------------------------------------------------------

void test(void)
{
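   // Drive a malloc-backed and an mmap-backed pool through the
   // interesting cases: out-of-range reads around an allocation, reads
   // after VALGRIND_MEMPOOL_FREE, double frees, and accesses around an
   // allocation in a superblock that was never marked NOACCESS.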
   char *x1, *x2;
   char res = 0;

   // p1 is a malloc-backed pool
   pool *p1 = make_pool(0);

   // p2 is a mmap-backed pool
   pool *p2 = make_pool(1);

   push(p1, 0);
   push(p2, 1);

   x1 = allocate(p1, 10);
   x2 = allocate(p2, 20);

   fprintf(stderr,
           "\n------ out of range reads in malloc-backed pool ------\n\n");
   res += x1[-1]; // invalid
   res += x1[10]; // invalid

   fprintf(stderr,
           "\n------ out of range reads in mmap-backed pool ------\n\n");
   res += x2[-1]; // invalid
   res += x2[20]; // invalid

   fprintf(stderr,
           "\n------ read free in malloc-backed pool ------\n\n");
   VALGRIND_MEMPOOL_FREE(p1, x1);
   res += x1[5];

   fprintf(stderr,
           "\n------ read free in mmap-backed pool ------\n\n");
   VALGRIND_MEMPOOL_FREE(p2, x2);
   res += x2[11];

   fprintf(stderr,
           "\n------ double free in malloc-backed pool ------\n\n");
   VALGRIND_MEMPOOL_FREE(p1, x1);

   fprintf(stderr,
           "\n------ double free in mmap-backed pool ------\n\n");
   VALGRIND_MEMPOOL_FREE(p2, x2);

   {
      // test that redzones are still protected even if the user forgets
      // to mark the superblock noaccess.
      char superblock[100];

      VALGRIND_CREATE_MEMPOOL(superblock, REDZONE_SIZE, 0);
      // The user should mark the superblock noaccess to get full
      // memcheck protection; VALGRIND_MEMPOOL_ALLOC will, however,
      // still ensure the redzones are protected.
      VALGRIND_MEMPOOL_ALLOC(superblock, superblock+30, 10);

      res += superblock[30]; // valid
      res += superblock[39]; // valid

      fprintf(stderr,
              "\n------ 2 invalid access in 'no no-access superblock' ---\n\n");
      res += superblock[29]; // invalid
      res += superblock[40]; // invalid

      VALGRIND_DESTROY_MEMPOOL(superblock);
   }
   // claim res is used, so gcc can't optimise all the reads away
   __asm__ __volatile__("" : : "r"(res));

   fprintf(stderr,
           "\n------ done ------\n\n");
   pop(p1, 0);
   pop(p2, 1);
   destroy_pool(p1, 0);
   destroy_pool(p2, 1);
}

int main(void)
{
   test();
   return 0;
}