/* Defining _XOPEN_SOURCE hides the declaration of madvise() on Solaris <
   11 and the MADV_DONTNEED definition on IRIX 6.5.  */
#undef _XOPEN_SOURCE
#include <errno.h>
#include <fcntl.h>	/* open, O_RDONLY: /dev/zero fallback */
#include <sys/mman.h>	/* mmap, munmap, madvise, mincore */
#include <unistd.h>

#include "runtime.h"
#include "arch.h"
#include "malloc.h"
#ifndef MAP_ANON
#ifdef MAP_ANONYMOUS
#define MAP_ANON MAP_ANONYMOUS
#else
#define USE_DEV_ZERO
#define MAP_ANON 0
#endif
#endif
#ifndef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif
#ifdef USE_DEV_ZERO
static int dev_zero = -1;
#endif
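
// addrspace_free reports whether the address range [v, v+n) is
// unmapped. Without mincore there is no way to check, so we assume
// the range is free.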
static int32
addrspace_free(void *v __attribute__ ((unused)), uintptr n __attribute__ ((unused)))
{
#ifdef HAVE_MINCORE
size_t page_size = getpagesize();
int32 errval;
uintptr chunk;
uintptr off;
// NOTE: vec must be just 1 byte long here.
// Mincore returns ENOMEM if any of the pages are unmapped,
// but we want to know that all of the pages are unmapped.
// To make these the same, we can only ask about one page
// at a time. See golang.org/issue/7476.
static byte vec[1];
errno = 0;
for(off = 0; off < n; off += chunk) {
chunk = page_size * sizeof vec;
if(chunk > (n - off))
chunk = n - off;
errval = mincore((char*)v + off, chunk, (void*)vec);
// ENOMEM means unmapped, which is what we want.
// Anything else we assume means the pages are mapped.
if(errval == 0 || errno != ENOMEM)
return 0;
}
#endif
return 1;
}
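
// mmap_fixed maps n bytes at exactly v. It first passes v as a hint
// without MAP_FIXED; if the kernel placed the mapping elsewhere even
// though the range appears free, it retries with MAP_FIXED.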
static void *
mmap_fixed(byte *v, uintptr n, int32 prot, int32 flags, int32 fd, uint32 offset)
{
void *p;
p = runtime_mmap((void *)v, n, prot, flags, fd, offset);
if(p != v && addrspace_free(v, n)) {
// On some systems, mmap ignores v without
// MAP_FIXED, so retry if the address space is free.
if(p != MAP_FAILED)
runtime_munmap(p, n);
p = runtime_mmap((void *)v, n, prot, flags|MAP_FIXED, fd, offset);
}
return p;
}
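
// SysAlloc obtains n bytes of zeroed memory directly from the
// operating system, adding n to *stat, or returns nil on failure.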
void*
runtime_SysAlloc(uintptr n, uint64 *stat)
{
void *p;
int fd = -1;
#ifdef USE_DEV_ZERO
if (dev_zero == -1) {
dev_zero = open("/dev/zero", O_RDONLY);
if (dev_zero < 0) {
runtime_printf("open /dev/zero: errno=%d\n", errno);
exit(2);
}
}
fd = dev_zero;
#endif
p = runtime_mmap(nil, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, fd, 0);
if (p == MAP_FAILED) {
if(errno == EACCES) {
runtime_printf("runtime: mmap: access denied\n");
runtime_printf("if you're running SELinux, enable execmem for this process.\n");
exit(2);
}
if(errno == EAGAIN) {
runtime_printf("runtime: mmap: too much locked memory (check 'ulimit -l').\n");
runtime_exit(2);
}
return nil;
}
runtime_xadd64(stat, n);
return p;
}
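
// SysUnused advises the kernel that [v, v+n) is not needed for now;
// the mapping remains valid but the backing pages may be reclaimed.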
void
runtime_SysUnused(void *v __attribute__ ((unused)), uintptr n __attribute__ ((unused)))
{
#ifdef MADV_DONTNEED
runtime_madvise(v, n, MADV_DONTNEED);
#endif
}
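
// SysUsed is the inverse of SysUnused. Pages released with
// MADV_DONTNEED are faulted back in on first access, so there is
// nothing to do here.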
void
runtime_SysUsed(void *v, uintptr n)
{
USED(v);
USED(n);
}
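
// SysFree unconditionally returns the memory to the operating system
// and subtracts n from *stat.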
void
runtime_SysFree(void *v, uintptr n, uint64 *stat)
{
runtime_xadd64(stat, -(uint64)n);
runtime_munmap(v, n);
}
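
// SysFault remaps [v, v+n) with no access permissions so that any
// use of the region faults. Used only for debugging the runtime.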
void
runtime_SysFault(void *v, uintptr n)
{
int fd = -1;
#ifdef USE_DEV_ZERO
if (dev_zero == -1) {
dev_zero = open("/dev/zero", O_RDONLY);
if (dev_zero < 0) {
runtime_printf("open /dev/zero: errno=%d\n", errno);
exit(2);
}
}
fd = dev_zero;
#endif
runtime_mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE|MAP_FIXED, fd, 0);
}
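
// SysReserve reserves address space without allocating memory or
// swap. *reserved reports whether the pages were actually reserved
// (true) or only probed and left unmapped (false); SysMap consults
// this flag when committing the range.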
void*
runtime_SysReserve(void *v, uintptr n, bool *reserved)
{
int fd = -1;
void *p;
#ifdef USE_DEV_ZERO
if (dev_zero == -1) {
dev_zero = open("/dev/zero", O_RDONLY);
if (dev_zero < 0) {
runtime_printf("open /dev/zero: errno=%d\n", errno);
exit(2);
}
}
fd = dev_zero;
#endif
// On 64-bit, people with ulimit -v set complain if we reserve too
// much address space. Instead, assume that the reservation is okay
// if we can reserve at least 64K and check the assumption in SysMap.
// Only user-mode Linux (UML) rejects these requests.
if(sizeof(void*) == 8 && (n >> 16) > 1LLU<<16) {
p = mmap_fixed(v, 64<<10, PROT_NONE, MAP_ANON|MAP_PRIVATE, fd, 0);
		if (p != v) {
			// mmap_fixed may return MAP_FAILED; only unmap a real mapping.
			if(p != MAP_FAILED)
				runtime_munmap(p, 64<<10);
			return nil;
		}
		runtime_munmap(p, 64<<10);
*reserved = false;
return v;
}
	// Use the MAP_NORESERVE mmap() flag here because typically most of
	// this reservation will never be used. It does not make sense to
	// reserve a huge amount of unneeded swap space. This is important on
	// systems which do not overcommit memory by default.
p = runtime_mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE|MAP_NORESERVE, fd, 0);
if(p == MAP_FAILED)
return nil;
*reserved = true;
return p;
}
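
// SysMap commits address space previously obtained from SysReserve,
// mapping it read-write and adding n to *stat.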
void
runtime_SysMap(void *v, uintptr n, bool reserved, uint64 *stat)
{
void *p;
int fd = -1;
runtime_xadd64(stat, n);
#ifdef USE_DEV_ZERO
if (dev_zero == -1) {
dev_zero = open("/dev/zero", O_RDONLY);
if (dev_zero < 0) {
runtime_printf("open /dev/zero: errno=%d\n", errno);
exit(2);
}
}
fd = dev_zero;
#endif
// On 64-bit, we don't actually have v reserved, so tread carefully.
if(!reserved) {
p = mmap_fixed(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, fd, 0);
if(p == MAP_FAILED && errno == ENOMEM)
runtime_throw("runtime: out of memory");
if(p != v) {
runtime_printf("runtime: address space conflict: map(%p) = %p\n", v, p);
runtime_throw("runtime: address space conflict");
}
return;
}
p = runtime_mmap(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_FIXED|MAP_PRIVATE, fd, 0);
if(p == MAP_FAILED && errno == ENOMEM)
runtime_throw("runtime: out of memory");
if(p != v)
runtime_throw("runtime: cannot map pages in arena address space");
}