/* Copyright (C) 2003,2004 Andi Kleen, SuSE Labs.
libnuma is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; version
2.1.
libnuma is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should find a copy of v2.1 of the GNU Lesser General Public License
somewhere on your Linux system; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#include <unistd.h>
#include <sys/types.h>
#include <asm/unistd.h>
#include <errno.h>
#include "numa.h"
#include "numaif.h"
#include "numaint.h"
/* Mark the wrappers weak so a glibc that provides real definitions
   (newer glibcs export mbind() etc. themselves) overrides these. */
#define WEAK __attribute__((weak))

/* Fallback syscall numbers for toolchains whose <asm/unistd.h> predates
   the NUMA policy syscalls.  Only used when any of them is missing;
   numbers below are the official per-architecture allocations. */
#if !defined(__NR_mbind) || !defined(__NR_set_mempolicy) || \
    !defined(__NR_get_mempolicy) || !defined(__NR_migrate_pages) || \
    !defined(__NR_move_pages)

#if defined(__x86_64__)

#define __NR_sched_setaffinity 203
#define __NR_sched_getaffinity 204

/* Official allocation */

#define __NR_mbind 237
#define __NR_set_mempolicy 238
#define __NR_get_mempolicy 239
#define __NR_migrate_pages 256
#define __NR_move_pages 279

#elif defined(__ia64__)
#define __NR_sched_setaffinity 1231
#define __NR_sched_getaffinity 1232
#define __NR_migrate_pages 1280
#define __NR_move_pages 1276

/* Official allocation */

#define __NR_mbind 1259
#define __NR_get_mempolicy 1260
#define __NR_set_mempolicy 1261

#elif defined(__i386__)

#define __NR_mbind 274
#define __NR_get_mempolicy 275
#define __NR_set_mempolicy 276
#define __NR_migrate_pages 294
#define __NR_move_pages 317

#elif defined(__powerpc__)

#define __NR_mbind 259
#define __NR_get_mempolicy 260
#define __NR_set_mempolicy 261
#define __NR_migrate_pages 258
/* FIXME: powerpc is missing move pages!!!
#define __NR_move_pages xxx
*/

#elif defined(__mips__)

#if _MIPS_SIM == _ABIO32
/*
 * Linux o32 style syscalls are in the range from 4000 to 4999.
 */
#define __NR_Linux 4000
#define __NR_mbind (__NR_Linux + 268)
#define __NR_get_mempolicy (__NR_Linux + 269)
#define __NR_set_mempolicy (__NR_Linux + 270)
#define __NR_migrate_pages (__NR_Linux + 287)
#endif

#if _MIPS_SIM == _ABI64
/*
 * Linux 64-bit syscalls are in the range from 5000 to 5999.
 */
#define __NR_Linux 5000
#define __NR_mbind (__NR_Linux + 227)
#define __NR_get_mempolicy (__NR_Linux + 228)
#define __NR_set_mempolicy (__NR_Linux + 229)
#define __NR_migrate_pages (__NR_Linux + 246)
#endif

#if _MIPS_SIM == _ABIN32
/*
 * Linux N32 syscalls are in the range from 6000 to 6999.
 */
#define __NR_Linux 6000
#define __NR_mbind (__NR_Linux + 231)
#define __NR_get_mempolicy (__NR_Linux + 232)
#define __NR_set_mempolicy (__NR_Linux + 233)
#define __NR_migrate_pages (__NR_Linux + 250)
#endif

#elif defined(__hppa__)

/* NOTE(review): only migrate_pages is defined here; the other NUMA
   syscall numbers are presumably expected from the kernel headers on
   hppa — confirm against current asm/unistd.h. */
#define __NR_migrate_pages 272

#elif !defined(DEPS_RUN)
#error "Add syscalls for your architecture or update kernel headers"
#endif

#endif
/* syscall6: a 6-argument syscall entry point.  Needed because some
   glibc/arch combinations mishandle six-argument syscall(). */
#if defined(__GLIBC__) && __GLIBC_PREREQ(2, 11)
/* glibc 2.11 seems to have a working 6 argument syscall. Use the
   glibc supplied syscall in this case.
   The version cut-off is rather arbitrary and could probably be
   earlier. */
#define syscall6 syscall
#elif defined(__x86_64__)
/* 6 argument calls on x86-64 are often buggy in both glibc and
   asm/unistd.h. Add a working version here. */
long syscall6(long call, long a, long b, long c, long d, long e, long f)
{
	long res;
	/* x86-64 syscall ABI: number in rax; args in rdi, rsi, rdx,
	   r10, r8, r9.  The first three are placed via constraints
	   ("D", "S", "d"); the last three are moved into r10/r8/r9 by
	   the asm itself.  rcx and r11 are clobbered by the syscall
	   instruction. */
	asm volatile ("movq %[d],%%r10 ; movq %[e],%%r8 ; movq %[f],%%r9 ; syscall"
		      : "=a" (res)
		      : "0" (call), "D" (a), "S" (b), "d" (c),
			[d] "g" (d), [e] "g" (e), [f] "g" (f) :
		      "r11", "rcx", "r8", "r10", "r9", "memory");
	/* The raw syscall returns -errno; convert to the libc
	   convention of -1 with errno set. */
	if (res < 0) {
		errno = -res;
		res = -1;
	}
	return res;
}
#elif defined(__i386__)

/* i386 has buggy syscall6 in glibc too. This is tricky to do
   in inline assembly because it clobbers so many registers. Do it
   out of line. */
asm(
"__syscall6:\n"
"	pushl %ebp\n"
"	pushl %edi\n"
"	pushl %esi\n"
"	pushl %ebx\n"
	/* After four pushes plus the return address, the first C
	   argument sits at offset 5*4 from %esp.  i386 syscall ABI:
	   number in eax; args in ebx, ecx, edx, esi, edi, ebp. */
"	movl  (0+5)*4(%esp),%eax\n"
"	movl  (1+5)*4(%esp),%ebx\n"
"	movl  (2+5)*4(%esp),%ecx\n"
"	movl  (3+5)*4(%esp),%edx\n"
"	movl  (4+5)*4(%esp),%esi\n"
"	movl  (5+5)*4(%esp),%edi\n"
"	movl  (6+5)*4(%esp),%ebp\n"
"	int   $0x80\n"
"	popl  %ebx\n"
"	popl  %esi\n"
"	popl  %edi\n"
"	popl  %ebp\n"
"	ret"
);

extern long __syscall6(long n, long a, long b, long c, long d, long e, long f);

long syscall6(long call, long a, long b, long c, long d, long e, long f)
{
	long res = __syscall6(call,a,b,c,d,e,f);
	/* Convert the kernel's -errno return into -1 + errno. */
	if (res < 0) {
		errno = -res;
		res = -1;
	}
	return res;
}
#else
#define syscall6 syscall
#endif
/* Raw wrapper for the get_mempolicy(2) syscall: query the NUMA memory
   policy of the calling thread or of the page containing addr.
   Returns 0 on success, -1 with errno set on failure.  Weak so a
   glibc-provided definition wins. */
long WEAK get_mempolicy(int *policy, const unsigned long *nmask,
			unsigned long maxnode, void *addr, int flags)
{
	long ret;

	ret = syscall(__NR_get_mempolicy, policy, nmask, maxnode,
		      addr, flags);
	return ret;
}
/* Raw wrapper for the mbind(2) syscall: set the NUMA memory policy for
   the address range [start, start+len).  Routed through syscall6
   because plain 6-argument syscall() is unreliable on some glibc/arch
   combinations.  Returns 0 on success, -1 with errno set on failure. */
long WEAK mbind(void *start, unsigned long len, int mode,
	const unsigned long *nmask, unsigned long maxnode, unsigned flags)
{
	long ret;

	ret = syscall6(__NR_mbind, (long)start, len, mode,
		       (long)nmask, maxnode, flags);
	return ret;
}
/* Raw wrapper for the set_mempolicy(2) syscall: set the default NUMA
   memory policy of the calling thread.  Returns 0 on success, -1 with
   errno set on failure. */
long WEAK set_mempolicy(int mode, const unsigned long *nmask,
	unsigned long maxnode)
{
	return syscall(__NR_set_mempolicy, mode, nmask, maxnode);
}
/* Raw wrapper for the migrate_pages(2) syscall: move all pages of
   process pid from the nodes in frommask to the nodes in tomask.
   Returns the number of pages that could not be moved, or -1 with
   errno set on failure. */
long WEAK migrate_pages(int pid, unsigned long maxnode,
	const unsigned long *frommask, const unsigned long *tomask)
{
	long rc;

	rc = syscall(__NR_migrate_pages, pid, maxnode, frommask, tomask);
	return rc;
}
/* Raw wrapper for the move_pages(2) syscall: move the given pages of
   process pid to the specified nodes, reporting per-page status.
   Returns 0 on success, -1 with errno set on failure.

   move_pages takes six arguments, so route it through syscall6 like
   mbind does: plain 6-argument syscall() is buggy on some glibc/
   x86-64/i386 combinations (which is exactly why syscall6 exists). */
long WEAK move_pages(int pid, unsigned long count,
	void **pages, const int *nodes, int *status, int flags)
{
	return syscall6(__NR_move_pages, pid, count, (long)pages,
			(long)nodes, (long)status, flags);
}
/* SLES8 glibc doesn't define sched_setaffinity/getaffinity, so supply
   raw syscall wrappers under versioned names. */

/* libnuma 1.1 ABI: length-and-pointer CPU mask. */
int numa_sched_setaffinity_v1(pid_t pid, unsigned len, const unsigned long *mask)
{
	int rc;

	rc = syscall(__NR_sched_setaffinity, pid, len, mask);
	return rc;
}
__asm__(".symver numa_sched_setaffinity_v1,numa_sched_setaffinity@libnuma_1.1");
/* libnuma 1.2 ABI: CPU mask carried in a struct bitmask. */
int numa_sched_setaffinity_v2(pid_t pid, struct bitmask *mask)
{
	unsigned nbytes = numa_bitmask_nbytes(mask);

	return syscall(__NR_sched_setaffinity, pid, nbytes, mask->maskp);
}
__asm__(".symver numa_sched_setaffinity_v2,numa_sched_setaffinity@@libnuma_1.2");
/* libnuma 1.1 ABI: length-and-pointer CPU mask. */
int numa_sched_getaffinity_v1(pid_t pid, unsigned len, const unsigned long *mask)
{
	int rc;

	rc = syscall(__NR_sched_getaffinity, pid, len, mask);
	return rc;
}
__asm__(".symver numa_sched_getaffinity_v1,numa_sched_getaffinity@libnuma_1.1");
/* libnuma 1.2 ABI: CPU mask carried in a struct bitmask.
   The length passed to the kernel is in bytes; on success
   sched_getaffinity returns sizeof(cpumask_t). */
int numa_sched_getaffinity_v2(pid_t pid, struct bitmask *mask)
{
	unsigned nbytes = numa_bitmask_nbytes(mask);

	return syscall(__NR_sched_getaffinity, pid, nbytes, mask->maskp);
}
__asm__(".symver numa_sched_getaffinity_v2,numa_sched_getaffinity@@libnuma_1.2");
/* make_internal_alias (defined in numaint.h) presumably emits an
   unversioned internal alias for each wrapper so other libnuma code can
   call them without going through the versioned symbols — confirm
   against numaint.h. */
make_internal_alias(numa_sched_getaffinity_v1);
make_internal_alias(numa_sched_getaffinity_v2);
make_internal_alias(numa_sched_setaffinity_v1);
make_internal_alias(numa_sched_setaffinity_v2);