File: segment.h

Package: kernel-source-2.0.35 2.0.35-3

#ifndef _ASM_SEGMENT_H
#define _ASM_SEGMENT_H

#include <linux/string.h>

/*
 * This is a gcc optimization barrier, which essentially
 * inserts a sequence point into the gcc RTL tree across
 * which gcc can't move code. This is needed when we enter
 * or exit a critical region (in this case around user-level
 * accesses that may sleep, and we can't let gcc optimize
 * global state around them).
 */
#define __gcc_barrier() __asm__ __volatile__("": : :"memory")
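
/*
 * Illustrative sketch, not part of the original header: the "memory"
 * clobber tells gcc that memory may change across the barrier, so a
 * value such as the hypothetical global below can't stay cached in a
 * register across it; the second read really goes back to memory:
 *
 *	extern int shared_flag;
 *
 *	static inline int read_flag_twice(void)
 *	{
 *		int first = shared_flag;
 *		__gcc_barrier();
 *		return first + shared_flag;
 *	}
 */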

/*
 * Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
 */
#define put_user(x,ptr) __put_user((unsigned long)(x),(ptr),sizeof(*(ptr)))
#define get_user(ptr) ((__typeof__(*(ptr)))__get_user((ptr),sizeof(*(ptr))))
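
/*
 * Usage sketch (illustrative, not part of the original header): the
 * transfer size follows from the pointer type, so the hypothetical
 * calls below expand to 1-, 2- and 4-byte moves respectively, with
 * "ubuf" standing in for a user-space address:
 *
 *	unsigned char  c = get_user((unsigned char *) ubuf);
 *	unsigned short s = get_user((unsigned short *) ubuf);
 *	put_user(0x12345678, (int *) ubuf);
 */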

/*
 * This is a silly but good way to make sure that
 * the __put_user function is indeed always optimized,
 * and that we use the correct sizes..
 */
extern int bad_user_access_length(void);
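
/*
 * Illustrative note, not part of the original header: for any
 * constant size other than 1, 2, 4 or 8, the switch below reduces to
 * a call to bad_user_access_length(), which has no definition here,
 * so the mistake is normally caught as an unresolved reference at
 * link time.  For example, a hypothetical 3-byte object:
 *
 *	struct odd { char c[3]; } * uptr;
 *
 *	put_user(0, uptr);
 *
 * leaves exactly such a dangling call behind.
 */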

/* I should make this use unaligned transfers etc.. */
static inline void __put_user(unsigned long x, void * y, int size)
{
	__gcc_barrier();
	switch (size) {
		case 1:
			*(char *) y = x;
			break;
		case 2:
			*(short *) y = x;
			break;
		case 4:
			*(int *) y = x;
			break;
		case 8:
			*(long *) y = x;
			break;
		default:
			bad_user_access_length();
	}
	__gcc_barrier();
}

/* I should make this use unaligned transfers etc.. */
static inline unsigned long __get_user(const void * y, int size)
{
	unsigned long result;

	__gcc_barrier();
	switch (size) {
		case 1:
			result = *(unsigned char *) y;
			break;
		case 2:
			result = *(unsigned short *) y;
			break;
		case 4:
			result = *(unsigned int *) y;
			break;
		case 8:
			result = *(unsigned long *) y;
			break;
		default:
			result = bad_user_access_length();
	}
	__gcc_barrier();
	return result;
}

#define get_fs_byte(addr) get_user((unsigned char *)(addr))
#define get_fs_word(addr) get_user((unsigned short *)(addr))
#define get_fs_long(addr) get_user((unsigned int *)(addr))
#define get_fs_quad(addr) get_user((unsigned long *)(addr))

#define put_fs_byte(x,addr) put_user((x),(char *)(addr))
#define put_fs_word(x,addr) put_user((x),(short *)(addr))
#define put_fs_long(x,addr) put_user((x),(int *)(addr))
#define put_fs_quad(x,addr) put_user((x),(long *)(addr))
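
/*
 * Illustrative note, not part of the original header: the old-style
 * names above are plain wrappers around get_user/put_user, so with a
 * hypothetical user pointer "ubuf",
 *
 *	unsigned char c = get_fs_byte(ubuf);
 *
 * is just another way of writing
 *
 *	unsigned char c = get_user((unsigned char *) ubuf);
 */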

static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
{
	__gcc_barrier();
	memcpy(to, from, n);
	__gcc_barrier();
}

static inline void memcpy_tofs(void * to, const void * from, unsigned long n)
{
	__gcc_barrier();
	memcpy(to, from, n);
	__gcc_barrier();
}
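
/*
 * Usage sketch (illustrative, not part of the original header):
 * copying a whole structure in from user space and back out again,
 * with "struct example" and "uarg" made up for this example:
 *
 *	struct example { int a, b; };
 *
 *	static void frob_example(struct example * uarg)
 *	{
 *		struct example tmp;
 *
 *		memcpy_fromfs(&tmp, uarg, sizeof(tmp));
 *		tmp.b = tmp.a;
 *		memcpy_tofs(uarg, &tmp, sizeof(tmp));
 *	}
 */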

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define KERNEL_DS	0
#define USER_DS		1

#define get_fs()  (current->tss.flags & 0x1)
#define set_fs(x) (current->tss.flags = (current->tss.flags & ~0x1) | ((x) & 0x1))
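
/*
 * Usage sketch (illustrative, not part of the original header): the
 * usual pattern for kernel code that wants routines expecting user
 * pointers to operate on a kernel buffer is to switch to KERNEL_DS
 * and restore the old value afterwards:
 *
 *	unsigned long old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	... do the work with get_user/put_user or memcpy_tofs ...
 *	set_fs(old_fs);
 */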

static inline unsigned long get_ds(void)
{
	return 0;
}

#endif /* _ASM_SEGMENT_H */