Linux Kernel
3.7.1
include/asm-generic/uaccess.h
#ifndef __ASM_GENERIC_UACCESS_H
#define __ASM_GENERIC_UACCESS_H

/*
 * User space memory access functions, these should work
 * on any machine that has kernel and user data in the same
 * address space, e.g. all NOMMU machines.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/string.h>

#include <asm/segment.h>

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#ifndef KERNEL_DS
#define KERNEL_DS	MAKE_MM_SEG(~0UL)
#endif

#ifndef USER_DS
#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
#endif

#ifndef get_fs
#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
}
#endif
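
/*
 * Illustrative usage (not part of this header): a minimal sketch of the
 * classic get_fs()/set_fs(KERNEL_DS) pattern, which temporarily widens the
 * address limit so a routine expecting a __user pointer can be fed a kernel
 * buffer.  do_user_read() and the function name below are hypothetical.
 */
#if 0
static long example_read_into_kernel(char *kernel_buf, size_t len)
{
	mm_segment_t old_fs = get_fs();
	long ret;

	set_fs(KERNEL_DS);		/* let "user" accesses reach kernel addresses */
	ret = do_user_read((char __user *)kernel_buf, len);
	set_fs(old_fs);			/* always restore the saved limit */
	return ret;
}
#endif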

#define segment_eq(a, b) ((a).seg == (b).seg)

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define access_ok(type, addr, size) __access_ok((unsigned long)(addr),(size))

/*
 * The architecture should really override this if possible, at least
 * doing a check on the get_fs()
 */
#ifndef __access_ok
static inline int __access_ok(unsigned long addr, unsigned long size)
{
	return 1;
}
#endif
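
/*
 * Illustrative sketch (not from any particular architecture): an override
 * along the lines the comment above suggests, rejecting ranges that wrap
 * around or reach past the current addr_limit.  It assumes the generic
 * get_fs() above and the .seg member that segment_eq() relies on; the
 * macro itself is hypothetical.
 */
#if 0
#define __access_ok(addr, size)					\
	(((size) <= get_fs().seg) &&				\
	 ((addr) <= get_fs().seg - (size)))
#endif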

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

/* Returns 0 if exception not found and fixup otherwise.  */
extern unsigned long search_exception_table(unsigned long);
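
/*
 * Illustrative sketch (not part of this header): how an architecture's
 * fault handler typically consults the table above.  The accessors for the
 * saved program counter are hypothetical; the real register layout and
 * helper names are per-architecture.
 */
#if 0
static int example_fixup_exception(struct pt_regs *regs)
{
	unsigned long fixup = search_exception_table(example_instruction_pointer(regs));

	if (fixup) {
		/* resume at the out-of-line fixup code instead of oopsing */
		example_set_instruction_pointer(regs, fixup);
		return 1;
	}
	return 0;	/* no entry: the fault is genuine */
}
#endif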

/*
 * architectures with an MMU should override these two
 */
#ifndef __copy_from_user
static inline __must_check long __copy_from_user(void *to,
		const void __user * from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		switch(n) {
		case 1:
			*(u8 *)to = *(u8 __force *)from;
			return 0;
		case 2:
			*(u16 *)to = *(u16 __force *)from;
			return 0;
		case 4:
			*(u32 *)to = *(u32 __force *)from;
			return 0;
#ifdef CONFIG_64BIT
		case 8:
			*(u64 *)to = *(u64 __force *)from;
			return 0;
#endif
		default:
			break;
		}
	}

	memcpy(to, (const void __force *)from, n);
	return 0;
}
#endif

#ifndef __copy_to_user
static inline __must_check long __copy_to_user(void __user *to,
		const void *from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		switch(n) {
		case 1:
			*(u8 __force *)to = *(u8 *)from;
			return 0;
		case 2:
			*(u16 __force *)to = *(u16 *)from;
			return 0;
		case 4:
			*(u32 __force *)to = *(u32 *)from;
			return 0;
#ifdef CONFIG_64BIT
		case 8:
			*(u64 __force *)to = *(u64 *)from;
			return 0;
#endif
		default:
			break;
		}
	}

	memcpy((void __force *)to, from, n);
	return 0;
}
#endif

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 * This version just falls back to copy_{from,to}_user, which should
 * provide a fast-path for small values.
 */
#define __put_user(x, ptr)					\
({								\
	__typeof__(*(ptr)) __x = (x);				\
	int __pu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof (*(ptr))) {				\
	case 1:							\
	case 2:							\
	case 4:							\
	case 8:							\
		__pu_err = __put_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		break;						\
	default:						\
		__put_user_bad();				\
		break;						\
	}							\
	__pu_err;						\
})

#define put_user(x, ptr)					\
({								\
	might_sleep();						\
	access_ok(VERIFY_WRITE, ptr, sizeof(*ptr)) ?		\
		__put_user(x, ptr) :				\
		-EFAULT;					\
})

static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
	size = __copy_to_user(ptr, x, size);
	return size ? -EFAULT : size;
}

extern int __put_user_bad(void) __attribute__((noreturn));
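
/*
 * Illustrative usage (not part of this header): returning a single scalar
 * to user space with put_user().  The surrounding function is hypothetical;
 * the point is that put_user() takes the value first and a typed __user
 * pointer second, and evaluates to 0 on success or -EFAULT on failure.
 */
#if 0
static long example_report_count(unsigned int count, unsigned int __user *uptr)
{
	if (put_user(count, uptr))
		return -EFAULT;
	return 0;
}
#endif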

#define __get_user(x, ptr)					\
({								\
	int __gu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1: {						\
		unsigned char __x;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 2: {						\
		unsigned short __x;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 4: {						\
		unsigned int __x;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 8: {						\
		unsigned long long __x;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	default:						\
		__get_user_bad();				\
		break;						\
	}							\
	__gu_err;						\
})

#define get_user(x, ptr)					\
({								\
	might_sleep();						\
	access_ok(VERIFY_READ, ptr, sizeof(*ptr)) ?		\
		__get_user(x, ptr) :				\
		-EFAULT;					\
})

static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
	size = __copy_from_user(x, ptr, size);
	return size ? -EFAULT : size;
}

extern int __get_user_bad(void) __attribute__((noreturn));
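
/*
 * Illustrative usage (not part of this header): fetching a single scalar
 * from user space with get_user().  The macro infers the transfer size
 * from the pointer's type, so "val" below is filled by a 4-byte read.
 * The function name is hypothetical.
 */
#if 0
static long example_fetch_flag(const unsigned int __user *uptr)
{
	unsigned int val;

	if (get_user(val, uptr))
		return -EFAULT;		/* uptr was not a readable user address */
	return val ? 1 : 0;
}
#endif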

#ifndef __copy_from_user_inatomic
#define __copy_from_user_inatomic __copy_from_user
#endif

#ifndef __copy_to_user_inatomic
#define __copy_to_user_inatomic __copy_to_user
#endif

static inline long copy_from_user(void *to,
		const void __user * from, unsigned long n)
{
	might_sleep();
	if (access_ok(VERIFY_READ, from, n))
		return __copy_from_user(to, from, n);
	else
		return n;
}

static inline long copy_to_user(void __user *to,
		const void *from, unsigned long n)
{
	might_sleep();
	if (access_ok(VERIFY_WRITE, to, n))
		return __copy_to_user(to, from, n);
	else
		return n;
}
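
/*
 * Illustrative usage (not part of this header): copying a whole structure
 * in and out of user space.  copy_from_user()/copy_to_user() return the
 * number of bytes that could NOT be copied, so any non-zero result is
 * treated as -EFAULT.  "struct example_args" and the handler below are
 * hypothetical.
 */
#if 0
struct example_args {
	unsigned long offset;
	unsigned long length;
};

static long example_ioctl_handler(void __user *argp)
{
	struct example_args args;

	if (copy_from_user(&args, argp, sizeof(args)))
		return -EFAULT;

	if (args.length > 4096)
		args.length = 4096;	/* clamp before echoing back */

	if (copy_to_user(argp, &args, sizeof(args)))
		return -EFAULT;
	return 0;
}
#endif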

/*
 * Copy a null terminated string from userspace.
 */
#ifndef __strncpy_from_user
static inline long
__strncpy_from_user(char *dst, const char __user *src, long count)
{
	char *tmp;
	strncpy(dst, (const char __force *)src, count);
	for (tmp = dst; *tmp && count > 0; tmp++, count--)
		;
	return (tmp - dst);
}
#endif

static inline long
strncpy_from_user(char *dst, const char __user *src, long count)
{
	if (!access_ok(VERIFY_READ, src, 1))
		return -EFAULT;
	return __strncpy_from_user(dst, src, count);
}
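
/*
 * Illustrative usage (not part of this header): pulling a NUL-terminated
 * name from user space into a fixed kernel buffer.  strncpy_from_user()
 * returns a negative error on a bad address and the copied length
 * otherwise; a result that reaches the buffer size means no NUL was found
 * within the buffer.  The function name is hypothetical.
 */
#if 0
static long example_get_name(char *buf, size_t buflen, const char __user *uname)
{
	long len = strncpy_from_user(buf, uname, buflen);

	if (len < 0)
		return len;		/* -EFAULT */
	if ((size_t)len >= buflen)
		return -ENAMETOOLONG;	/* no NUL within buflen bytes */
	return len;
}
#endif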

/*
 * Return the size of a string (including the ending 0)
 *
 * Return 0 on exception, a value greater than N if too long
 */
#ifndef __strnlen_user
#define __strnlen_user(s, n) (strnlen((s), (n)) + 1)
#endif

/*
 * Unlike strnlen, strnlen_user includes the nul terminator in
 * its returned count. Callers should check for a returned value
 * greater than N as an indication the string is too long.
 */
static inline long strnlen_user(const char __user *src, long n)
{
	if (!access_ok(VERIFY_READ, src, 1))
		return 0;
	return __strnlen_user(src, n);
}

static inline long strlen_user(const char __user *src)
{
	return strnlen_user(src, 32767);
}
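
/*
 * Illustrative usage (not part of this header): validating a user string's
 * length before accepting it.  As the comment above notes, the count
 * returned by strnlen_user() includes the terminating NUL; 0 signals a
 * faulting address and a value greater than the limit signals "too long".
 * The function name and the 256-byte limit are hypothetical.
 */
#if 0
static long example_check_string(const char __user *ustr)
{
	long len = strnlen_user(ustr, 256);

	if (len == 0)
		return -EFAULT;		/* bad user pointer */
	if (len > 256)
		return -EINVAL;		/* longer than we are willing to accept */
	return len;			/* includes the trailing NUL */
}
#endif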

/*
 * Zero Userspace
 */
#ifndef __clear_user
static inline __must_check unsigned long
__clear_user(void __user *to, unsigned long n)
{
	memset((void __force *)to, 0, n);
	return 0;
}
#endif

static inline __must_check unsigned long
clear_user(void __user *to, unsigned long n)
{
	might_sleep();
	if (!access_ok(VERIFY_WRITE, to, n))
		return n;

	return __clear_user(to, n);
}
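
/*
 * Illustrative usage (not part of this header): zero-filling the unused
 * tail of a user buffer, e.g. after a short read.  clear_user() returns
 * the number of bytes that could not be cleared, so any non-zero result is
 * treated as -EFAULT.  The function name is hypothetical.
 */
#if 0
static long example_pad_buffer(void __user *buf, size_t filled, size_t total)
{
	if (filled >= total)
		return 0;
	if (clear_user((char __user *)buf + filled, total - filled))
		return -EFAULT;
	return 0;
}
#endif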

#endif /* __ASM_GENERIC_UACCESS_H */