Linux Kernel 3.7.1
arch/m68k/include/asm/uaccess_mm.h

#ifndef __M68K_UACCESS_H
#define __M68K_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <asm/segment.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/* We let the MMU do all checking */
static inline int access_ok(int type, const void __user *addr,
                            unsigned long size)
{
        return 1;
}

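/*
 * Example (added for illustration, not part of the original header): callers
 * are still expected to pair access_ok() with the unchecked accessors defined
 * later in this file. On m68k the check always succeeds and the MMU catches
 * bad user pointers at access time. The function name below is invented.
 */
static inline int uaccess_example_check(const void __user *ubuf,
                                        unsigned long size)
{
        if (!access_ok(VERIFY_READ, ubuf, size))
                return -EFAULT;  /* never taken here, kept for portability */
        return 0;
}
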
/*
 * Not all variants of the 68k family support the notion of address spaces.
 * The traditional 680x0 parts do, and they use the sfc/dfc registers and
 * the "moves" instruction to access user space from kernel space. Other
 * family members like ColdFire don't support this, and only have a single
 * address space, and use the usual "move" instruction for user space access.
 *
 * Outside of this difference the user space access functions are the same.
 * So let's keep the code simple and just define what we need to use.
 */
#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
#define MOVES "moves"
#else
#define MOVES "move"
#endif

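/*
 * Illustration (added, not in the original source): because MOVES is a string
 * literal, it is pasted directly into the inline assembly templates below.
 * With CONFIG_CPU_HAS_ADDRESS_SPACES a template such as
 *
 *      "1: "MOVES".l %2,%1\n"
 *
 * assembles as "1: moves.l %2,%1", while on single-address-space parts such
 * as ColdFire the same template assembles as "1: move.l %2,%1".
 */
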
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue. No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
        unsigned long insn, fixup;
};

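/*
 * Sketch (assumption, not part of this header): how the fault handler is
 * expected to consume these entries. When a tagged instruction faults on a
 * user address, the handler looks up the faulting PC and resumes at the
 * fixup address, and the out-of-line fixup code then supplies the -EFAULT
 * result. search_exception_tables() is the generic helper from
 * kernel/extable.c; the surrounding code is illustrative only.
 */
#if 0
        const struct exception_table_entry *entry;

        entry = search_exception_tables(regs->pc);
        if (entry) {
                regs->pc = entry->fixup;   /* run the out-of-line fixup */
                return;
        }
#endif
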
extern int __put_user_bad(void);
extern int __get_user_bad(void);

#define __put_user_asm(res, x, ptr, bwl, reg, err) \
asm volatile ("\n" \
        "1: "MOVES"."#bwl" %2,%1\n" \
        "2:\n" \
        " .section .fixup,\"ax\"\n" \
        " .even\n" \
        "10: moveq.l %3,%0\n" \
        " jra 2b\n" \
        " .previous\n" \
        "\n" \
        " .section __ex_table,\"a\"\n" \
        " .align 4\n" \
        " .long 1b,10b\n" \
        " .long 2b,10b\n" \
        " .previous" \
        : "+d" (res), "=m" (*(ptr)) \
        : #reg (x), "i" (err))

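/*
 * Usage example (added for illustration, not part of the original header):
 * storing one byte with the raw helper. The "b" argument selects byte-sized
 * moves and "d" becomes the data-register constraint for the value. The
 * wrapper function name is invented.
 */
static inline int uaccess_example_put_byte(unsigned char val,
                                           unsigned char __user *uptr)
{
        int err = 0;

        __put_user_asm(err, val, uptr, b, d, -EFAULT);
        return err;     /* 0 on success, -EFAULT if the store faulted */
}
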
/*
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 */

#define __put_user(x, ptr) \
({ \
        typeof(*(ptr)) __pu_val = (x); \
        int __pu_err = 0; \
        __chk_user_ptr(ptr); \
        switch (sizeof (*(ptr))) { \
        case 1: \
                __put_user_asm(__pu_err, __pu_val, ptr, b, d, -EFAULT); \
                break; \
        case 2: \
                __put_user_asm(__pu_err, __pu_val, ptr, w, d, -EFAULT); \
                break; \
        case 4: \
                __put_user_asm(__pu_err, __pu_val, ptr, l, r, -EFAULT); \
                break; \
        case 8: \
            { \
                const void __user *__pu_ptr = (ptr); \
                asm volatile ("\n" \
                        "1: "MOVES".l %2,(%1)+\n" \
                        "2: "MOVES".l %R2,(%1)\n" \
                        "3:\n" \
                        " .section .fixup,\"ax\"\n" \
                        " .even\n" \
                        "10: movel %3,%0\n" \
                        " jra 3b\n" \
                        " .previous\n" \
                        "\n" \
                        " .section __ex_table,\"a\"\n" \
                        " .align 4\n" \
                        " .long 1b,10b\n" \
                        " .long 2b,10b\n" \
                        " .long 3b,10b\n" \
                        " .previous" \
                        : "+d" (__pu_err), "+a" (__pu_ptr) \
                        : "r" (__pu_val), "i" (-EFAULT) \
                        : "memory"); \
                break; \
            } \
        default: \
                __pu_err = __put_user_bad(); \
                break; \
        } \
        __pu_err; \
})
#define put_user(x, ptr) __put_user(x, ptr)

#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({ \
        type __gu_val; \
        asm volatile ("\n" \
                "1: "MOVES"."#bwl" %2,%1\n" \
                "2:\n" \
                " .section .fixup,\"ax\"\n" \
                " .even\n" \
                "10: move.l %3,%0\n" \
                " sub.l %1,%1\n" \
                " jra 2b\n" \
                " .previous\n" \
                "\n" \
                " .section __ex_table,\"a\"\n" \
                " .align 4\n" \
                " .long 1b,10b\n" \
                " .previous" \
                : "+d" (res), "=&" #reg (__gu_val) \
                : "m" (*(ptr)), "i" (err)); \
        (x) = (typeof(*(ptr)))(unsigned long)__gu_val; \
})

#define __get_user(x, ptr) \
({ \
        int __gu_err = 0; \
        __chk_user_ptr(ptr); \
        switch (sizeof(*(ptr))) { \
        case 1: \
                __get_user_asm(__gu_err, x, ptr, u8, b, d, -EFAULT); \
                break; \
        case 2: \
                __get_user_asm(__gu_err, x, ptr, u16, w, d, -EFAULT); \
                break; \
        case 4: \
                __get_user_asm(__gu_err, x, ptr, u32, l, r, -EFAULT); \
                break; \
/*      case 8: disabled because gcc-4.1 has a broken typeof \
            { \
                const void *__gu_ptr = (ptr); \
                u64 __gu_val; \
                asm volatile ("\n" \
                        "1: "MOVES".l (%2)+,%1\n" \
                        "2: "MOVES".l (%2),%R1\n" \
                        "3:\n" \
                        " .section .fixup,\"ax\"\n" \
                        " .even\n" \
                        "10: move.l %3,%0\n" \
                        " sub.l %1,%1\n" \
                        " sub.l %R1,%R1\n" \
                        " jra 3b\n" \
                        " .previous\n" \
                        "\n" \
                        " .section __ex_table,\"a\"\n" \
                        " .align 4\n" \
                        " .long 1b,10b\n" \
                        " .long 2b,10b\n" \
                        " .previous" \
                        : "+d" (__gu_err), "=&r" (__gu_val), \
                          "+a" (__gu_ptr) \
                        : "i" (-EFAULT) \
                        : "memory"); \
                (x) = (typeof(*(ptr)))__gu_val; \
                break; \
            } */ \
        default: \
                __gu_err = __get_user_bad(); \
                break; \
        } \
        __gu_err; \
})
#define get_user(x, ptr) __get_user(x, ptr)

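/*
 * Usage sketch (added for illustration, not part of the original header):
 * get_user() and put_user() pick the transfer width from the pointer type
 * and evaluate to 0 on success or -EFAULT on a faulting access. The
 * identifiers below are invented for the example.
 */
static inline int uaccess_example_increment(int __user *uptr)
{
        int val;

        if (get_user(val, uptr))        /* 4-byte load, case 4 above */
                return -EFAULT;
        val++;
        return put_user(val, uptr);     /* 4-byte store */
}
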
unsigned long __generic_copy_from_user(void *to, const void __user *from, unsigned long n);
unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned long n);

#define __constant_copy_from_user_asm(res, to, from, tmp, n, s1, s2, s3) \
        asm volatile ("\n" \
                "1: "MOVES"."#s1" (%2)+,%3\n" \
                " move."#s1" %3,(%1)+\n" \
                "2: "MOVES"."#s2" (%2)+,%3\n" \
                " move."#s2" %3,(%1)+\n" \
                " .ifnc \""#s3"\",\"\"\n" \
                "3: "MOVES"."#s3" (%2)+,%3\n" \
                " move."#s3" %3,(%1)+\n" \
                " .endif\n" \
                "4:\n" \
                " .section __ex_table,\"a\"\n" \
                " .align 4\n" \
                " .long 1b,10f\n" \
                " .long 2b,20f\n" \
                " .ifnc \""#s3"\",\"\"\n" \
                " .long 3b,30f\n" \
                " .endif\n" \
                " .previous\n" \
                "\n" \
                " .section .fixup,\"ax\"\n" \
                " .even\n" \
                "10: clr."#s1" (%1)+\n" \
                "20: clr."#s2" (%1)+\n" \
                " .ifnc \""#s3"\",\"\"\n" \
                "30: clr."#s3" (%1)+\n" \
                " .endif\n" \
                " moveq.l #"#n",%0\n" \
                " jra 4b\n" \
                " .previous\n" \
                : "+d" (res), "+&a" (to), "+a" (from), "=&d" (tmp) \
                : : "memory")

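/*
 * Illustration (added, not in the original source): the s1/s2/s3 arguments
 * split a small constant-sized copy into at most three moves of decreasing
 * width. For a 7-byte copy, __constant_copy_from_user() below emits
 *
 *      __constant_copy_from_user_asm(res, to, from, tmp, 7, l, w, b);
 *
 * i.e. one longword, one word and one byte. Each user-space access has its
 * own exception-table entry; on a fault the fixup code zero-fills the
 * destination from the faulting chunk onward and leaves the constant size
 * in res.
 */
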
static __always_inline unsigned long
__constant_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        unsigned long res = 0, tmp;

        switch (n) {
        case 1:
                __get_user_asm(res, *(u8 *)to, (u8 __user *)from, u8, b, d, 1);
                break;
        case 2:
                __get_user_asm(res, *(u16 *)to, (u16 __user *)from, u16, w, d, 2);
                break;
        case 3:
                __constant_copy_from_user_asm(res, to, from, tmp, 3, w, b,);
                break;
        case 4:
                __get_user_asm(res, *(u32 *)to, (u32 __user *)from, u32, l, r, 4);
                break;
        case 5:
                __constant_copy_from_user_asm(res, to, from, tmp, 5, l, b,);
                break;
        case 6:
                __constant_copy_from_user_asm(res, to, from, tmp, 6, l, w,);
                break;
        case 7:
                __constant_copy_from_user_asm(res, to, from, tmp, 7, l, w, b);
                break;
        case 8:
                __constant_copy_from_user_asm(res, to, from, tmp, 8, l, l,);
                break;
        case 9:
                __constant_copy_from_user_asm(res, to, from, tmp, 9, l, l, b);
                break;
        case 10:
                __constant_copy_from_user_asm(res, to, from, tmp, 10, l, l, w);
                break;
        case 12:
                __constant_copy_from_user_asm(res, to, from, tmp, 12, l, l, l);
                break;
        default:
                /* we limit the inlined version to 3 moves */
                return __generic_copy_from_user(to, from, n);
        }

        return res;
}

#define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3) \
        asm volatile ("\n" \
                " move."#s1" (%2)+,%3\n" \
                "11: "MOVES"."#s1" %3,(%1)+\n" \
                "12: move."#s2" (%2)+,%3\n" \
                "21: "MOVES"."#s2" %3,(%1)+\n" \
                "22:\n" \
                " .ifnc \""#s3"\",\"\"\n" \
                " move."#s3" (%2)+,%3\n" \
                "31: "MOVES"."#s3" %3,(%1)+\n" \
                "32:\n" \
                " .endif\n" \
                "4:\n" \
                "\n" \
                " .section __ex_table,\"a\"\n" \
                " .align 4\n" \
                " .long 11b,5f\n" \
                " .long 12b,5f\n" \
                " .long 21b,5f\n" \
                " .long 22b,5f\n" \
                " .ifnc \""#s3"\",\"\"\n" \
                " .long 31b,5f\n" \
                " .long 32b,5f\n" \
                " .endif\n" \
                " .previous\n" \
                "\n" \
                " .section .fixup,\"ax\"\n" \
                " .even\n" \
                "5: moveq.l #"#n",%0\n" \
                " jra 4b\n" \
                " .previous\n" \
                : "+d" (res), "+a" (to), "+a" (from), "=&d" (tmp) \
                : : "memory")

static __always_inline unsigned long
__constant_copy_to_user(void __user *to, const void *from, unsigned long n)
{
        unsigned long res = 0, tmp;

        switch (n) {
        case 1:
                __put_user_asm(res, *(u8 *)from, (u8 __user *)to, b, d, 1);
                break;
        case 2:
                __put_user_asm(res, *(u16 *)from, (u16 __user *)to, w, d, 2);
                break;
        case 3:
                __constant_copy_to_user_asm(res, to, from, tmp, 3, w, b,);
                break;
        case 4:
                __put_user_asm(res, *(u32 *)from, (u32 __user *)to, l, r, 4);
                break;
        case 5:
                __constant_copy_to_user_asm(res, to, from, tmp, 5, l, b,);
                break;
        case 6:
                __constant_copy_to_user_asm(res, to, from, tmp, 6, l, w,);
                break;
        case 7:
                __constant_copy_to_user_asm(res, to, from, tmp, 7, l, w, b);
                break;
        case 8:
                __constant_copy_to_user_asm(res, to, from, tmp, 8, l, l,);
                break;
        case 9:
                __constant_copy_to_user_asm(res, to, from, tmp, 9, l, l, b);
                break;
        case 10:
                __constant_copy_to_user_asm(res, to, from, tmp, 10, l, l, w);
                break;
        case 12:
                __constant_copy_to_user_asm(res, to, from, tmp, 12, l, l, l);
                break;
        default:
                /* limit the inlined version to 3 moves */
                return __generic_copy_to_user(to, from, n);
        }

        return res;
}

#define __copy_from_user(to, from, n) \
        (__builtin_constant_p(n) ? \
         __constant_copy_from_user(to, from, n) : \
         __generic_copy_from_user(to, from, n))

#define __copy_to_user(to, from, n) \
        (__builtin_constant_p(n) ? \
         __constant_copy_to_user(to, from, n) : \
         __generic_copy_to_user(to, from, n))

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

#define copy_from_user(to, from, n) __copy_from_user(to, from, n)
#define copy_to_user(to, from, n) __copy_to_user(to, from, n)

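/*
 * Usage sketch (added for illustration, not part of the original header):
 * small compile-time-constant lengths are inlined through the __constant_*
 * helpers above, anything else goes through the out-of-line __generic_*
 * functions. Both return the number of bytes left uncopied, so 0 means
 * success. The function name and buffer are invented for the example.
 */
static inline int uaccess_example_roundtrip(void __user *ubuf)
{
        unsigned char buf[8];

        if (copy_from_user(buf, ubuf, sizeof(buf)))   /* constant 8: inlined */
                return -EFAULT;
        /* ... transform buf here ... */
        if (copy_to_user(ubuf, buf, sizeof(buf)))
                return -EFAULT;
        return 0;
}
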
#define user_addr_max() \
        (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)

extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __clear_user(void __user *to, unsigned long n);

#define clear_user __clear_user

#endif /* __M68K_UACCESS_H */