Linux Kernel 3.7.1
uaccess.h
#ifndef __ALPHA_UACCESS_H
#define __ALPHA_UACCESS_H

#include <linux/errno.h>
#include <linux/sched.h>


/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * Or at least it did once upon a time.  Nowadays it is a mask that
 * defines which bits of the address space are off limits.  This is a
 * wee bit faster than the above.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define KERNEL_DS ((mm_segment_t) { 0UL })
#define USER_DS ((mm_segment_t) { -0x40000000000UL })

#define VERIFY_READ 0
#define VERIFY_WRITE 1

#define get_fs() (current_thread_info()->addr_limit)
#define get_ds() (KERNEL_DS)
#define set_fs(x) (current_thread_info()->addr_limit = (x))

#define segment_eq(a,b) ((a).seg == (b).seg)

/*
 * Is an address valid?  This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 */
#define __access_ok(addr,size,segment) \
	(((segment).seg & (addr | size | (addr+size))) == 0)

#define access_ok(type,addr,size) \
({ \
	__chk_user_ptr(addr); \
	__access_ok(((unsigned long)(addr)),(size),get_fs()); \
})
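
/*
 * Worked example of the mask check above, using the constants defined
 * in this file: USER_DS.seg is -0x40000000000UL, i.e. all bits at or
 * above bit 42 set, so a user access passes only if addr, size and
 * addr+size all stay below 2^42.
 *
 *	addr = 0x120000000 (a typical user text address), size = 0x1000:
 *	seg & (addr | size | (addr+size)) == 0	-> access allowed
 *
 *	addr = 0xfffffc0000001000 (a kernel address), size = 0x1000:
 *	the high bits survive the AND, result is nonzero -> access denied
 *
 * With KERNEL_DS.seg == 0 the AND is always 0 and every address is
 * accepted, which is the "checking is bypassed" behaviour noted above.
 */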

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * As the alpha uses the same address space for kernel and user
 * data, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof/typeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x,ptr) \
	__put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)),get_fs())
#define get_user(x,ptr) \
	__get_user_check((x),(ptr),sizeof(*(ptr)),get_fs())
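
/*
 * Usage sketch (hypothetical caller, not part of this header): both
 * macros return 0 on success and -EFAULT on a bad user pointer, with
 * the access width inferred from the pointer type.
 */
#if 0	/* illustrative only */
static long example_ioctl(int __user *argp)
{
	int val;

	if (get_user(val, argp))	/* 32-bit load, since *argp is int */
		return -EFAULT;
	val++;
	if (put_user(val, argp))	/* 32-bit store back to user space */
		return -EFAULT;
	return 0;
}
#endif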

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the programmer has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x,ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr) \
	__get_user_nocheck((x),(ptr),sizeof(*(ptr)))
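
/*
 * Usage sketch (hypothetical caller, not part of this header): when
 * touching several fields of one user buffer, a single access_ok()
 * check up front lets the cheaper unchecked variants do the accesses.
 */
#if 0	/* illustrative only */
static long example_read_pair(int __user *uptr, int *a, int *b)
{
	if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(int)))
		return -EFAULT;
	if (__get_user(*a, uptr) || __get_user(*b, uptr + 1))
		return -EFAULT;
	return 0;
}
#endif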

/*
 * The "lda %1, 2b-1b(%0)" bits are magic to get the assembler to
 * encode the bits we need for resolving the exception.  See the
 * more extensive comments with fixup_inline_exception below for
 * more information.
 */

extern void __get_user_unknown(void);

#define __get_user_nocheck(x,ptr,size) \
({ \
	long __gu_err = 0; \
	unsigned long __gu_val; \
	__chk_user_ptr(ptr); \
	switch (size) { \
	  case 1: __get_user_8(ptr); break; \
	  case 2: __get_user_16(ptr); break; \
	  case 4: __get_user_32(ptr); break; \
	  case 8: __get_user_64(ptr); break; \
	  default: __get_user_unknown(); break; \
	} \
	(x) = (__typeof__(*(ptr))) __gu_val; \
	__gu_err; \
})

#define __get_user_check(x,ptr,size,segment) \
({ \
	long __gu_err = -EFAULT; \
	unsigned long __gu_val = 0; \
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
	if (__access_ok((unsigned long)__gu_addr,size,segment)) { \
		__gu_err = 0; \
		switch (size) { \
		  case 1: __get_user_8(__gu_addr); break; \
		  case 2: __get_user_16(__gu_addr); break; \
		  case 4: __get_user_32(__gu_addr); break; \
		  case 8: __get_user_64(__gu_addr); break; \
		  default: __get_user_unknown(); break; \
		} \
	} \
	(x) = (__typeof__(*(ptr))) __gu_val; \
	__gu_err; \
})

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))
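
/*
 * Note on the __m() cast above: presenting the target as a large
 * struct makes gcc treat the "m" operand in the asm bodies below as a
 * wide region of memory rather than one scalar, so it neither caches
 * the value in a register nor reorders other accesses around the asm.
 */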

#define __get_user_64(addr) \
	__asm__("1: ldq %0,%2\n" \
	"2:\n" \
	".section __ex_table,\"a\"\n" \
	" .long 1b - .\n" \
	" lda %0, 2b-1b(%1)\n" \
	".previous" \
	: "=r"(__gu_val), "=r"(__gu_err) \
	: "m"(__m(addr)), "1"(__gu_err))

#define __get_user_32(addr) \
	__asm__("1: ldl %0,%2\n" \
	"2:\n" \
	".section __ex_table,\"a\"\n" \
	" .long 1b - .\n" \
	" lda %0, 2b-1b(%1)\n" \
	".previous" \
	: "=r"(__gu_val), "=r"(__gu_err) \
	: "m"(__m(addr)), "1"(__gu_err))

#ifdef __alpha_bwx__
/* Those lucky bastards with ev56 and later CPUs can do byte/word moves. */

#define __get_user_16(addr) \
	__asm__("1: ldwu %0,%2\n" \
	"2:\n" \
	".section __ex_table,\"a\"\n" \
	" .long 1b - .\n" \
	" lda %0, 2b-1b(%1)\n" \
	".previous" \
	: "=r"(__gu_val), "=r"(__gu_err) \
	: "m"(__m(addr)), "1"(__gu_err))

#define __get_user_8(addr) \
	__asm__("1: ldbu %0,%2\n" \
	"2:\n" \
	".section __ex_table,\"a\"\n" \
	" .long 1b - .\n" \
	" lda %0, 2b-1b(%1)\n" \
	".previous" \
	: "=r"(__gu_val), "=r"(__gu_err) \
	: "m"(__m(addr)), "1"(__gu_err))
#else
/* Unfortunately, we can't get an unaligned access trap for the sub-word
   load, so we have to do a general unaligned operation.  */

#define __get_user_16(addr) \
{ \
	long __gu_tmp; \
	__asm__("1: ldq_u %0,0(%3)\n" \
	"2: ldq_u %1,1(%3)\n" \
	" extwl %0,%3,%0\n" \
	" extwh %1,%3,%1\n" \
	" or %0,%1,%0\n" \
	"3:\n" \
	".section __ex_table,\"a\"\n" \
	" .long 1b - .\n" \
	" lda %0, 3b-1b(%2)\n" \
	" .long 2b - .\n" \
	" lda %0, 3b-2b(%2)\n" \
	".previous" \
	: "=&r"(__gu_val), "=&r"(__gu_tmp), "=r"(__gu_err) \
	: "r"(addr), "2"(__gu_err)); \
}
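
/*
 * How the sequence above works: the two ldq_u's fetch the aligned
 * quadword(s) that can hold the unaligned halfword (ldq_u ignores the
 * low three address bits), extwl/extwh extract the low and high bytes
 * of that halfword from each, and the final "or" merges them.  When
 * the halfword does not straddle a quadword boundary, both loads hit
 * the same quadword and the extwh contribution is zero.
 */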

#define __get_user_8(addr) \
	__asm__("1: ldq_u %0,0(%2)\n" \
	" extbl %0,%2,%0\n" \
	"2:\n" \
	".section __ex_table,\"a\"\n" \
	" .long 1b - .\n" \
	" lda %0, 2b-1b(%1)\n" \
	".previous" \
	: "=&r"(__gu_val), "=r"(__gu_err) \
	: "r"(addr), "1"(__gu_err))
#endif

extern void __put_user_unknown(void);

#define __put_user_nocheck(x,ptr,size) \
({ \
	long __pu_err = 0; \
	__chk_user_ptr(ptr); \
	switch (size) { \
	  case 1: __put_user_8(x,ptr); break; \
	  case 2: __put_user_16(x,ptr); break; \
	  case 4: __put_user_32(x,ptr); break; \
	  case 8: __put_user_64(x,ptr); break; \
	  default: __put_user_unknown(); break; \
	} \
	__pu_err; \
})

#define __put_user_check(x,ptr,size,segment) \
({ \
	long __pu_err = -EFAULT; \
	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
	if (__access_ok((unsigned long)__pu_addr,size,segment)) { \
		__pu_err = 0; \
		switch (size) { \
		  case 1: __put_user_8(x,__pu_addr); break; \
		  case 2: __put_user_16(x,__pu_addr); break; \
		  case 4: __put_user_32(x,__pu_addr); break; \
		  case 8: __put_user_64(x,__pu_addr); break; \
		  default: __put_user_unknown(); break; \
		} \
	} \
	__pu_err; \
})

/*
 * The "__put_user_xx()" macros tell gcc they read from memory
 * instead of writing: this is because they do not write to
 * any memory gcc knows about, so there are no aliasing issues.
 */
#define __put_user_64(x,addr) \
__asm__ __volatile__("1: stq %r2,%1\n" \
	"2:\n" \
	".section __ex_table,\"a\"\n" \
	" .long 1b - .\n" \
	" lda $31,2b-1b(%0)\n" \
	".previous" \
	: "=r"(__pu_err) \
	: "m" (__m(addr)), "rJ" (x), "0"(__pu_err))

#define __put_user_32(x,addr) \
__asm__ __volatile__("1: stl %r2,%1\n" \
	"2:\n" \
	".section __ex_table,\"a\"\n" \
	" .long 1b - .\n" \
	" lda $31,2b-1b(%0)\n" \
	".previous" \
	: "=r"(__pu_err) \
	: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))

#ifdef __alpha_bwx__
/* Those lucky bastards with ev56 and later CPUs can do byte/word moves. */

#define __put_user_16(x,addr) \
__asm__ __volatile__("1: stw %r2,%1\n" \
	"2:\n" \
	".section __ex_table,\"a\"\n" \
	" .long 1b - .\n" \
	" lda $31,2b-1b(%0)\n" \
	".previous" \
	: "=r"(__pu_err) \
	: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))

#define __put_user_8(x,addr) \
__asm__ __volatile__("1: stb %r2,%1\n" \
	"2:\n" \
	".section __ex_table,\"a\"\n" \
	" .long 1b - .\n" \
	" lda $31,2b-1b(%0)\n" \
	".previous" \
	: "=r"(__pu_err) \
	: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
#else
/* Unfortunately, we can't get an unaligned access trap for the sub-word
   write, so we have to do a general unaligned operation.  */

#define __put_user_16(x,addr) \
{ \
	long __pu_tmp1, __pu_tmp2, __pu_tmp3, __pu_tmp4; \
	__asm__ __volatile__( \
	"1: ldq_u %2,1(%5)\n" \
	"2: ldq_u %1,0(%5)\n" \
	" inswh %6,%5,%4\n" \
	" inswl %6,%5,%3\n" \
	" mskwh %2,%5,%2\n" \
	" mskwl %1,%5,%1\n" \
	" or %2,%4,%2\n" \
	" or %1,%3,%1\n" \
	"3: stq_u %2,1(%5)\n" \
	"4: stq_u %1,0(%5)\n" \
	"5:\n" \
	".section __ex_table,\"a\"\n" \
	" .long 1b - .\n" \
	" lda $31, 5b-1b(%0)\n" \
	" .long 2b - .\n" \
	" lda $31, 5b-2b(%0)\n" \
	" .long 3b - .\n" \
	" lda $31, 5b-3b(%0)\n" \
	" .long 4b - .\n" \
	" lda $31, 5b-4b(%0)\n" \
	".previous" \
	: "=r"(__pu_err), "=&r"(__pu_tmp1), \
	  "=&r"(__pu_tmp2), "=&r"(__pu_tmp3), \
	  "=&r"(__pu_tmp4) \
	: "r"(addr), "r"((unsigned long)(x)), "0"(__pu_err)); \
}

#define __put_user_8(x,addr) \
{ \
	long __pu_tmp1, __pu_tmp2; \
	__asm__ __volatile__( \
	"1: ldq_u %1,0(%4)\n" \
	" insbl %3,%4,%2\n" \
	" mskbl %1,%4,%1\n" \
	" or %1,%2,%1\n" \
	"2: stq_u %1,0(%4)\n" \
	"3:\n" \
	".section __ex_table,\"a\"\n" \
	" .long 1b - .\n" \
	" lda $31, 3b-1b(%0)\n" \
	" .long 2b - .\n" \
	" lda $31, 3b-2b(%0)\n" \
	".previous" \
	: "=r"(__pu_err), \
	  "=&r"(__pu_tmp1), "=&r"(__pu_tmp2) \
	: "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \
}
#endif


/*
 * Complex access routines
 */

/* This little bit of silliness is to get the GP loaded for a function
   that ordinarily wouldn't.  Otherwise we could have it done by the macro
   directly, which can be optimized by the linker.  */
#ifdef MODULE
#define __module_address(sym) "r"(sym),
#define __module_call(ra, arg, sym) "jsr $" #ra ",(%" #arg ")," #sym
#else
#define __module_address(sym)
#define __module_call(ra, arg, sym) "bsr $" #ra "," #sym " !samegp"
#endif
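
/*
 * For reference, the expansions these produce at the call sites below
 * (derived mechanically from the definitions above):
 *
 *   !MODULE: "bsr $28,__copy_user !samegp"
 *            a direct branch; !samegp tells the linker that caller and
 *            callee share a GP, so no GP reload is required.
 *   MODULE:  "jsr $28,(%3),__copy_user" plus an extra "r"(__copy_user)
 *            input carrying the target address, since a module cannot
 *            assume the symbol is reachable by a direct branch.
 */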

extern void __copy_user(void);

extern inline long
__copy_tofrom_user_nocheck(void *to, const void *from, long len)
{
	register void * __cu_to __asm__("$6") = to;
	register const void * __cu_from __asm__("$7") = from;
	register long __cu_len __asm__("$0") = len;

	__asm__ __volatile__(
		__module_call(28, 3, __copy_user)
		: "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to)
		: __module_address(__copy_user)
		  "0" (__cu_len), "1" (__cu_from), "2" (__cu_to)
		: "$1","$2","$3","$4","$5","$28","memory");

	return __cu_len;
}

extern inline long
__copy_tofrom_user(void *to, const void *from, long len, const void __user *validate)
{
	if (__access_ok((unsigned long)validate, len, get_fs()))
		len = __copy_tofrom_user_nocheck(to, from, len);
	return len;
}

#define __copy_to_user(to,from,n) \
({ \
	__chk_user_ptr(to); \
	__copy_tofrom_user_nocheck((__force void *)(to),(from),(n)); \
})
#define __copy_from_user(to,from,n) \
({ \
	__chk_user_ptr(from); \
	__copy_tofrom_user_nocheck((to),(__force void *)(from),(n)); \
})

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user


extern inline long
copy_to_user(void __user *to, const void *from, long n)
{
	return __copy_tofrom_user((__force void *)to, from, n, to);
}

extern inline long
copy_from_user(void *to, const void __user *from, long n)
{
	return __copy_tofrom_user(to, (__force void *)from, n, from);
}
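
/*
 * Usage sketch (hypothetical caller, not part of this header): as with
 * the generic kernel API, these return the number of bytes that could
 * NOT be copied, so zero means complete success.
 */
#if 0	/* illustrative only */
static long example_write(void *kbuf, const void __user *ubuf, long n)
{
	if (copy_from_user(kbuf, ubuf, n))
		return -EFAULT;
	return n;
}
#endif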

extern void __do_clear_user(void);

extern inline long
__clear_user(void __user *to, long len)
{
	register void __user * __cl_to __asm__("$6") = to;
	register long __cl_len __asm__("$0") = len;
	__asm__ __volatile__(
		__module_call(28, 2, __do_clear_user)
		: "=r"(__cl_len), "=r"(__cl_to)
		: __module_address(__do_clear_user)
		  "0"(__cl_len), "1"(__cl_to)
		: "$1","$2","$3","$4","$5","$28","memory");
	return __cl_len;
}

extern inline long
clear_user(void __user *to, long len)
{
	if (__access_ok((unsigned long)to, len, get_fs()))
		len = __clear_user(to, len);
	return len;
}
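
/*
 * Usage sketch (hypothetical caller, not part of this header):
 * clear_user() returns the number of bytes left unzeroed, so a nonzero
 * result means part of the range was inaccessible.
 */
#if 0	/* illustrative only */
static long example_zero_tail(void __user *buf, long done, long total)
{
	if (clear_user(buf + done, total - done))
		return -EFAULT;
	return total;
}
#endif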

#undef __module_address
#undef __module_call

#define user_addr_max() \
	(segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)

extern long strncpy_from_user(char *dest, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
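
/*
 * Usage sketch (hypothetical caller, not part of this header):
 * strncpy_from_user() returns the length of the copied string
 * (excluding the NUL) on success, the buffer size if the source did
 * not fit, or -EFAULT on an unreadable source.
 */
#if 0	/* illustrative only */
static long example_get_name(char *kname, const char __user *uname)
{
	long len = strncpy_from_user(kname, uname, 32);
	if (len < 0)
		return len;		/* -EFAULT */
	if (len == 32)
		return -ENAMETOOLONG;	/* no NUL seen within 32 bytes */
	return 0;
}
#endif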

/*
 * About the exception table:
 *
 * - insn is a 32-bit pc-relative offset from the faulting insn.
 * - nextinsn is a 16-bit offset off of the faulting instruction
 *   (not off of the *next* instruction as branches are).
 * - errreg is the register in which to place -EFAULT.
 * - valreg is the final target register for the load sequence
 *   and will be zeroed.
 *
 * Either errreg or valreg may be $31, in which case nothing happens.
 *
 * The exception fixup information "just so happens" to be arranged
 * as in a MEM format instruction.  This lets us emit our three
 * values like so:
 *
 *	lda valreg, nextinsn(errreg)
 *
 */

struct exception_table_entry
{
	signed int insn;
	union exception_fixup {
		unsigned unit;
		struct {
			signed int nextinsn : 16;
			unsigned int errreg : 5;
			unsigned int valreg : 5;
		} bits;
	} fixup;
};

/* Returns the new pc */
#define fixup_exception(map_reg, _fixup, pc) \
({ \
	if ((_fixup)->fixup.bits.valreg != 31) \
		map_reg((_fixup)->fixup.bits.valreg) = 0; \
	if ((_fixup)->fixup.bits.errreg != 31) \
		map_reg((_fixup)->fixup.bits.errreg) = -EFAULT; \
	(pc) + (_fixup)->fixup.bits.nextinsn; \
})
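
/*
 * Worked example (register numbers assumed for illustration): in the
 * __get_user_32 fixup above, "lda %0, 2b-1b(%1)" with %0 allocated to
 * $2 and %1 to $3 encodes a MEM-format word whose fields read back as
 * valreg = 2, errreg = 3, nextinsn = 4 (label 2b is one instruction
 * past 1b).  fixup_exception() then zeroes $2, places -EFAULT in $3,
 * and resumes at faulting pc + 4, i.e. just past the failed load.
 */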

#define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE

#endif /* __ALPHA_UACCESS_H */