Linux Kernel 3.7.1
uaccess.h
1 #ifndef _ASM_X86_UACCESS_H
2 #define _ASM_X86_UACCESS_H
3 /*
4  * User space memory access functions
5  */
6 #include <linux/errno.h>
7 #include <linux/compiler.h>
8 #include <linux/thread_info.h>
9 #include <linux/string.h>
10 #include <asm/asm.h>
11 #include <asm/page.h>
12 #include <asm/smap.h>
13 
14 #define VERIFY_READ 0
15 #define VERIFY_WRITE 1
16 
17 /*
18  * The fs value determines whether argument validity checking should be
 19  * performed or not. If get_fs() == USER_DS, checking is performed; with
 20  * get_fs() == KERNEL_DS, checking is bypassed.
21  *
22  * For historical reasons, these macros are grossly misnamed.
23  */
24 
25 #define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
26 
27 #define KERNEL_DS MAKE_MM_SEG(-1UL)
28 #define USER_DS MAKE_MM_SEG(TASK_SIZE_MAX)
29 
30 #define get_ds() (KERNEL_DS)
31 #define get_fs() (current_thread_info()->addr_limit)
32 #define set_fs(x) (current_thread_info()->addr_limit = (x))
33 
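
The get_fs()/set_fs() pair above is what kernel code of this era uses to temporarily lift the address-limit check, typically so a kernel buffer can be handed to a routine that expects a __user pointer. A minimal, hedged sketch of that historical pattern (the file, buffer, and surrounding function are illustrative, not part of this header):

    mm_segment_t old_fs = get_fs();   /* remember the current limit */

    set_fs(KERNEL_DS);                /* access_ok() now accepts kernel addresses */
    ret = vfs_read(filp, (char __user *)kbuf, len, &pos);
    set_fs(old_fs);                   /* always restore the original limit */
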
34 #define segment_eq(a, b) ((a).seg == (b).seg)
35 
36 #define user_addr_max() (current_thread_info()->addr_limit.seg)
37 #define __addr_ok(addr) \
38  ((unsigned long __force)(addr) < user_addr_max())
39 
40 /*
41  * Test whether a block of memory is a valid user space address.
42  * Returns 0 if the range is valid, nonzero otherwise.
43  *
44  * This is equivalent to the following test:
45  * (u33)addr + (u33)size > (u33)current->addr_limit.seg (u65 for x86_64)
46  *
47  * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry...
48  */
49 
50 #define __range_not_ok(addr, size, limit) \
51 ({ \
52  unsigned long flag, roksum; \
53  __chk_user_ptr(addr); \
54  asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" \
55  : "=&r" (flag), "=r" (roksum) \
56  : "1" (addr), "g" ((long)(size)), \
57  "rm" (limit)); \
58  flag; \
59 })
60 
80 #define access_ok(type, addr, size) \
81  (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
82 
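
The asm in __range_not_ok() folds the carry out of addr + size into the comparison, so the test stays correct even when the sum wraps. A plain-C sketch of the same check, with the function name chosen here only for illustration:

    /* Nonzero means the block [addr, addr+size) is NOT ok against limit. */
    static inline int range_not_ok_c(unsigned long addr, unsigned long size,
                                     unsigned long limit)
    {
            unsigned long sum = addr + size;

            if (sum < addr)         /* addition carried: certainly out of range */
                    return 1;
            return sum > limit;     /* same test the asm does with the carry kept */
    }

access_ok() simply passes user_addr_max() as the limit and inverts the result.
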
83 /*
84  * The exception table consists of pairs of addresses relative to the
 85  * exception table entry itself: the first is the address of an
86  * instruction that is allowed to fault, and the second is the address
87  * at which the program should continue. No registers are modified,
88  * so it is entirely up to the continuation code to figure out what to
89  * do.
90  *
91  * All the routines below use bits of fixup code that are out of line
92  * with the main instruction path. This means when everything is well,
93  * we don't even have to jump over them. Further, they do not intrude
94  * on our cache or tlb entries.
95  */
96 
97 struct exception_table_entry {
98  int insn, fixup;
99 };
100 /* This is not the generic standard exception_table_entry format */
101 #define ARCH_HAS_SORT_EXTABLE
102 #define ARCH_HAS_SEARCH_EXTABLE
103 
104 extern int fixup_exception(struct pt_regs *regs);
105 extern int early_fixup_exception(unsigned long *ip);
106 
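
Because each field stores a signed 32-bit offset relative to its own location, the table stays compact and position-independent. A hedged sketch of how a fixup routine recovers absolute addresses (this mirrors arch/x86/mm/extable.c in this kernel series; the helper names are illustrative):

    static inline unsigned long ex_insn_addr(const struct exception_table_entry *x)
    {
            return (unsigned long)&x->insn + x->insn;    /* faulting instruction */
    }

    static inline unsigned long ex_fixup_addr(const struct exception_table_entry *x)
    {
            return (unsigned long)&x->fixup + x->fixup;  /* continuation address */
    }

    /* fixup_exception() then, in essence, does:
     *   e = search_exception_tables(regs->ip);
     *   if (e) { regs->ip = ex_fixup_addr(e); return 1; }
     */
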
107 /*
108  * These are the main single-value transfer routines. They automatically
109  * use the right size if we just have the right pointer type.
110  *
111  * This gets kind of ugly. We want to return _two_ values in "get_user()"
112  * and yet we don't want to do any pointers, because that is too much
113  * of a performance impact. Thus we have a few rather ugly macros here,
114  * and hide all the ugliness from the user.
115  *
116  * The "__xxx" versions of the user access functions are versions that
117  * do not verify the address space, that must have been done previously
118  * with a separate "access_ok()" call (this is used when we do multiple
119  * accesses to the same area of user memory).
120  */
121 
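
In practice the two results come back as the macro's return value (0 or -EFAULT) and an assignment through the first argument. A hedged usage sketch, with the ioctl argument, uptr, and the locals purely illustrative:

    int val, a, b, err;

    if (get_user(val, (int __user *)arg))        /* checked single fetch */
            return -EFAULT;

    /* The "__xxx" variants skip the limit check, so cover the whole
     * region with one access_ok() first: */
    if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(int)))
            return -EFAULT;
    err  = __get_user(a, &uptr[0]);
    err |= __get_user(b, &uptr[1]);
    if (err)
            return -EFAULT;
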
122 extern int __get_user_1(void);
123 extern int __get_user_2(void);
124 extern int __get_user_4(void);
125 extern int __get_user_8(void);
126 extern int __get_user_bad(void);
127 
128 #define __get_user_x(size, ret, x, ptr) \
129  asm volatile("call __get_user_" #size \
130  : "=a" (ret), "=d" (x) \
131  : "0" (ptr)) \
132 
133 /* Careful: we have to cast the result to the type of the pointer
134  * for sign reasons */
135 
153 #ifdef CONFIG_X86_32
154 #define __get_user_8(__ret_gu, __val_gu, ptr) \
155  __get_user_x(X, __ret_gu, __val_gu, ptr)
156 #else
157 #define __get_user_8(__ret_gu, __val_gu, ptr) \
158  __get_user_x(8, __ret_gu, __val_gu, ptr)
159 #endif
160 
161 #define get_user(x, ptr) \
162 ({ \
163  int __ret_gu; \
164  unsigned long __val_gu; \
165  __chk_user_ptr(ptr); \
166  might_fault(); \
167  switch (sizeof(*(ptr))) { \
168  case 1: \
169  __get_user_x(1, __ret_gu, __val_gu, ptr); \
170  break; \
171  case 2: \
172  __get_user_x(2, __ret_gu, __val_gu, ptr); \
173  break; \
174  case 4: \
175  __get_user_x(4, __ret_gu, __val_gu, ptr); \
176  break; \
177  case 8: \
178  __get_user_8(__ret_gu, __val_gu, ptr); \
179  break; \
180  default: \
181  __get_user_x(X, __ret_gu, __val_gu, ptr); \
182  break; \
183  } \
184  (x) = (__typeof__(*(ptr)))__val_gu; \
185  __ret_gu; \
186 })
187 
188 #define __put_user_x(size, x, ptr, __ret_pu) \
189  asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
190  : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
191 
192 
193 
194 #ifdef CONFIG_X86_32
195 #define __put_user_asm_u64(x, addr, err, errret) \
196  asm volatile(ASM_STAC "\n" \
197  "1: movl %%eax,0(%2)\n" \
198  "2: movl %%edx,4(%2)\n" \
199  "3: " ASM_CLAC "\n" \
200  ".section .fixup,\"ax\"\n" \
201  "4: movl %3,%0\n" \
202  " jmp 3b\n" \
203  ".previous\n" \
204  _ASM_EXTABLE(1b, 4b) \
205  _ASM_EXTABLE(2b, 4b) \
206  : "=r" (err) \
207  : "A" (x), "r" (addr), "i" (errret), "0" (err))
208 
209 #define __put_user_asm_ex_u64(x, addr) \
210  asm volatile(ASM_STAC "\n" \
211  "1: movl %%eax,0(%1)\n" \
212  "2: movl %%edx,4(%1)\n" \
213  "3: " ASM_CLAC "\n" \
214  _ASM_EXTABLE_EX(1b, 2b) \
215  _ASM_EXTABLE_EX(2b, 3b) \
216  : : "A" (x), "r" (addr))
217 
218 #define __put_user_x8(x, ptr, __ret_pu) \
219  asm volatile("call __put_user_8" : "=a" (__ret_pu) \
220  : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
221 #else
222 #define __put_user_asm_u64(x, ptr, retval, errret) \
223  __put_user_asm(x, ptr, retval, "q", "", "er", errret)
224 #define __put_user_asm_ex_u64(x, addr) \
225  __put_user_asm_ex(x, addr, "q", "", "er")
226 #define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
227 #endif
228 
229 extern void __put_user_bad(void);
230 
231 /*
232  * Strange magic calling convention: pointer in %ecx,
233  * value in %eax(:%edx), return value in %eax. clobbers %rbx
234  */
235 extern void __put_user_1(void);
236 extern void __put_user_2(void);
237 extern void __put_user_4(void);
238 extern void __put_user_8(void);
239 
240 #ifdef CONFIG_X86_WP_WORKS_OK
241 
258 #define put_user(x, ptr) \
259 ({ \
260  int __ret_pu; \
261  __typeof__(*(ptr)) __pu_val; \
262  __chk_user_ptr(ptr); \
263  might_fault(); \
264  __pu_val = x; \
265  switch (sizeof(*(ptr))) { \
266  case 1: \
267  __put_user_x(1, __pu_val, ptr, __ret_pu); \
268  break; \
269  case 2: \
270  __put_user_x(2, __pu_val, ptr, __ret_pu); \
271  break; \
272  case 4: \
273  __put_user_x(4, __pu_val, ptr, __ret_pu); \
274  break; \
275  case 8: \
276  __put_user_x8(__pu_val, ptr, __ret_pu); \
277  break; \
278  default: \
279  __put_user_x(X, __pu_val, ptr, __ret_pu); \
280  break; \
281  } \
282  __ret_pu; \
283 })
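
A hedged usage sketch of the write direction; as with get_user(), the expansion yields 0 on success or -EFAULT if the store faults (result and arg are illustrative):

    if (put_user(result, (unsigned int __user *)arg))
            return -EFAULT;
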
284 
285 #define __put_user_size(x, ptr, size, retval, errret) \
286 do { \
287  retval = 0; \
288  __chk_user_ptr(ptr); \
289  switch (size) { \
290  case 1: \
291  __put_user_asm(x, ptr, retval, "b", "b", "iq", errret); \
292  break; \
293  case 2: \
294  __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
295  break; \
296  case 4: \
297  __put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \
298  break; \
299  case 8: \
300  __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval, \
301  errret); \
302  break; \
303  default: \
304  __put_user_bad(); \
305  } \
306 } while (0)
307 
308 #define __put_user_size_ex(x, ptr, size) \
309 do { \
310  __chk_user_ptr(ptr); \
311  switch (size) { \
312  case 1: \
313  __put_user_asm_ex(x, ptr, "b", "b", "iq"); \
314  break; \
315  case 2: \
316  __put_user_asm_ex(x, ptr, "w", "w", "ir"); \
317  break; \
318  case 4: \
319  __put_user_asm_ex(x, ptr, "l", "k", "ir"); \
320  break; \
321  case 8: \
322  __put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr); \
323  break; \
324  default: \
325  __put_user_bad(); \
326  } \
327 } while (0)
328 
329 #else
330 
331 #define __put_user_size(x, ptr, size, retval, errret) \
332 do { \
333  __typeof__(*(ptr))__pus_tmp = x; \
334  retval = 0; \
335  \
336  if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0)) \
337  retval = errret; \
338 } while (0)
339 
340 #define put_user(x, ptr) \
341 ({ \
342  int __ret_pu; \
343  __typeof__(*(ptr))__pus_tmp = x; \
344  __ret_pu = 0; \
345  if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, \
346  sizeof(*(ptr))) != 0)) \
347  __ret_pu = -EFAULT; \
348  __ret_pu; \
349 })
350 #endif
351 
352 #ifdef CONFIG_X86_32
353 #define __get_user_asm_u64(x, ptr, retval, errret) (x) = __get_user_bad()
354 #define __get_user_asm_ex_u64(x, ptr) (x) = __get_user_bad()
355 #else
356 #define __get_user_asm_u64(x, ptr, retval, errret) \
357  __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
358 #define __get_user_asm_ex_u64(x, ptr) \
359  __get_user_asm_ex(x, ptr, "q", "", "=r")
360 #endif
361 
362 #define __get_user_size(x, ptr, size, retval, errret) \
363 do { \
364  retval = 0; \
365  __chk_user_ptr(ptr); \
366  switch (size) { \
367  case 1: \
368  __get_user_asm(x, ptr, retval, "b", "b", "=q", errret); \
369  break; \
370  case 2: \
371  __get_user_asm(x, ptr, retval, "w", "w", "=r", errret); \
372  break; \
373  case 4: \
374  __get_user_asm(x, ptr, retval, "l", "k", "=r", errret); \
375  break; \
376  case 8: \
377  __get_user_asm_u64(x, ptr, retval, errret); \
378  break; \
379  default: \
380  (x) = __get_user_bad(); \
381  } \
382 } while (0)
383 
384 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
385  asm volatile(ASM_STAC "\n" \
386  "1: mov"itype" %2,%"rtype"1\n" \
387  "2: " ASM_CLAC "\n" \
388  ".section .fixup,\"ax\"\n" \
389  "3: mov %3,%0\n" \
390  " xor"itype" %"rtype"1,%"rtype"1\n" \
391  " jmp 2b\n" \
392  ".previous\n" \
393  _ASM_EXTABLE(1b, 3b) \
394  : "=r" (err), ltype(x) \
395  : "m" (__m(addr)), "i" (errret), "0" (err))
396 
397 #define __get_user_size_ex(x, ptr, size) \
398 do { \
399  __chk_user_ptr(ptr); \
400  switch (size) { \
401  case 1: \
402  __get_user_asm_ex(x, ptr, "b", "b", "=q"); \
403  break; \
404  case 2: \
405  __get_user_asm_ex(x, ptr, "w", "w", "=r"); \
406  break; \
407  case 4: \
408  __get_user_asm_ex(x, ptr, "l", "k", "=r"); \
409  break; \
410  case 8: \
411  __get_user_asm_ex_u64(x, ptr); \
412  break; \
413  default: \
414  (x) = __get_user_bad(); \
415  } \
416 } while (0)
417 
418 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
419  asm volatile("1: mov"itype" %1,%"rtype"0\n" \
420  "2:\n" \
421  _ASM_EXTABLE_EX(1b, 2b) \
422  : ltype(x) : "m" (__m(addr)))
423 
424 #define __put_user_nocheck(x, ptr, size) \
425 ({ \
426  int __pu_err; \
427  __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
428  __pu_err; \
429 })
430 
431 #define __get_user_nocheck(x, ptr, size) \
432 ({ \
433  int __gu_err; \
434  unsigned long __gu_val; \
435  __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
436  (x) = (__force __typeof__(*(ptr)))__gu_val; \
437  __gu_err; \
438 })
439 
440 /* FIXME: this hack is definitely wrong -AK */
441 struct __large_struct { unsigned long buf[100]; };
442 #define __m(x) (*(struct __large_struct __user *)(x))
443 
444 /*
445  * Tell gcc we read from memory instead of writing: this is because
446  * we do not write to any memory gcc knows about, so there are no
447  * aliasing issues.
448  */
449 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
450  asm volatile(ASM_STAC "\n" \
451  "1: mov"itype" %"rtype"1,%2\n" \
452  "2: " ASM_CLAC "\n" \
453  ".section .fixup,\"ax\"\n" \
454  "3: mov %3,%0\n" \
455  " jmp 2b\n" \
456  ".previous\n" \
457  _ASM_EXTABLE(1b, 3b) \
458  : "=r"(err) \
459  : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
460 
461 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
462  asm volatile("1: mov"itype" %"rtype"0,%1\n" \
463  "2:\n" \
464  _ASM_EXTABLE_EX(1b, 2b) \
465  : : ltype(x), "m" (__m(addr)))
466 
467 /*
468  * uaccess_try and catch
469  */
470 #define uaccess_try do { \
471  current_thread_info()->uaccess_err = 0; \
472  stac(); \
473  barrier();
474 
475 #define uaccess_catch(err) \
476  clac(); \
477  (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
478 } while (0)
479 
501 #define __get_user(x, ptr) \
502  __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
503 
524 #define __put_user(x, ptr) \
525  __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
526 
527 #define __get_user_unaligned __get_user
528 #define __put_user_unaligned __put_user
529 
530 /*
531  * {get|put}_user_try and catch
532  *
533  * get_user_try {
534  * get_user_ex(...);
535  * } get_user_catch(err)
536  */
537 #define get_user_try uaccess_try
538 #define get_user_catch(err) uaccess_catch(err)
539 
540 #define get_user_ex(x, ptr) do { \
541  unsigned long __gue_val; \
542  __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
543  (x) = (__force __typeof__(*(ptr)))__gue_val; \
544 } while (0)
545 
546 #ifdef CONFIG_X86_WP_WORKS_OK
547 
548 #define put_user_try uaccess_try
549 #define put_user_catch(err) uaccess_catch(err)
550 
551 #define put_user_ex(x, ptr) \
552  __put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
553 
554 #else /* !CONFIG_X86_WP_WORKS_OK */
555 
556 #define put_user_try do { \
557  int __uaccess_err = 0;
558 
559 #define put_user_catch(err) \
560  (err) |= __uaccess_err; \
561 } while (0)
562 
563 #define put_user_ex(x, ptr) do { \
564  __uaccess_err |= __put_user(x, ptr); \
565 } while (0)
566 
567 #endif /* CONFIG_X86_WP_WORKS_OK */
568 
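
The try/catch forms above let a sequence of stores share a single error check, the way the x86 signal-frame code fills a user structure. A hedged sketch with an illustrative frame layout:

    int err = 0;

    put_user_try {
            put_user_ex(sig, &frame->sig);        /* each store may fault */
            put_user_ex(regs->ip, &frame->ip);
    } put_user_catch(err);                        /* err becomes -EFAULT on any fault */

    if (err)
            return -EFAULT;
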
569 extern unsigned long
570 copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
571 extern __must_check long
572 strncpy_from_user(char *dst, const char __user *src, long count);
573 
574 extern __must_check long strlen_user(const char __user *str);
575 extern __must_check long strnlen_user(const char __user *str, long n);
576 
577 unsigned long __must_check clear_user(void __user *mem, unsigned long len);
578 unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
579 
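
clear_user() and __clear_user() return the number of bytes that could not be cleared, so 0 means complete success. A hedged usage sketch (ubuf and len are illustrative):

    if (clear_user(ubuf, len))      /* zero-fill a user buffer */
            return -EFAULT;
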
580 /*
581  * movsl can be slow when source and dest are not both 8-byte aligned
582  */
583 #ifdef CONFIG_X86_INTEL_USERCOPY
584 extern struct movsl_mask {
585  int mask;
586 } ____cacheline_aligned_in_smp movsl_mask;
587 #endif
588 
589 #define ARCH_HAS_NOCACHE_UACCESS 1
590 
591 #ifdef CONFIG_X86_32
592 # include <asm/uaccess_32.h>
593 #else
594 # include <asm/uaccess_64.h>
595 #endif
596 
597 #endif /* _ASM_X86_UACCESS_H */