Linux Kernel 3.7.1
uaccess.h
/*
 *  S390 version
 *  Copyright IBM Corp. 1999, 2000
 *  Author(s): Hartmut Penner ([email protected]),
 *             Martin Schwidefsky ([email protected])
 *
 *  Derived from "include/asm-i386/uaccess.h"
 */
#ifndef __S390_UACCESS_H
#define __S390_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/sched.h>
#include <linux/errno.h>
#include <asm/ctl_reg.h>

#define VERIFY_READ     0
#define VERIFY_WRITE    1


/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(a)  ((mm_segment_t) { (a) })


#define KERNEL_DS       MAKE_MM_SEG(0)
#define USER_DS         MAKE_MM_SEG(1)

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current->thread.mm_segment)

#define set_fs(x) \
({ \
        unsigned long __pto; \
        current->thread.mm_segment = (x); \
        __pto = current->thread.mm_segment.ar4 ? \
                S390_lowcore.user_asce : S390_lowcore.kernel_asce; \
        __ctl_load(__pto, 7, 7); \
})

#define segment_eq(a,b) ((a).ar4 == (b).ar4)

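/*
 * Illustrative sketch, not part of the original header: the classic pattern
 * for temporarily switching to KERNEL_DS so that code which normally checks
 * user pointers can operate on kernel buffers. The helper name is made up.
 */
static inline void example_with_kernel_ds(void (*fn)(void))
{
        mm_segment_t old_fs = get_fs(); /* remember the current segment */

        set_fs(KERNEL_DS);      /* bypass checking, reload control reg 7 */
        fn();                   /* run code that uses the uaccess routines */
        set_fs(old_fs);         /* restore the caller's segment */
}
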
static inline int __range_ok(unsigned long addr, unsigned long size)
{
        return 1;
}

#define __access_ok(addr, size) \
({ \
        __chk_user_ptr(addr); \
        __range_ok((unsigned long)(addr), (size)); \
})

#define access_ok(type, addr, size) __access_ok(addr, size)

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
        int insn, fixup;
};

static inline unsigned long extable_insn(const struct exception_table_entry *x)
{
        return (unsigned long)&x->insn + x->insn;
}

static inline unsigned long extable_fixup(const struct exception_table_entry *x)
{
        return (unsigned long)&x->fixup + x->fixup;
}

#define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE

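/*
 * Illustrative sketch, not part of the original header: how a fixup search
 * might consume the relative encoding above. Each entry stores 32-bit
 * offsets relative to its own fields, which extable_insn()/extable_fixup()
 * turn back into absolute addresses. The function name is made up; the real
 * search lives in the architecture's extable code.
 */
static inline unsigned long
example_search_fixup(const struct exception_table_entry *first,
                     const struct exception_table_entry *last,
                     unsigned long faulting_insn)
{
        const struct exception_table_entry *e;

        for (e = first; e <= last; e++)         /* linear scan for clarity */
                if (extable_insn(e) == faulting_insn)
                        return extable_fixup(e);        /* resume address */
        return 0;                               /* no fixup registered */
}
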
struct uaccess_ops {
        size_t (*copy_from_user)(size_t, const void __user *, void *);
        size_t (*copy_from_user_small)(size_t, const void __user *, void *);
        size_t (*copy_to_user)(size_t, void __user *, const void *);
        size_t (*copy_to_user_small)(size_t, void __user *, const void *);
        size_t (*copy_in_user)(size_t, void __user *, const void __user *);
        size_t (*clear_user)(size_t, void __user *);
        size_t (*strnlen_user)(size_t, const char __user *);
        size_t (*strncpy_from_user)(size_t, const char __user *, char *);
        int (*futex_atomic_op)(int op, u32 __user *, int oparg, int *old);
};

extern struct uaccess_ops uaccess;
extern struct uaccess_ops uaccess_std;
extern struct uaccess_ops uaccess_mvcos;
extern struct uaccess_ops uaccess_mvcos_switch;
extern struct uaccess_ops uaccess_pt;

extern int __handle_fault(unsigned long, unsigned long, int);

static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
        size = uaccess.copy_to_user_small(size, ptr, x);
        return size ? -EFAULT : size;
}

static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
        size = uaccess.copy_from_user_small(size, ptr, x);
        return size ? -EFAULT : size;
}

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 */
#define __put_user(x, ptr) \
({ \
        __typeof__(*(ptr)) __x = (x); \
        int __pu_err = -EFAULT; \
        __chk_user_ptr(ptr); \
        switch (sizeof (*(ptr))) { \
        case 1: \
        case 2: \
        case 4: \
        case 8: \
                __pu_err = __put_user_fn(sizeof (*(ptr)), \
                                         ptr, &__x); \
                break; \
        default: \
                __put_user_bad(); \
                break; \
        } \
        __pu_err; \
})

#define put_user(x, ptr) \
({ \
        might_fault(); \
        __put_user(x, ptr); \
})


extern int __put_user_bad(void) __attribute__((noreturn));

#define __get_user(x, ptr) \
({ \
        int __gu_err = -EFAULT; \
        __chk_user_ptr(ptr); \
        switch (sizeof(*(ptr))) { \
        case 1: { \
                unsigned char __x; \
                __gu_err = __get_user_fn(sizeof (*(ptr)), \
                                         ptr, &__x); \
                (x) = *(__force __typeof__(*(ptr)) *) &__x; \
                break; \
        }; \
        case 2: { \
                unsigned short __x; \
                __gu_err = __get_user_fn(sizeof (*(ptr)), \
                                         ptr, &__x); \
                (x) = *(__force __typeof__(*(ptr)) *) &__x; \
                break; \
        }; \
        case 4: { \
                unsigned int __x; \
                __gu_err = __get_user_fn(sizeof (*(ptr)), \
                                         ptr, &__x); \
                (x) = *(__force __typeof__(*(ptr)) *) &__x; \
                break; \
        }; \
        case 8: { \
                unsigned long long __x; \
                __gu_err = __get_user_fn(sizeof (*(ptr)), \
                                         ptr, &__x); \
                (x) = *(__force __typeof__(*(ptr)) *) &__x; \
                break; \
        }; \
        default: \
                __get_user_bad(); \
                break; \
        } \
        __gu_err; \
})

#define get_user(x, ptr) \
({ \
        might_fault(); \
        __get_user(x, ptr); \
})

extern int __get_user_bad(void) __attribute__((noreturn));

#define __put_user_unaligned __put_user
#define __get_user_unaligned __get_user

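/*
 * Illustrative usage, not part of the original header: the access size is
 * taken from the pointer type, so the same macros handle 1, 2, 4 and 8 byte
 * objects. The helper below is made up.
 */
static inline int example_bump_counter(int __user *uptr)
{
        int val;

        if (get_user(val, uptr))        /* 4-byte read, 0 or -EFAULT */
                return -EFAULT;
        return put_user(val + 1, uptr); /* 4-byte write back */
}
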
static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
        if (__builtin_constant_p(n) && (n <= 256))
                return uaccess.copy_to_user_small(n, to, from);
        else
                return uaccess.copy_to_user(n, to, from);
}

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
        might_fault();
        if (access_ok(VERIFY_WRITE, to, n))
                n = __copy_to_user(to, from, n);
        return n;
}

static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
        if (__builtin_constant_p(n) && (n <= 256))
                return uaccess.copy_from_user_small(n, from, to);
        else
                return uaccess.copy_from_user(n, from, to);
}

extern void copy_from_user_overflow(void)
#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
__compiletime_warning("copy_from_user() buffer size is not provably correct")
#endif
;

static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
        unsigned int sz = __compiletime_object_size(to);

        might_fault();
        if (unlikely(sz != -1 && sz < n)) {
                copy_from_user_overflow();
                return n;
        }
        if (access_ok(VERIFY_READ, from, n))
                n = __copy_from_user(to, from, n);
        else
                memset(to, 0, n);
        return n;
}

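/*
 * Illustrative sketch, not part of the original header: both copy routines
 * return the number of bytes that could NOT be copied, so any non-zero
 * result is usually turned into -EFAULT. The struct and helper are made up.
 */
struct example_req {
        unsigned long arg;
};

static inline long example_handle_ioctl(void __user *argp)
{
        struct example_req req;

        if (copy_from_user(&req, argp, sizeof(req)))
                return -EFAULT;         /* some bytes were not readable */
        req.arg++;                      /* work on the private kernel copy */
        if (copy_to_user(argp, &req, sizeof(req)))
                return -EFAULT;         /* some bytes were not writable */
        return 0;
}
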
static inline unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
        return uaccess.copy_in_user(n, to, from);
}

static inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
        might_fault();
        if (__access_ok(from,n) && __access_ok(to,n))
                n = __copy_in_user(to, from, n);
        return n;
}

/*
 * Copy a null terminated string from userspace.
 */
static inline long __must_check
strncpy_from_user(char *dst, const char __user *src, long count)
{
        long res = -EFAULT;
        might_fault();
        if (access_ok(VERIFY_READ, src, 1))
                res = uaccess.strncpy_from_user(count, src, dst);
        return res;
}

static inline unsigned long
strnlen_user(const char __user * src, unsigned long n)
{
        might_fault();
        return uaccess.strnlen_user(n, src);
}

#define strlen_user(str) strnlen_user(str, ~0UL)

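/*
 * Illustrative usage, not part of the original header: pulling a
 * NUL-terminated string from user space into a fixed-size kernel buffer.
 * The helper is made up; exact truncation behaviour follows the uaccess
 * backend selected at boot.
 */
static inline long example_fetch_name(char *buf, long buflen,
                                      const char __user *uname)
{
        long len = strncpy_from_user(buf, uname, buflen);

        if (len < 0)                    /* -EFAULT: source not accessible */
                return len;
        buf[buflen - 1] = '\0';         /* be defensive about truncation */
        return len;
}
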
/*
 * Zero Userspace
 */

static inline unsigned long __must_check
__clear_user(void __user *to, unsigned long n)
{
        return uaccess.clear_user(n, to);
}

static inline unsigned long __must_check
clear_user(void __user *to, unsigned long n)
{
        might_fault();
        if (access_ok(VERIFY_WRITE, to, n))
                n = uaccess.clear_user(n, to);
        return n;
}

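/*
 * Illustrative sketch, not part of the original header: zeroing the unread
 * tail of a user buffer after a short read, a common pattern in read()
 * implementations. The helper is made up.
 */
static inline long example_pad_tail(char __user *buf, unsigned long copied,
                                    unsigned long requested)
{
        if (copied < requested &&
            clear_user(buf + copied, requested - copied))
                return -EFAULT;         /* destination not writable */
        return copied;
}
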
extern int copy_to_user_real(void __user *dest, void *src, size_t count);
extern int copy_from_user_real(void *dest, void __user *src, size_t count);

#endif /* __S390_UACCESS_H */