Linux Kernel 3.7.1
usercopy_32.c
/*
 * User address space access functions.
 * The non inlined parts of asm-i386/uaccess.h are here.
 *
 * Copyright 1997 Andi Kleen <[email protected]>
 * Copyright 1997 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/interrupt.h>
#include <asm/uaccess.h>
#include <asm/mmx.h>
#include <asm/asm.h>

#ifdef CONFIG_X86_INTEL_USERCOPY
/*
 * Alignment at which movsl is preferred for bulk memory copies.
 */
struct movsl_mask movsl_mask __read_mostly;
#endif

static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
	if (n >= 64 && ((a1 ^ a2) & movsl_mask.mask))
		return 0;
#endif
	return 1;
}
#define movsl_is_ok(a1, a2, n) \
	__movsl_is_ok((unsigned long)(a1), (unsigned long)(a2), (n))
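
For context, this alignment check is how the copy routines below choose between the plain string copy and the unrolled Intel copy. A minimal sketch of the calling pattern (it mirrors __copy_to_user_ll() later in this file):

	if (movsl_is_ok(to, from, n))
		__copy_user(to, from, n);		/* rep movsl/movsb path */
	else
		n = __copy_user_intel(to, from, n);	/* unrolled 64-byte loop */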

/*
 * Zero Userspace
 */

#define __do_clear_user(addr,size) \
do { \
	int __d0; \
	might_fault(); \
	__asm__ __volatile__( \
		ASM_STAC "\n" \
		"0: rep; stosl\n" \
		" movl %2,%0\n" \
		"1: rep; stosb\n" \
		"2: " ASM_CLAC "\n" \
		".section .fixup,\"ax\"\n" \
		"3: lea 0(%2,%0,4),%0\n" \
		" jmp 2b\n" \
		".previous\n" \
		_ASM_EXTABLE(0b,3b) \
		_ASM_EXTABLE(1b,2b) \
		: "=&c"(size), "=&D" (__d0) \
		: "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0)); \
} while (0)
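
Stripped of the fault handling, the macro clears size/4 32-bit words with "rep stosl" and then the remaining size&3 bytes with "rep stosb"; the .fixup branch recomputes how many bytes were left unwritten when a fault occurs. A small userspace sketch of that dword/byte split (illustrative only, hypothetical helper name):

	#include <stddef.h>
	#include <string.h>

	static void clear_split(void *addr, size_t size)
	{
		size_t dwords = size / 4;	/* "rep stosl" clears 4 bytes per iteration */
		size_t tail = size & 3;		/* "rep stosb" clears the remainder */

		memset(addr, 0, dwords * 4);
		memset((char *)addr + dwords * 4, 0, tail);
	}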

unsigned long
clear_user(void __user *to, unsigned long n)
{
	might_fault();
	if (access_ok(VERIFY_WRITE, to, n))
		__do_clear_user(to, n);
	return n;
}
EXPORT_SYMBOL(clear_user);

unsigned long
__clear_user(void __user *to, unsigned long n)
{
	__do_clear_user(to, n);
	return n;
}
EXPORT_SYMBOL(__clear_user);

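Usage note: clear_user() returns the number of bytes that could not be zeroed, so callers treat any nonzero return as a fault. A hypothetical driver-style sketch (names invented for illustration, not from this file):

	#include <linux/fs.h>
	#include <linux/uaccess.h>

	/* Hypothetical read() method that hands the caller a zero-filled buffer. */
	static ssize_t zeros_read(struct file *file, char __user *buf,
				  size_t count, loff_t *ppos)
	{
		if (clear_user(buf, count))	/* nonzero: some bytes not cleared */
			return -EFAULT;
		return count;
	}
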
#ifdef CONFIG_X86_INTEL_USERCOPY
static unsigned long
__copy_user_intel(void __user *to, const void *from, unsigned long size)
{
	int d0, d1;
	__asm__ __volatile__(
		" .align 2,0x90\n"
		"1: movl 32(%4), %%eax\n"
		" cmpl $67, %0\n"
		" jbe 3f\n"
		"2: movl 64(%4), %%eax\n"
		" .align 2,0x90\n"
		"3: movl 0(%4), %%eax\n"
		"4: movl 4(%4), %%edx\n"
		"5: movl %%eax, 0(%3)\n"
		"6: movl %%edx, 4(%3)\n"
		"7: movl 8(%4), %%eax\n"
		"8: movl 12(%4),%%edx\n"
		"9: movl %%eax, 8(%3)\n"
		"10: movl %%edx, 12(%3)\n"
		"11: movl 16(%4), %%eax\n"
		"12: movl 20(%4), %%edx\n"
		"13: movl %%eax, 16(%3)\n"
		"14: movl %%edx, 20(%3)\n"
		"15: movl 24(%4), %%eax\n"
		"16: movl 28(%4), %%edx\n"
		"17: movl %%eax, 24(%3)\n"
		"18: movl %%edx, 28(%3)\n"
		"19: movl 32(%4), %%eax\n"
		"20: movl 36(%4), %%edx\n"
		"21: movl %%eax, 32(%3)\n"
		"22: movl %%edx, 36(%3)\n"
		"23: movl 40(%4), %%eax\n"
		"24: movl 44(%4), %%edx\n"
		"25: movl %%eax, 40(%3)\n"
		"26: movl %%edx, 44(%3)\n"
		"27: movl 48(%4), %%eax\n"
		"28: movl 52(%4), %%edx\n"
		"29: movl %%eax, 48(%3)\n"
		"30: movl %%edx, 52(%3)\n"
		"31: movl 56(%4), %%eax\n"
		"32: movl 60(%4), %%edx\n"
		"33: movl %%eax, 56(%3)\n"
		"34: movl %%edx, 60(%3)\n"
		" addl $-64, %0\n"
		" addl $64, %4\n"
		" addl $64, %3\n"
		" cmpl $63, %0\n"
		" ja 1b\n"
		"35: movl %0, %%eax\n"
		" shrl $2, %0\n"
		" andl $3, %%eax\n"
		" cld\n"
		"99: rep; movsl\n"
		"36: movl %%eax, %0\n"
		"37: rep; movsb\n"
		"100:\n"
		".section .fixup,\"ax\"\n"
		"101: lea 0(%%eax,%0,4),%0\n"
		" jmp 100b\n"
		".previous\n"
		_ASM_EXTABLE(1b,100b)
		_ASM_EXTABLE(2b,100b)
		_ASM_EXTABLE(3b,100b)
		_ASM_EXTABLE(4b,100b)
		_ASM_EXTABLE(5b,100b)
		_ASM_EXTABLE(6b,100b)
		_ASM_EXTABLE(7b,100b)
		_ASM_EXTABLE(8b,100b)
		_ASM_EXTABLE(9b,100b)
		_ASM_EXTABLE(10b,100b)
		_ASM_EXTABLE(11b,100b)
		_ASM_EXTABLE(12b,100b)
		_ASM_EXTABLE(13b,100b)
		_ASM_EXTABLE(14b,100b)
		_ASM_EXTABLE(15b,100b)
		_ASM_EXTABLE(16b,100b)
		_ASM_EXTABLE(17b,100b)
		_ASM_EXTABLE(18b,100b)
		_ASM_EXTABLE(19b,100b)
		_ASM_EXTABLE(20b,100b)
		_ASM_EXTABLE(21b,100b)
		_ASM_EXTABLE(22b,100b)
		_ASM_EXTABLE(23b,100b)
		_ASM_EXTABLE(24b,100b)
		_ASM_EXTABLE(25b,100b)
		_ASM_EXTABLE(26b,100b)
		_ASM_EXTABLE(27b,100b)
		_ASM_EXTABLE(28b,100b)
		_ASM_EXTABLE(29b,100b)
		_ASM_EXTABLE(30b,100b)
		_ASM_EXTABLE(31b,100b)
		_ASM_EXTABLE(32b,100b)
		_ASM_EXTABLE(33b,100b)
		_ASM_EXTABLE(34b,100b)
		_ASM_EXTABLE(35b,100b)
		_ASM_EXTABLE(36b,100b)
		_ASM_EXTABLE(37b,100b)
		_ASM_EXTABLE(99b,101b)
		: "=&c"(size), "=&D" (d0), "=&S" (d1)
		: "1"(to), "2"(from), "0"(size)
		: "eax", "edx", "memory");
	return size;
}

static unsigned long
__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
{
	int d0, d1;
	__asm__ __volatile__(
		" .align 2,0x90\n"
		"0: movl 32(%4), %%eax\n"
		" cmpl $67, %0\n"
		" jbe 2f\n"
		"1: movl 64(%4), %%eax\n"
		" .align 2,0x90\n"
		"2: movl 0(%4), %%eax\n"
		"21: movl 4(%4), %%edx\n"
		" movl %%eax, 0(%3)\n"
		" movl %%edx, 4(%3)\n"
		"3: movl 8(%4), %%eax\n"
		"31: movl 12(%4),%%edx\n"
		" movl %%eax, 8(%3)\n"
		" movl %%edx, 12(%3)\n"
		"4: movl 16(%4), %%eax\n"
		"41: movl 20(%4), %%edx\n"
		" movl %%eax, 16(%3)\n"
		" movl %%edx, 20(%3)\n"
		"10: movl 24(%4), %%eax\n"
		"51: movl 28(%4), %%edx\n"
		" movl %%eax, 24(%3)\n"
		" movl %%edx, 28(%3)\n"
		"11: movl 32(%4), %%eax\n"
		"61: movl 36(%4), %%edx\n"
		" movl %%eax, 32(%3)\n"
		" movl %%edx, 36(%3)\n"
		"12: movl 40(%4), %%eax\n"
		"71: movl 44(%4), %%edx\n"
		" movl %%eax, 40(%3)\n"
		" movl %%edx, 44(%3)\n"
		"13: movl 48(%4), %%eax\n"
		"81: movl 52(%4), %%edx\n"
		" movl %%eax, 48(%3)\n"
		" movl %%edx, 52(%3)\n"
		"14: movl 56(%4), %%eax\n"
		"91: movl 60(%4), %%edx\n"
		" movl %%eax, 56(%3)\n"
		" movl %%edx, 60(%3)\n"
		" addl $-64, %0\n"
		" addl $64, %4\n"
		" addl $64, %3\n"
		" cmpl $63, %0\n"
		" ja 0b\n"
		"5: movl %0, %%eax\n"
		" shrl $2, %0\n"
		" andl $3, %%eax\n"
		" cld\n"
		"6: rep; movsl\n"
		" movl %%eax,%0\n"
		"7: rep; movsb\n"
		"8:\n"
		".section .fixup,\"ax\"\n"
		"9: lea 0(%%eax,%0,4),%0\n"
		"16: pushl %0\n"
		" pushl %%eax\n"
		" xorl %%eax,%%eax\n"
		" rep; stosb\n"
		" popl %%eax\n"
		" popl %0\n"
		" jmp 8b\n"
		".previous\n"
		_ASM_EXTABLE(0b,16b)
		_ASM_EXTABLE(1b,16b)
		_ASM_EXTABLE(2b,16b)
		_ASM_EXTABLE(21b,16b)
		_ASM_EXTABLE(3b,16b)
		_ASM_EXTABLE(31b,16b)
		_ASM_EXTABLE(4b,16b)
		_ASM_EXTABLE(41b,16b)
		_ASM_EXTABLE(10b,16b)
		_ASM_EXTABLE(51b,16b)
		_ASM_EXTABLE(11b,16b)
		_ASM_EXTABLE(61b,16b)
		_ASM_EXTABLE(12b,16b)
		_ASM_EXTABLE(71b,16b)
		_ASM_EXTABLE(13b,16b)
		_ASM_EXTABLE(81b,16b)
		_ASM_EXTABLE(14b,16b)
		_ASM_EXTABLE(91b,16b)
		_ASM_EXTABLE(6b,9b)
		_ASM_EXTABLE(7b,16b)
		: "=&c"(size), "=&D" (d0), "=&S" (d1)
		: "1"(to), "2"(from), "0"(size)
		: "eax", "edx", "memory");
	return size;
}

/*
 * Non Temporal Hint version of __copy_user_zeroing_intel. It is cache aware.
 */

static unsigned long __copy_user_zeroing_intel_nocache(void *to,
				const void __user *from, unsigned long size)
{
	int d0, d1;

	__asm__ __volatile__(
		" .align 2,0x90\n"
		"0: movl 32(%4), %%eax\n"
		" cmpl $67, %0\n"
		" jbe 2f\n"
		"1: movl 64(%4), %%eax\n"
		" .align 2,0x90\n"
		"2: movl 0(%4), %%eax\n"
		"21: movl 4(%4), %%edx\n"
		" movnti %%eax, 0(%3)\n"
		" movnti %%edx, 4(%3)\n"
		"3: movl 8(%4), %%eax\n"
		"31: movl 12(%4),%%edx\n"
		" movnti %%eax, 8(%3)\n"
		" movnti %%edx, 12(%3)\n"
		"4: movl 16(%4), %%eax\n"
		"41: movl 20(%4), %%edx\n"
		" movnti %%eax, 16(%3)\n"
		" movnti %%edx, 20(%3)\n"
		"10: movl 24(%4), %%eax\n"
		"51: movl 28(%4), %%edx\n"
		" movnti %%eax, 24(%3)\n"
		" movnti %%edx, 28(%3)\n"
		"11: movl 32(%4), %%eax\n"
		"61: movl 36(%4), %%edx\n"
		" movnti %%eax, 32(%3)\n"
		" movnti %%edx, 36(%3)\n"
		"12: movl 40(%4), %%eax\n"
		"71: movl 44(%4), %%edx\n"
		" movnti %%eax, 40(%3)\n"
		" movnti %%edx, 44(%3)\n"
		"13: movl 48(%4), %%eax\n"
		"81: movl 52(%4), %%edx\n"
		" movnti %%eax, 48(%3)\n"
		" movnti %%edx, 52(%3)\n"
		"14: movl 56(%4), %%eax\n"
		"91: movl 60(%4), %%edx\n"
		" movnti %%eax, 56(%3)\n"
		" movnti %%edx, 60(%3)\n"
		" addl $-64, %0\n"
		" addl $64, %4\n"
		" addl $64, %3\n"
		" cmpl $63, %0\n"
		" ja 0b\n"
		" sfence \n"
		"5: movl %0, %%eax\n"
		" shrl $2, %0\n"
		" andl $3, %%eax\n"
		" cld\n"
		"6: rep; movsl\n"
		" movl %%eax,%0\n"
		"7: rep; movsb\n"
		"8:\n"
		".section .fixup,\"ax\"\n"
		"9: lea 0(%%eax,%0,4),%0\n"
		"16: pushl %0\n"
		" pushl %%eax\n"
		" xorl %%eax,%%eax\n"
		" rep; stosb\n"
		" popl %%eax\n"
		" popl %0\n"
		" jmp 8b\n"
		".previous\n"
		_ASM_EXTABLE(0b,16b)
		_ASM_EXTABLE(1b,16b)
		_ASM_EXTABLE(2b,16b)
		_ASM_EXTABLE(21b,16b)
		_ASM_EXTABLE(3b,16b)
		_ASM_EXTABLE(31b,16b)
		_ASM_EXTABLE(4b,16b)
		_ASM_EXTABLE(41b,16b)
		_ASM_EXTABLE(10b,16b)
		_ASM_EXTABLE(51b,16b)
		_ASM_EXTABLE(11b,16b)
		_ASM_EXTABLE(61b,16b)
		_ASM_EXTABLE(12b,16b)
		_ASM_EXTABLE(71b,16b)
		_ASM_EXTABLE(13b,16b)
		_ASM_EXTABLE(81b,16b)
		_ASM_EXTABLE(14b,16b)
		_ASM_EXTABLE(91b,16b)
		_ASM_EXTABLE(6b,9b)
		_ASM_EXTABLE(7b,16b)
		: "=&c"(size), "=&D" (d0), "=&S" (d1)
		: "1"(to), "2"(from), "0"(size)
		: "eax", "edx", "memory");
	return size;
}
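
The movnti stores above write to memory with a non-temporal hint so the copied data does not evict useful cache lines, and the trailing sfence orders those weakly-ordered stores before the function returns. A hypothetical userspace sketch of the same idea using compiler intrinsics (not part of this file):

	#include <emmintrin.h>	/* _mm_stream_si32 (SSE2); _mm_sfence comes via xmmintrin.h (SSE) */
	#include <stddef.h>

	/* Copy 32-bit words with a non-temporal hint, then fence,
	 * mirroring the "movnti ... sfence" sequence above. */
	static void stream_copy_words(int *dst, const int *src, size_t nwords)
	{
		size_t i;

		for (i = 0; i < nwords; i++)
			_mm_stream_si32(&dst[i], src[i]);
		_mm_sfence();
	}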

static unsigned long __copy_user_intel_nocache(void *to,
				const void __user *from, unsigned long size)
{
	int d0, d1;

	__asm__ __volatile__(
		" .align 2,0x90\n"
		"0: movl 32(%4), %%eax\n"
		" cmpl $67, %0\n"
		" jbe 2f\n"
		"1: movl 64(%4), %%eax\n"
		" .align 2,0x90\n"
		"2: movl 0(%4), %%eax\n"
		"21: movl 4(%4), %%edx\n"
		" movnti %%eax, 0(%3)\n"
		" movnti %%edx, 4(%3)\n"
		"3: movl 8(%4), %%eax\n"
		"31: movl 12(%4),%%edx\n"
		" movnti %%eax, 8(%3)\n"
		" movnti %%edx, 12(%3)\n"
		"4: movl 16(%4), %%eax\n"
		"41: movl 20(%4), %%edx\n"
		" movnti %%eax, 16(%3)\n"
		" movnti %%edx, 20(%3)\n"
		"10: movl 24(%4), %%eax\n"
		"51: movl 28(%4), %%edx\n"
		" movnti %%eax, 24(%3)\n"
		" movnti %%edx, 28(%3)\n"
		"11: movl 32(%4), %%eax\n"
		"61: movl 36(%4), %%edx\n"
		" movnti %%eax, 32(%3)\n"
		" movnti %%edx, 36(%3)\n"
		"12: movl 40(%4), %%eax\n"
		"71: movl 44(%4), %%edx\n"
		" movnti %%eax, 40(%3)\n"
		" movnti %%edx, 44(%3)\n"
		"13: movl 48(%4), %%eax\n"
		"81: movl 52(%4), %%edx\n"
		" movnti %%eax, 48(%3)\n"
		" movnti %%edx, 52(%3)\n"
		"14: movl 56(%4), %%eax\n"
		"91: movl 60(%4), %%edx\n"
		" movnti %%eax, 56(%3)\n"
		" movnti %%edx, 60(%3)\n"
		" addl $-64, %0\n"
		" addl $64, %4\n"
		" addl $64, %3\n"
		" cmpl $63, %0\n"
		" ja 0b\n"
		" sfence \n"
		"5: movl %0, %%eax\n"
		" shrl $2, %0\n"
		" andl $3, %%eax\n"
		" cld\n"
		"6: rep; movsl\n"
		" movl %%eax,%0\n"
		"7: rep; movsb\n"
		"8:\n"
		".section .fixup,\"ax\"\n"
		"9: lea 0(%%eax,%0,4),%0\n"
		"16: jmp 8b\n"
		".previous\n"
		_ASM_EXTABLE(0b,16b)
		_ASM_EXTABLE(1b,16b)
		_ASM_EXTABLE(2b,16b)
		_ASM_EXTABLE(21b,16b)
		_ASM_EXTABLE(3b,16b)
		_ASM_EXTABLE(31b,16b)
		_ASM_EXTABLE(4b,16b)
		_ASM_EXTABLE(41b,16b)
		_ASM_EXTABLE(10b,16b)
		_ASM_EXTABLE(51b,16b)
		_ASM_EXTABLE(11b,16b)
		_ASM_EXTABLE(61b,16b)
		_ASM_EXTABLE(12b,16b)
		_ASM_EXTABLE(71b,16b)
		_ASM_EXTABLE(13b,16b)
		_ASM_EXTABLE(81b,16b)
		_ASM_EXTABLE(14b,16b)
		_ASM_EXTABLE(91b,16b)
		_ASM_EXTABLE(6b,9b)
		_ASM_EXTABLE(7b,16b)
		: "=&c"(size), "=&D" (d0), "=&S" (d1)
		: "1"(to), "2"(from), "0"(size)
		: "eax", "edx", "memory");
	return size;
}

#else

/*
 * Leave these declared but undefined. There should not be any
 * references to them.
 */
unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
					unsigned long size);
unsigned long __copy_user_intel(void __user *to, const void *from,
					unsigned long size);
unsigned long __copy_user_zeroing_intel_nocache(void *to,
				const void __user *from, unsigned long size);
#endif /* CONFIG_X86_INTEL_USERCOPY */

/* Generic arbitrary sized copy. */
#define __copy_user(to, from, size) \
do { \
	int __d0, __d1, __d2; \
	__asm__ __volatile__( \
		" cmp $7,%0\n" \
		" jbe 1f\n" \
		" movl %1,%0\n" \
		" negl %0\n" \
		" andl $7,%0\n" \
		" subl %0,%3\n" \
		"4: rep; movsb\n" \
		" movl %3,%0\n" \
		" shrl $2,%0\n" \
		" andl $3,%3\n" \
		" .align 2,0x90\n" \
		"0: rep; movsl\n" \
		" movl %3,%0\n" \
		"1: rep; movsb\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"5: addl %3,%0\n" \
		" jmp 2b\n" \
		"3: lea 0(%3,%0,4),%0\n" \
		" jmp 2b\n" \
		".previous\n" \
		_ASM_EXTABLE(4b,5b) \
		_ASM_EXTABLE(0b,3b) \
		_ASM_EXTABLE(1b,2b) \
		: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
		: "3"(size), "0"(size), "1"(to), "2"(from) \
		: "memory"); \
} while (0)

#define __copy_user_zeroing(to, from, size) \
do { \
	int __d0, __d1, __d2; \
	__asm__ __volatile__( \
		" cmp $7,%0\n" \
		" jbe 1f\n" \
		" movl %1,%0\n" \
		" negl %0\n" \
		" andl $7,%0\n" \
		" subl %0,%3\n" \
		"4: rep; movsb\n" \
		" movl %3,%0\n" \
		" shrl $2,%0\n" \
		" andl $3,%3\n" \
		" .align 2,0x90\n" \
		"0: rep; movsl\n" \
		" movl %3,%0\n" \
		"1: rep; movsb\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"5: addl %3,%0\n" \
		" jmp 6f\n" \
		"3: lea 0(%3,%0,4),%0\n" \
		"6: pushl %0\n" \
		" pushl %%eax\n" \
		" xorl %%eax,%%eax\n" \
		" rep; stosb\n" \
		" popl %%eax\n" \
		" popl %0\n" \
		" jmp 2b\n" \
		".previous\n" \
		_ASM_EXTABLE(4b,5b) \
		_ASM_EXTABLE(0b,3b) \
		_ASM_EXTABLE(1b,6b) \
		: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
		: "3"(size), "0"(size), "1"(to), "2"(from) \
		: "memory"); \
} while (0)

unsigned long __copy_to_user_ll(void __user *to, const void *from,
				unsigned long n)
{
#ifndef CONFIG_X86_WP_WORKS_OK
	if (unlikely(boot_cpu_data.wp_works_ok == 0) &&
			((unsigned long)to) < TASK_SIZE) {
		/*
		 * When we are in an atomic section (see
		 * mm/filemap.c:file_read_actor), return the full
		 * length to take the slow path.
		 */
		if (in_atomic())
			return n;

		/*
		 * CPU does not honor the WP bit when writing
		 * from supervisory mode, and due to preemption or SMP,
		 * the page tables can change at any time.
		 * Do it manually. Manfred <[email protected]>
		 */
		while (n) {
			unsigned long offset = ((unsigned long)to)%PAGE_SIZE;
			unsigned long len = PAGE_SIZE - offset;
			int retval;
			struct page *pg;
			void *maddr;

			if (len > n)
				len = n;

survive:
			down_read(&current->mm->mmap_sem);
			retval = get_user_pages(current, current->mm,
					(unsigned long)to, 1, 1, 0, &pg, NULL);

			if (retval == -ENOMEM && is_global_init(current)) {
				up_read(&current->mm->mmap_sem);
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto survive;
			}

			if (retval != 1) {
				up_read(&current->mm->mmap_sem);
				break;
			}

			maddr = kmap_atomic(pg);
			memcpy(maddr + offset, from, len);
			kunmap_atomic(maddr);
			set_page_dirty_lock(pg);
			put_page(pg);
			up_read(&current->mm->mmap_sem);

			from += len;
			to += len;
			n -= len;
		}
		return n;
	}
#endif
	stac();
	if (movsl_is_ok(to, from, n))
		__copy_user(to, from, n);
	else
		n = __copy_user_intel(to, from, n);
	clac();
	return n;
}
EXPORT_SYMBOL(__copy_to_user_ll);

unsigned long __copy_from_user_ll(void *to, const void __user *from,
				unsigned long n)
{
	stac();
	if (movsl_is_ok(to, from, n))
		__copy_user_zeroing(to, from, n);
	else
		n = __copy_user_zeroing_intel(to, from, n);
	clac();
	return n;
}
EXPORT_SYMBOL(__copy_from_user_ll);

unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
					 unsigned long n)
{
	stac();
	if (movsl_is_ok(to, from, n))
		__copy_user(to, from, n);
	else
		n = __copy_user_intel((void __user *)to,
				      (const void *)from, n);
	clac();
	return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nozero);

unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
					  unsigned long n)
{
	stac();
#ifdef CONFIG_X86_INTEL_USERCOPY
	if (n > 64 && cpu_has_xmm2)
		n = __copy_user_zeroing_intel_nocache(to, from, n);
	else
		__copy_user_zeroing(to, from, n);
#else
	__copy_user_zeroing(to, from, n);
#endif
	clac();
	return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nocache);

unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
						 unsigned long n)
{
	stac();
#ifdef CONFIG_X86_INTEL_USERCOPY
	if (n > 64 && cpu_has_xmm2)
		n = __copy_user_intel_nocache(to, from, n);
	else
		__copy_user(to, from, n);
#else
	__copy_user(to, from, n);
#endif
	clac();
	return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);

unsigned long
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __copy_to_user(to, from, n);
	return n;
}
EXPORT_SYMBOL(copy_to_user);

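Usage note: like the other user-copy primitives in this file, copy_to_user() returns the number of bytes left uncopied, and 0 on success. A hypothetical ioctl-style caller (the structure and function names are invented for illustration):

	#include <linux/uaccess.h>

	struct dev_info {			/* illustrative structure */
		unsigned int version;
		unsigned int flags;
	};

	static long dev_get_info(void __user *argp)
	{
		struct dev_info info = { .version = 1, .flags = 0 };

		if (copy_to_user(argp, &info, sizeof(info)))
			return -EFAULT;		/* some bytes were not copied */
		return 0;
	}
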
unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n))
		n = __copy_from_user(to, from, n);
	else
		memset(to, 0, n);
	return n;
}
EXPORT_SYMBOL(_copy_from_user);

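Usage note: as the memset() above shows, a rejected copy leaves the destination buffer zeroed (and the zeroing variants clear the uncopied tail on a fault) rather than exposing stale kernel data, so callers only need to check the return value. A hypothetical caller, reusing the illustrative struct dev_info from the sketch above:

	static long dev_set_info(const void __user *argp)
	{
		struct dev_info info;

		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;		/* some bytes were not copied */
		/* ... use info.version and info.flags ... */
		return 0;
	}
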
void copy_from_user_overflow(void)
{
	WARN(1, "Buffer overflow detected!\n");
}
EXPORT_SYMBOL(copy_from_user_overflow);