Linux Kernel 3.7.1
uaccess.h
1 /*
2  * Authors: Hans-Peter Nilsson ([email protected])
3  *
4  */
5 #ifndef _CRIS_ARCH_UACCESS_H
6 #define _CRIS_ARCH_UACCESS_H
7 
8 /*
9  * We don't tell gcc that we are accessing memory, but this is OK
10  * because we do not write to any memory gcc knows about, so there
11  * are no aliasing issues.
12  *
13  * Note that PC at a fault is the address *at* the faulting
14  * instruction for CRISv32.
15  */
16 #define __put_user_asm(x, addr, err, op) \
17  __asm__ __volatile__( \
18  "2: "op" %1,[%2]\n" \
19  "4:\n" \
20  " .section .fixup,\"ax\"\n" \
21  "3: move.d %3,%0\n" \
22  " jump 4b\n" \
23  " nop\n" \
24  " .previous\n" \
25  " .section __ex_table,\"a\"\n" \
26  " .dword 2b,3b\n" \
27  " .previous\n" \
28  : "=r" (err) \
29  : "r" (x), "r" (addr), "g" (-EFAULT), "0" (err))
30 
31 #define __put_user_asm_64(x, addr, err) do { \
32  int dummy_for_put_user_asm_64_; \
33  __asm__ __volatile__( \
34  "2: move.d %M2,[%1+]\n" \
35  "4: move.d %H2,[%1]\n" \
36  "5:\n" \
37  " .section .fixup,\"ax\"\n" \
38  "3: move.d %4,%0\n" \
39  " jump 5b\n" \
40  " .previous\n" \
41  " .section __ex_table,\"a\"\n" \
42  " .dword 2b,3b\n" \
43  " .dword 4b,3b\n" \
44  " .previous\n" \
45  : "=r" (err), "=b" (dummy_for_put_user_asm_64_) \
46  : "r" (x), "1" (addr), "g" (-EFAULT), \
47  "0" (err)); \
48  } while (0)
49 
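/*
 * Illustrative sketch, not part of the original header: the generic CRIS
 * <asm/uaccess.h> is expected to plug the two store macros above into a
 * size-dispatching helper roughly like the one below.  The helper name is
 * an assumption for illustration; "move.b", "move.w" and "move.d" store
 * 8, 16 and 32 bits respectively, and the 64-bit case uses the dedicated
 * two-instruction variant.
 */
#define __example_put_user_size(x, ptr, size, retval)		\
do {								\
	retval = 0;						\
	switch (size) {						\
	case 1:							\
		__put_user_asm(x, ptr, retval, "move.b");	\
		break;						\
	case 2:							\
		__put_user_asm(x, ptr, retval, "move.w");	\
		break;						\
	case 4:							\
		__put_user_asm(x, ptr, retval, "move.d");	\
		break;						\
	case 8:							\
		__put_user_asm_64(x, ptr, retval);		\
		break;						\
	}							\
} while (0)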
50 /* See comment before __put_user_asm. */
51 
52 #define __get_user_asm(x, addr, err, op) \
53  __asm__ __volatile__( \
54  "2: "op" [%2],%1\n" \
55  "4:\n" \
56  " .section .fixup,\"ax\"\n" \
57  "3: move.d %3,%0\n" \
58  " jump 4b\n" \
59  " moveq 0,%1\n" \
60  " .previous\n" \
61  " .section __ex_table,\"a\"\n" \
62  " .dword 2b,3b\n" \
63  " .previous\n" \
64  : "=r" (err), "=r" (x) \
65  : "r" (addr), "g" (-EFAULT), "0" (err))
66 
67 #define __get_user_asm_64(x, addr, err) do { \
68  int dummy_for_get_user_asm_64_; \
69  __asm__ __volatile__( \
70  "2: move.d [%2+],%M1\n" \
71  "4: move.d [%2],%H1\n" \
72  "5:\n" \
73  " .section .fixup,\"ax\"\n" \
74  "3: move.d %4,%0\n" \
75  " jump 5b\n" \
76  " moveq 0,%1\n" \
77  " .previous\n" \
78  " .section __ex_table,\"a\"\n" \
79  " .dword 2b,3b\n" \
80  " .dword 4b,3b\n" \
81  " .previous\n" \
82  : "=r" (err), "=r" (x), \
83  "=b" (dummy_for_get_user_asm_64_) \
84  : "2" (addr), "g" (-EFAULT), "0" (err));\
85  } while (0)
86 
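/*
 * Illustrative sketch, not part of the original header: the matching load
 * dispatch.  Again the helper name is hypothetical.  Note that the fixup
 * code in __get_user_asm zeroes the loaded value on a fault, so callers
 * observe (value 0, err == -EFAULT) rather than stale register contents.
 */
#define __example_get_user_size(x, ptr, size, retval)		\
do {								\
	retval = 0;						\
	switch (size) {						\
	case 1:							\
		__get_user_asm(x, ptr, retval, "move.b");	\
		break;						\
	case 2:							\
		__get_user_asm(x, ptr, retval, "move.w");	\
		break;						\
	case 4:							\
		__get_user_asm(x, ptr, retval, "move.d");	\
		break;						\
	case 8:							\
		__get_user_asm_64(x, ptr, retval);		\
		break;						\
	}							\
} while (0)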
87 /*
88  * Copy a null terminated string from userspace.
89  *
90  * Must return:
91  * -EFAULT for an exception
92  * count if we hit the buffer limit
93  * bytes copied if we hit a null byte
94  * (without the null byte)
95  */
96 static inline long
97 __do_strncpy_from_user(char *dst, const char *src, long count)
98 {
99  long res;
100 
101  if (count == 0)
102  return 0;
103 
104  /*
105  * Currently, in 2.4.0-test9, most ports use a simple byte-copy loop.
106  * So do we.
107  *
108  * This code is deduced from:
109  *
110  * char tmp2;
111  * long tmp1, tmp3;
112  * tmp1 = count;
113  * while ((*dst++ = (tmp2 = *src++)) != 0
114  * && --tmp1)
115  * ;
116  *
117  * res = count - tmp1;
118  *
119  * with tweaks.
120  */
121 
122  __asm__ __volatile__ (
123  " move.d %3,%0\n"
124  "5: move.b [%2+],$acr\n"
125  "1: beq 6f\n"
126  " move.b $acr,[%1+]\n"
127 
128  " subq 1,%0\n"
129  "2: bne 1b\n"
130  " move.b [%2+],$acr\n"
131 
132  "6: sub.d %3,%0\n"
133  " neg.d %0,%0\n"
134  "3:\n"
135  " .section .fixup,\"ax\"\n"
136  "4: move.d %7,%0\n"
137  " jump 3b\n"
138  " nop\n"
139 
140  /* The address for a fault at the first move is trivial.
141  The address for a fault at the second move is that of
142  the preceding branch insn, since the move insn is in
143  its delay-slot. Just so you don't get confused... */
144  " .previous\n"
145  " .section __ex_table,\"a\"\n"
146  " .dword 5b,4b\n"
147  " .dword 2b,4b\n"
148  " .previous"
149  : "=r" (res), "=b" (dst), "=b" (src), "=r" (count)
150  : "3" (count), "1" (dst), "2" (src), "g" (-EFAULT)
151  : "acr");
152 
153  return res;
154 }
155 
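/*
 * Illustrative sketch, not part of the original header: the same return
 * convention as __do_strncpy_from_user() above, written in plain C and
 * reusing __get_user_asm for the byte loads.  The function name is
 * hypothetical and the loop is a straightforward rendering of the
 * pseudocode in the comment above, without the delay-slot tricks.
 */
static inline long
__example_strncpy_from_user_c(char *dst, const char *src, long count)
{
	long copied = 0;

	while (copied < count) {
		unsigned long c;
		int err = 0;

		__get_user_asm(c, src + copied, err, "move.b");
		if (err)
			return -EFAULT;		/* fault while reading userspace */
		dst[copied] = (char)c;
		if ((char)c == '\0')
			return copied;		/* NUL byte itself is not counted */
		copied++;
	}
	return count;				/* buffer limit hit before a NUL */
}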
156 /* A few copy asms to build up the more complex ones from.
157 
158  Note again, a post-increment is performed regardless of whether a bus
159  fault occurred in that instruction, and PC for a faulted insn is the
160  address for the insn, or for the preceding branch when in a delay-slot. */
161 
162 #define __asm_copy_user_cont(to, from, ret, COPY, FIXUP, TENTRY) \
163  __asm__ __volatile__ ( \
164  COPY \
165  "1:\n" \
166  " .section .fixup,\"ax\"\n" \
167  FIXUP \
168  " .previous\n" \
169  " .section __ex_table,\"a\"\n" \
170  TENTRY \
171  " .previous\n" \
172  : "=b" (to), "=b" (from), "=r" (ret) \
173  : "0" (to), "1" (from), "2" (ret) \
174  : "acr", "memory")
175 
176 #define __asm_copy_from_user_1(to, from, ret) \
177  __asm_copy_user_cont(to, from, ret, \
178  "2: move.b [%1+],$acr\n" \
179  " move.b $acr,[%0+]\n", \
180  "3: addq 1,%2\n" \
181  " jump 1b\n" \
182  " clear.b [%0+]\n", \
183  " .dword 2b,3b\n")
184 
185 #define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
186  __asm_copy_user_cont(to, from, ret, \
187  COPY \
188  "2: move.w [%1+],$acr\n" \
189  " move.w $acr,[%0+]\n", \
190  FIXUP \
191  "3: addq 2,%2\n" \
192  " jump 1b\n" \
193  " clear.w [%0+]\n", \
194  TENTRY \
195  " .dword 2b,3b\n")
196 
197 #define __asm_copy_from_user_2(to, from, ret) \
198  __asm_copy_from_user_2x_cont(to, from, ret, "", "", "")
199 
200 #define __asm_copy_from_user_3(to, from, ret) \
201  __asm_copy_from_user_2x_cont(to, from, ret, \
202  "4: move.b [%1+],$acr\n" \
203  " move.b $acr,[%0+]\n", \
204  "5: addq 1,%2\n" \
205  " clear.b [%0+]\n", \
206  " .dword 4b,5b\n")
207 
208 #define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
209  __asm_copy_user_cont(to, from, ret, \
210  COPY \
211  "2: move.d [%1+],$acr\n" \
212  " move.d $acr,[%0+]\n", \
213  FIXUP \
214  "3: addq 4,%2\n" \
215  " jump 1b\n" \
216  " clear.d [%0+]\n", \
217  TENTRY \
218  " .dword 2b,3b\n")
219 
220 #define __asm_copy_from_user_4(to, from, ret) \
221  __asm_copy_from_user_4x_cont(to, from, ret, "", "", "")
222 
223 #define __asm_copy_from_user_5(to, from, ret) \
224  __asm_copy_from_user_4x_cont(to, from, ret, \
225  "4: move.b [%1+],$acr\n" \
226  " move.b $acr,[%0+]\n", \
227  "5: addq 1,%2\n" \
228  " clear.b [%0+]\n", \
229  " .dword 4b,5b\n")
230 
231 #define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
232  __asm_copy_from_user_4x_cont(to, from, ret, \
233  COPY \
234  "4: move.w [%1+],$acr\n" \
235  " move.w $acr,[%0+]\n", \
236  FIXUP \
237  "5: addq 2,%2\n" \
238  " clear.w [%0+]\n", \
239  TENTRY \
240  " .dword 4b,5b\n")
241 
242 #define __asm_copy_from_user_6(to, from, ret) \
243  __asm_copy_from_user_6x_cont(to, from, ret, "", "", "")
244 
245 #define __asm_copy_from_user_7(to, from, ret) \
246  __asm_copy_from_user_6x_cont(to, from, ret, \
247  "6: move.b [%1+],$acr\n" \
248  " move.b $acr,[%0+]\n", \
249  "7: addq 1,%2\n" \
250  " clear.b [%0+]\n", \
251  " .dword 6b,7b\n")
252 
253 #define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
254  __asm_copy_from_user_4x_cont(to, from, ret, \
255  COPY \
256  "4: move.d [%1+],$acr\n" \
257  " move.d $acr,[%0+]\n", \
258  FIXUP \
259  "5: addq 4,%2\n" \
260  " clear.d [%0+]\n", \
261  TENTRY \
262  " .dword 4b,5b\n")
263 
264 #define __asm_copy_from_user_8(to, from, ret) \
265  __asm_copy_from_user_8x_cont(to, from, ret, "", "", "")
266 
267 #define __asm_copy_from_user_9(to, from, ret) \
268  __asm_copy_from_user_8x_cont(to, from, ret, \
269  "6: move.b [%1+],$acr\n" \
270  " move.b $acr,[%0+]\n", \
271  "7: addq 1,%2\n" \
272  " clear.b [%0+]\n", \
273  " .dword 6b,7b\n")
274 
275 #define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
276  __asm_copy_from_user_8x_cont(to, from, ret, \
277  COPY \
278  "6: move.w [%1+],$acr\n" \
279  " move.w $acr,[%0+]\n", \
280  FIXUP \
281  "7: addq 2,%2\n" \
282  " clear.w [%0+]\n", \
283  TENTRY \
284  " .dword 6b,7b\n")
285 
286 #define __asm_copy_from_user_10(to, from, ret) \
287  __asm_copy_from_user_10x_cont(to, from, ret, "", "", "")
288 
289 #define __asm_copy_from_user_11(to, from, ret) \
290  __asm_copy_from_user_10x_cont(to, from, ret, \
291  "8: move.b [%1+],$acr\n" \
292  " move.b $acr,[%0+]\n", \
293  "9: addq 1,%2\n" \
294  " clear.b [%0+]\n", \
295  " .dword 8b,9b\n")
296 
297 #define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
298  __asm_copy_from_user_8x_cont(to, from, ret, \
299  COPY \
300  "6: move.d [%1+],$acr\n" \
301  " move.d $acr,[%0+]\n", \
302  FIXUP \
303  "7: addq 4,%2\n" \
304  " clear.d [%0+]\n", \
305  TENTRY \
306  " .dword 6b,7b\n")
307 
308 #define __asm_copy_from_user_12(to, from, ret) \
309  __asm_copy_from_user_12x_cont(to, from, ret, "", "", "")
310 
311 #define __asm_copy_from_user_13(to, from, ret) \
312  __asm_copy_from_user_12x_cont(to, from, ret, \
313  "8: move.b [%1+],$acr\n" \
314  " move.b $acr,[%0+]\n", \
315  "9: addq 1,%2\n" \
316  " clear.b [%0+]\n", \
317  " .dword 8b,9b\n")
318 
319 #define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
320  __asm_copy_from_user_12x_cont(to, from, ret, \
321  COPY \
322  "8: move.w [%1+],$acr\n" \
323  " move.w $acr,[%0+]\n", \
324  FIXUP \
325  "9: addq 2,%2\n" \
326  " clear.w [%0+]\n", \
327  TENTRY \
328  " .dword 8b,9b\n")
329 
330 #define __asm_copy_from_user_14(to, from, ret) \
331  __asm_copy_from_user_14x_cont(to, from, ret, "", "", "")
332 
333 #define __asm_copy_from_user_15(to, from, ret) \
334  __asm_copy_from_user_14x_cont(to, from, ret, \
335  "10: move.b [%1+],$acr\n" \
336  " move.b $acr,[%0+]\n", \
337  "11: addq 1,%2\n" \
338  " clear.b [%0+]\n", \
339  " .dword 10b,11b\n")
340 
341 #define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
342  __asm_copy_from_user_12x_cont(to, from, ret, \
343  COPY \
344  "8: move.d [%1+],$acr\n" \
345  " move.d $acr,[%0+]\n", \
346  FIXUP \
347  "9: addq 4,%2\n" \
348  " clear.d [%0+]\n", \
349  TENTRY \
350  " .dword 8b,9b\n")
351 
352 #define __asm_copy_from_user_16(to, from, ret) \
353  __asm_copy_from_user_16x_cont(to, from, ret, "", "", "")
354 
355 #define __asm_copy_from_user_20x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
356  __asm_copy_from_user_16x_cont(to, from, ret, \
357  COPY \
358  "10: move.d [%1+],$acr\n" \
359  " move.d $acr,[%0+]\n", \
360  FIXUP \
361  "11: addq 4,%2\n" \
362  " clear.d [%0+]\n", \
363  TENTRY \
364  " .dword 10b,11b\n")
365 
366 #define __asm_copy_from_user_20(to, from, ret) \
367  __asm_copy_from_user_20x_cont(to, from, ret, "", "", "")
368 
369 #define __asm_copy_from_user_24x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
370  __asm_copy_from_user_20x_cont(to, from, ret, \
371  COPY \
372  "12: move.d [%1+],$acr\n" \
373  " move.d $acr,[%0+]\n", \
374  FIXUP \
375  "13: addq 4,%2\n" \
376  " clear.d [%0+]\n", \
377  TENTRY \
378  " .dword 12b,13b\n")
379 
380 #define __asm_copy_from_user_24(to, from, ret) \
381  __asm_copy_from_user_24x_cont(to, from, ret, "", "", "")
382 
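/*
 * Illustrative sketch, not part of the original header: how a constant-size
 * __copy_from_user() fast path might select one of the unrolled blocks
 * above.  The function name and the handful of sizes shown are assumptions;
 * the real selection logic lives in the generic CRIS <asm/uaccess.h>.  The
 * returned value is the number of bytes that could not be copied (0 on
 * success); on a fault the chained fixups above have already zero-filled
 * the uncopied tail of the destination, as copy_from_user() requires.
 */
static inline unsigned long
__example_constant_copy_from_user(void *to, const void *from, unsigned long n)
{
	unsigned long ret = 0;

	if (n == 4)
		__asm_copy_from_user_4(to, from, ret);
	else if (n == 8)
		__asm_copy_from_user_8(to, from, ret);
	else if (n == 16)
		__asm_copy_from_user_16(to, from, ret);
	else if (n == 24)
		__asm_copy_from_user_24(to, from, ret);
	/* ... a real dispatcher would cover every size provided above. */
	return ret;
}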
383 /* And now, the to-user ones. */
384 
385 #define __asm_copy_to_user_1(to, from, ret) \
386  __asm_copy_user_cont(to, from, ret, \
387  " move.b [%1+],$acr\n" \
388  "2: move.b $acr,[%0+]\n", \
389  "3: jump 1b\n" \
390  " addq 1,%2\n", \
391  " .dword 2b,3b\n")
392 
393 #define __asm_copy_to_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
394  __asm_copy_user_cont(to, from, ret, \
395  COPY \
396  " move.w [%1+],$acr\n" \
397  "2: move.w $acr,[%0+]\n", \
398  FIXUP \
399  "3: jump 1b\n" \
400  " addq 2,%2\n", \
401  TENTRY \
402  " .dword 2b,3b\n")
403 
404 #define __asm_copy_to_user_2(to, from, ret) \
405  __asm_copy_to_user_2x_cont(to, from, ret, "", "", "")
406 
407 #define __asm_copy_to_user_3(to, from, ret) \
408  __asm_copy_to_user_2x_cont(to, from, ret, \
409  " move.b [%1+],$acr\n" \
410  "4: move.b $acr,[%0+]\n", \
411  "5: addq 1,%2\n", \
412  " .dword 4b,5b\n")
413 
414 #define __asm_copy_to_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
415  __asm_copy_user_cont(to, from, ret, \
416  COPY \
417  " move.d [%1+],$acr\n" \
418  "2: move.d $acr,[%0+]\n", \
419  FIXUP \
420  "3: jump 1b\n" \
421  " addq 4,%2\n", \
422  TENTRY \
423  " .dword 2b,3b\n")
424 
425 #define __asm_copy_to_user_4(to, from, ret) \
426  __asm_copy_to_user_4x_cont(to, from, ret, "", "", "")
427 
428 #define __asm_copy_to_user_5(to, from, ret) \
429  __asm_copy_to_user_4x_cont(to, from, ret, \
430  " move.b [%1+],$acr\n" \
431  "4: move.b $acr,[%0+]\n", \
432  "5: addq 1,%2\n", \
433  " .dword 4b,5b\n")
434 
435 #define __asm_copy_to_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
436  __asm_copy_to_user_4x_cont(to, from, ret, \
437  COPY \
438  " move.w [%1+],$acr\n" \
439  "4: move.w $acr,[%0+]\n", \
440  FIXUP \
441  "5: addq 2,%2\n", \
442  TENTRY \
443  " .dword 4b,5b\n")
444 
445 #define __asm_copy_to_user_6(to, from, ret) \
446  __asm_copy_to_user_6x_cont(to, from, ret, "", "", "")
447 
448 #define __asm_copy_to_user_7(to, from, ret) \
449  __asm_copy_to_user_6x_cont(to, from, ret, \
450  " move.b [%1+],$acr\n" \
451  "6: move.b $acr,[%0+]\n", \
452  "7: addq 1,%2\n", \
453  " .dword 6b,7b\n")
454 
455 #define __asm_copy_to_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
456  __asm_copy_to_user_4x_cont(to, from, ret, \
457  COPY \
458  " move.d [%1+],$acr\n" \
459  "4: move.d $acr,[%0+]\n", \
460  FIXUP \
461  "5: addq 4,%2\n", \
462  TENTRY \
463  " .dword 4b,5b\n")
464 
465 #define __asm_copy_to_user_8(to, from, ret) \
466  __asm_copy_to_user_8x_cont(to, from, ret, "", "", "")
467 
468 #define __asm_copy_to_user_9(to, from, ret) \
469  __asm_copy_to_user_8x_cont(to, from, ret, \
470  " move.b [%1+],$acr\n" \
471  "6: move.b $acr,[%0+]\n", \
472  "7: addq 1,%2\n", \
473  " .dword 6b,7b\n")
474 
475 #define __asm_copy_to_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
476  __asm_copy_to_user_8x_cont(to, from, ret, \
477  COPY \
478  " move.w [%1+],$acr\n" \
479  "6: move.w $acr,[%0+]\n", \
480  FIXUP \
481  "7: addq 2,%2\n", \
482  TENTRY \
483  " .dword 6b,7b\n")
484 
485 #define __asm_copy_to_user_10(to, from, ret) \
486  __asm_copy_to_user_10x_cont(to, from, ret, "", "", "")
487 
488 #define __asm_copy_to_user_11(to, from, ret) \
489  __asm_copy_to_user_10x_cont(to, from, ret, \
490  " move.b [%1+],$acr\n" \
491  "8: move.b $acr,[%0+]\n", \
492  "9: addq 1,%2\n", \
493  " .dword 8b,9b\n")
494 
495 #define __asm_copy_to_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
496  __asm_copy_to_user_8x_cont(to, from, ret, \
497  COPY \
498  " move.d [%1+],$acr\n" \
499  "6: move.d $acr,[%0+]\n", \
500  FIXUP \
501  "7: addq 4,%2\n", \
502  TENTRY \
503  " .dword 6b,7b\n")
504 
505 #define __asm_copy_to_user_12(to, from, ret) \
506  __asm_copy_to_user_12x_cont(to, from, ret, "", "", "")
507 
508 #define __asm_copy_to_user_13(to, from, ret) \
509  __asm_copy_to_user_12x_cont(to, from, ret, \
510  " move.b [%1+],$acr\n" \
511  "8: move.b $acr,[%0+]\n", \
512  "9: addq 1,%2\n", \
513  " .dword 8b,9b\n")
514 
515 #define __asm_copy_to_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
516  __asm_copy_to_user_12x_cont(to, from, ret, \
517  COPY \
518  " move.w [%1+],$acr\n" \
519  "8: move.w $acr,[%0+]\n", \
520  FIXUP \
521  "9: addq 2,%2\n", \
522  TENTRY \
523  " .dword 8b,9b\n")
524 
525 #define __asm_copy_to_user_14(to, from, ret) \
526  __asm_copy_to_user_14x_cont(to, from, ret, "", "", "")
527 
528 #define __asm_copy_to_user_15(to, from, ret) \
529  __asm_copy_to_user_14x_cont(to, from, ret, \
530  " move.b [%1+],$acr\n" \
531  "10: move.b $acr,[%0+]\n", \
532  "11: addq 1,%2\n", \
533  " .dword 10b,11b\n")
534 
535 #define __asm_copy_to_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
536  __asm_copy_to_user_12x_cont(to, from, ret, \
537  COPY \
538  " move.d [%1+],$acr\n" \
539  "8: move.d $acr,[%0+]\n", \
540  FIXUP \
541  "9: addq 4,%2\n", \
542  TENTRY \
543  " .dword 8b,9b\n")
544 
545 #define __asm_copy_to_user_16(to, from, ret) \
546  __asm_copy_to_user_16x_cont(to, from, ret, "", "", "")
547 
548 #define __asm_copy_to_user_20x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
549  __asm_copy_to_user_16x_cont(to, from, ret, \
550  COPY \
551  " move.d [%1+],$acr\n" \
552  "10: move.d $acr,[%0+]\n", \
553  FIXUP \
554  "11: addq 4,%2\n", \
555  TENTRY \
556  " .dword 10b,11b\n")
557 
558 #define __asm_copy_to_user_20(to, from, ret) \
559  __asm_copy_to_user_20x_cont(to, from, ret, "", "", "")
560 
561 #define __asm_copy_to_user_24x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
562  __asm_copy_to_user_20x_cont(to, from, ret, \
563  COPY \
564  " move.d [%1+],$acr\n" \
565  "12: move.d $acr,[%0+]\n", \
566  FIXUP \
567  "13: addq 4,%2\n", \
568  TENTRY \
569  " .dword 12b,13b\n")
570 
571 #define __asm_copy_to_user_24(to, from, ret) \
572  __asm_copy_to_user_24x_cont(to, from, ret, "", "", "")
573 
574 /* Define a few clearing asms with exception handlers. */
575 
576 /* This frame-asm is like the __asm_copy_user_cont one, but has one less
577  input. */
578 
579 #define __asm_clear(to, ret, CLEAR, FIXUP, TENTRY) \
580  __asm__ __volatile__ ( \
581  CLEAR \
582  "1:\n" \
583  " .section .fixup,\"ax\"\n" \
584  FIXUP \
585  " .previous\n" \
586  " .section __ex_table,\"a\"\n" \
587  TENTRY \
588  " .previous" \
589  : "=b" (to), "=r" (ret) \
590  : "0" (to), "1" (ret) \
591  : "memory")
592 
593 #define __asm_clear_1(to, ret) \
594  __asm_clear(to, ret, \
595  "2: clear.b [%0+]\n", \
596  "3: jump 1b\n" \
597  " addq 1,%1\n", \
598  " .dword 2b,3b\n")
599 
600 #define __asm_clear_2(to, ret) \
601  __asm_clear(to, ret, \
602  "2: clear.w [%0+]\n", \
603  "3: jump 1b\n" \
604  " addq 2,%1\n", \
605  " .dword 2b,3b\n")
606 
607 #define __asm_clear_3(to, ret) \
608  __asm_clear(to, ret, \
609  "2: clear.w [%0+]\n" \
610  "3: clear.b [%0+]\n", \
611  "4: addq 2,%1\n" \
612  "5: jump 1b\n" \
613  " addq 1,%1\n", \
614  " .dword 2b,4b\n" \
615  " .dword 3b,5b\n")
616 
617 #define __asm_clear_4x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
618  __asm_clear(to, ret, \
619  CLEAR \
620  "2: clear.d [%0+]\n", \
621  FIXUP \
622  "3: jump 1b\n" \
623  " addq 4,%1\n", \
624  TENTRY \
625  " .dword 2b,3b\n")
626 
627 #define __asm_clear_4(to, ret) \
628  __asm_clear_4x_cont(to, ret, "", "", "")
629 
630 #define __asm_clear_8x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
631  __asm_clear_4x_cont(to, ret, \
632  CLEAR \
633  "4: clear.d [%0+]\n", \
634  FIXUP \
635  "5: addq 4,%1\n", \
636  TENTRY \
637  " .dword 4b,5b\n")
638 
639 #define __asm_clear_8(to, ret) \
640  __asm_clear_8x_cont(to, ret, "", "", "")
641 
642 #define __asm_clear_12x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
643  __asm_clear_8x_cont(to, ret, \
644  CLEAR \
645  "6: clear.d [%0+]\n", \
646  FIXUP \
647  "7: addq 4,%1\n", \
648  TENTRY \
649  " .dword 6b,7b\n")
650 
651 #define __asm_clear_12(to, ret) \
652  __asm_clear_12x_cont(to, ret, "", "", "")
653 
654 #define __asm_clear_16x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
655  __asm_clear_12x_cont(to, ret, \
656  CLEAR \
657  "8: clear.d [%0+]\n", \
658  FIXUP \
659  "9: addq 4,%1\n", \
660  TENTRY \
661  " .dword 8b,9b\n")
662 
663 #define __asm_clear_16(to, ret) \
664  __asm_clear_16x_cont(to, ret, "", "", "")
665 
666 #define __asm_clear_20x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
667  __asm_clear_16x_cont(to, ret, \
668  CLEAR \
669  "10: clear.d [%0+]\n", \
670  FIXUP \
671  "11: addq 4,%1\n", \
672  TENTRY \
673  " .dword 10b,11b\n")
674 
675 #define __asm_clear_20(to, ret) \
676  __asm_clear_20x_cont(to, ret, "", "", "")
677 
678 #define __asm_clear_24x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
679  __asm_clear_20x_cont(to, ret, \
680  CLEAR \
681  "12: clear.d [%0+]\n", \
682  FIXUP \
683  "13: addq 4,%1\n", \
684  TENTRY \
685  " .dword 12b,13b\n")
686 
687 #define __asm_clear_24(to, ret) \
688  __asm_clear_24x_cont(to, ret, "", "", "")
689 
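/*
 * Illustrative sketch, not part of the original header: wrapping one of the
 * clearing blocks the same way.  The function name is hypothetical; as with
 * the copy macros, ret accumulates the number of bytes that could not be
 * cleared, so 0 means the whole range was written.
 */
static inline unsigned long
__example_clear_user_8(void *to)
{
	unsigned long ret = 0;

	__asm_clear_8(to, ret);	/* two "clear.d [%0+]" stores, each with a fixup */
	return ret;
}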
690 /*
691  * Return the size of a string (including the ending 0)
692  *
693  * Return length of string in userspace including terminating 0
694  * or 0 for error. Return a value greater than N if too long.
695  */
696 
697 static inline long
698 strnlen_user(const char *s, long n)
699 {
700  long res, tmp1;
701 
702  if (!access_ok(VERIFY_READ, s, 0))
703  return 0;
704 
705  /*
706  * This code is deduced from:
707  *
708  * tmp1 = n;
709  * while (tmp1-- > 0 && *s++)
710  * ;
711  *
712  * res = n - tmp1;
713  *
714  * (with tweaks).
715  */
716 
717  __asm__ __volatile__ (
718  " move.d %1,$acr\n"
719  " cmpq 0,$acr\n"
720  "0:\n"
721  " ble 1f\n"
722  " subq 1,$acr\n"
723 
724  "4: test.b [%0+]\n"
725  " bne 0b\n"
726  " cmpq 0,$acr\n"
727  "1:\n"
728  " move.d %1,%0\n"
729  " sub.d $acr,%0\n"
730  "2:\n"
731  " .section .fixup,\"ax\"\n"
732 
733  "3: jump 2b\n"
734  " clear.d %0\n"
735 
736  " .previous\n"
737  " .section __ex_table,\"a\"\n"
738  " .dword 4b,3b\n"
739  " .previous\n"
740  : "=r" (res), "=r" (tmp1)
741  : "0" (s), "1" (n)
742  : "acr");
743 
744  return res;
745 }
746 
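/*
 * Illustrative sketch, not part of the original header: the return
 * convention of strnlen_user() above rendered in plain C (ignoring the
 * initial access_ok() check).  The function name is hypothetical; it reuses
 * __get_user_asm for the byte probes.  A fault yields 0, a string with no
 * NUL within n bytes yields n + 1 ("greater than N"), and otherwise the
 * result is the string length including the terminating NUL.
 */
static inline long
__example_strnlen_user_c(const char *s, long n)
{
	long i;

	for (i = 0; i < n; i++) {
		unsigned long c;
		int err = 0;

		__get_user_asm(c, s + i, err, "move.b");
		if (err)
			return 0;	/* fault while probing userspace */
		if ((char)c == '\0')
			return i + 1;	/* length including the NUL */
	}
	return n + 1;			/* no NUL within the first n bytes */
}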
747 #endif