Linux Kernel 3.7.1
sys_sparc_64.c
/* linux/arch/sparc64/kernel/sys_sparc.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/sparc
 * platform.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/utsname.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/ipc.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/export.h>

#include <asm/uaccess.h>
#include <asm/utrap.h>
#include <asm/unistd.h>

#include "entry.h"
#include "systbls.h"

/* #define DEBUG_UNIMP_SYSCALL */

asmlinkage unsigned long sys_getpagesize(void)
{
        return PAGE_SIZE;
}

#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))

/* Does addr --> addr+len fall within 4GB of the VA-space hole or
 * overflow past the end of the 64-bit address space?
 */
static inline int invalid_64bit_range(unsigned long addr, unsigned long len)
{
        unsigned long va_exclude_start, va_exclude_end;

        va_exclude_start = VA_EXCLUDE_START;
        va_exclude_end   = VA_EXCLUDE_END;

        if (unlikely(len >= va_exclude_start))
                return 1;

        if (unlikely((addr + len) < addr))
                return 1;

        if (unlikely((addr >= va_exclude_start && addr < va_exclude_end) ||
                     ((addr + len) >= va_exclude_start &&
                      (addr + len) < va_exclude_end)))
                return 1;

        return 0;
}
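
/* Example (editor's sketch, not kernel code): a self-contained user-space
 * mirror of invalid_64bit_range() for experimenting with which ranges hit
 * the VA hole. The constants are copied from the macros above; main() and
 * the sample addresses are illustrative.
 */
#include <stdio.h>

#define EX_VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
#define EX_VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))

static int ex_invalid_64bit_range(unsigned long addr, unsigned long len)
{
        if (len >= EX_VA_EXCLUDE_START)          /* too long to fit below the hole */
                return 1;
        if (addr + len < addr)                   /* wraps past the top of the address space */
                return 1;
        if ((addr >= EX_VA_EXCLUDE_START && addr < EX_VA_EXCLUDE_END) ||
            (addr + len >= EX_VA_EXCLUDE_START && addr + len < EX_VA_EXCLUDE_END))
                return 1;                        /* start or end lands in the hole or its 4GB guard bands */
        return 0;
}

int main(void)
{
        printf("%d\n", ex_invalid_64bit_range(0x0000070000000000UL, 8192)); /* 0: well below the hole */
        printf("%d\n", ex_invalid_64bit_range(0x000007ff80000000UL, 8192)); /* 1: inside the guard band */
        return 0;
}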

/* These functions differ from the default implementations in
 * mm/mmap.c in two ways:
 *
 * 1) For file backed MAP_SHARED mmap()'s we D-cache color align,
 *    for fixed such mappings we just validate what the user gave us.
 * 2) For 64-bit tasks we avoid mapping anything within 4GB of
 *    the spitfire/niagara VA-hole.
 */

static inline unsigned long COLOUR_ALIGN(unsigned long addr,
                                         unsigned long pgoff)
{
        unsigned long base = (addr+SHMLBA-1)&~(SHMLBA-1);
        unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);

        return base + off;
}

static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
                                              unsigned long pgoff)
{
        unsigned long base = addr & ~(SHMLBA-1);
        unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);

        if (base + off <= addr)
                return base + off;
        return base - off;
}
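
/* Example (editor's sketch, not kernel code): the colour-alignment
 * arithmetic above, with PAGE_SHIFT = 13 and SHMLBA = 16 KB assumed here
 * purely for illustration (the real values come from the arch headers).
 * The returned address shares its D-cache colour with the file offset.
 */
#include <stdio.h>

#define EX_PAGE_SHIFT 13UL
#define EX_SHMLBA     (1UL << 14)

static unsigned long ex_colour_align(unsigned long addr, unsigned long pgoff)
{
        unsigned long base = (addr + EX_SHMLBA - 1) & ~(EX_SHMLBA - 1); /* round up to SHMLBA */
        unsigned long off  = (pgoff << EX_PAGE_SHIFT) & (EX_SHMLBA - 1); /* colour of the file offset */

        return base + off;
}

int main(void)
{
        /* pgoff 1 is file offset 0x2000, so the mapping is placed 0x2000
         * past an SHMLBA boundary to alias into the same cache colour. */
        printf("%#lx\n", ex_colour_align(0x10000UL, 1)); /* prints 0x12000 */
        return 0;
}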

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct * vma;
        unsigned long task_size = TASK_SIZE;
        unsigned long start_addr;
        int do_color_align;

        if (flags & MAP_FIXED) {
                /* We do not accept a shared mapping if it would violate
                 * cache aliasing constraints.
                 */
                if ((flags & MAP_SHARED) &&
                    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
                        return -EINVAL;
                return addr;
        }

        if (test_thread_flag(TIF_32BIT))
                task_size = STACK_TOP32;
        if (unlikely(len > task_size || len >= VA_EXCLUDE_START))
                return -ENOMEM;

        do_color_align = 0;
        if (filp || (flags & MAP_SHARED))
                do_color_align = 1;

        if (addr) {
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma(mm, addr);
                if (task_size - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        if (len > mm->cached_hole_size) {
                start_addr = addr = mm->free_area_cache;
        } else {
                start_addr = addr = TASK_UNMAPPED_BASE;
                mm->cached_hole_size = 0;
        }

        task_size -= len;

full_search:
        if (do_color_align)
                addr = COLOUR_ALIGN(addr, pgoff);
        else
                addr = PAGE_ALIGN(addr);

        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point: (!vma || addr < vma->vm_end). */
                if (addr < VA_EXCLUDE_START &&
                    (addr + len) >= VA_EXCLUDE_START) {
                        addr = VA_EXCLUDE_END;
                        vma = find_vma(mm, VA_EXCLUDE_END);
                }
                if (unlikely(task_size < addr)) {
                        if (start_addr != TASK_UNMAPPED_BASE) {
                                start_addr = addr = TASK_UNMAPPED_BASE;
                                mm->cached_hole_size = 0;
                                goto full_search;
                        }
                        return -ENOMEM;
                }
                if (likely(!vma || addr + len <= vma->vm_start)) {
                        /*
                         * Remember the place where we stopped the search:
                         */
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;

                addr = vma->vm_end;
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
        }
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                               const unsigned long len, const unsigned long pgoff,
                               const unsigned long flags)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long task_size = STACK_TOP32;
        unsigned long addr = addr0;
        int do_color_align;

        /* This should only ever run for 32-bit processes. */
        BUG_ON(!test_thread_flag(TIF_32BIT));

        if (flags & MAP_FIXED) {
                /* We do not accept a shared mapping if it would violate
                 * cache aliasing constraints.
                 */
                if ((flags & MAP_SHARED) &&
                    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
                        return -EINVAL;
                return addr;
        }

        if (unlikely(len > task_size))
                return -ENOMEM;

        do_color_align = 0;
        if (filp || (flags & MAP_SHARED))
                do_color_align = 1;

        /* requesting a specific address */
        if (addr) {
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma(mm, addr);
                if (task_size - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        /* check if free_area_cache is useful for us */
        if (len <= mm->cached_hole_size) {
                mm->cached_hole_size = 0;
                mm->free_area_cache = mm->mmap_base;
        }

        /* either no address requested or can't fit in requested address hole */
        addr = mm->free_area_cache;
        if (do_color_align) {
                unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff);

                addr = base + len;
        }

        /* make sure it can fit in the remaining address space */
        if (likely(addr > len)) {
                vma = find_vma(mm, addr-len);
                if (!vma || addr <= vma->vm_start) {
                        /* remember the address as a hint for next time */
                        return (mm->free_area_cache = addr-len);
                }
        }

        if (unlikely(mm->mmap_base < len))
                goto bottomup;

        addr = mm->mmap_base-len;
        if (do_color_align)
                addr = COLOUR_ALIGN_DOWN(addr, pgoff);

        do {
                /*
                 * Lookup failure means no vma is above this address,
                 * else if new region fits below vma->vm_start,
                 * return with success:
                 */
                vma = find_vma(mm, addr);
                if (likely(!vma || addr+len <= vma->vm_start)) {
                        /* remember the address as a hint for next time */
                        return (mm->free_area_cache = addr);
                }

                /* remember the largest hole we saw so far */
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;

                /* try just below the current vma->vm_start */
                addr = vma->vm_start-len;
                if (do_color_align)
                        addr = COLOUR_ALIGN_DOWN(addr, pgoff);
        } while (likely(len < vma->vm_start));

bottomup:
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        mm->cached_hole_size = ~0UL;
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
        /*
         * Restore the topdown base:
         */
        mm->free_area_cache = mm->mmap_base;
        mm->cached_hole_size = ~0UL;

        return addr;
}

/* Try to align mapping such that we align it as much as possible. */
unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
        unsigned long align_goal, addr = -ENOMEM;
        unsigned long (*get_area)(struct file *, unsigned long,
                                  unsigned long, unsigned long, unsigned long);

        get_area = current->mm->get_unmapped_area;

        if (flags & MAP_FIXED) {
                /* Ok, don't mess with it. */
                return get_area(NULL, orig_addr, len, pgoff, flags);
        }
        flags &= ~MAP_SHARED;

        align_goal = PAGE_SIZE;
        if (len >= (4UL * 1024 * 1024))
                align_goal = (4UL * 1024 * 1024);
        else if (len >= (512UL * 1024))
                align_goal = (512UL * 1024);
        else if (len >= (64UL * 1024))
                align_goal = (64UL * 1024);

        do {
                addr = get_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
                if (!(addr & ~PAGE_MASK)) {
                        addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
                        break;
                }

                if (align_goal == (4UL * 1024 * 1024))
                        align_goal = (512UL * 1024);
                else if (align_goal == (512UL * 1024))
                        align_goal = (64UL * 1024);
                else
                        align_goal = PAGE_SIZE;
        } while ((addr & ~PAGE_MASK) && align_goal > PAGE_SIZE);

        /* Mapping is smaller than 64K or larger areas could not
         * be obtained.
         */
        if (addr & ~PAGE_MASK)
                addr = get_area(NULL, orig_addr, len, pgoff, flags);

        return addr;
}
EXPORT_SYMBOL(get_fb_unmapped_area);

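/* Example (editor's sketch, not kernel code): the over-allocate-and-round
 * trick used above. Asking for len + (goal - PAGE_SIZE) bytes guarantees
 * that some goal-aligned address lies inside the returned window, so
 * rounding up never runs past the reservation. Numbers are illustrative.
 */
#include <stdio.h>

int main(void)
{
        unsigned long goal = 64 * 1024;                  /* alignment goal */
        unsigned long got  = 0x123a000UL;                /* page-aligned, but not goal-aligned */
        unsigned long aligned = (got + goal - 1) & ~(goal - 1);

        printf("%#lx -> %#lx (skipped %lu KB)\n", got, aligned, (aligned - got) >> 10);
        return 0;
}
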
/* Essentially the same as PowerPC. */
static unsigned long mmap_rnd(void)
{
        unsigned long rnd = 0UL;

        if (current->flags & PF_RANDOMIZE) {
                unsigned long val = get_random_int();
                if (test_thread_flag(TIF_32BIT))
                        rnd = (val % (1UL << (23UL-PAGE_SHIFT)));
                else
                        rnd = (val % (1UL << (30UL-PAGE_SHIFT)));
        }
        return rnd << PAGE_SHIFT;
}
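
/* Example (editor's sketch): the spans implied by mmap_rnd() above. The
 * random page count is below 2^(23-PAGE_SHIFT) or 2^(30-PAGE_SHIFT), so
 * after the final shift the byte offset is always below 2^23 or 2^30,
 * whatever PAGE_SHIFT is.
 */
#include <stdio.h>

int main(void)
{
        printf("32-bit randomisation span: %lu MB\n", (1UL << 23) >> 20); /* 8 MB */
        printf("64-bit randomisation span: %lu MB\n", (1UL << 30) >> 20); /* 1024 MB */
        return 0;
}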

void arch_pick_mmap_layout(struct mm_struct *mm)
{
        unsigned long random_factor = mmap_rnd();
        unsigned long gap;

        /*
         * Fall back to the standard layout if the personality
         * bit is set, or if the expected stack growth is unlimited:
         */
        gap = rlimit(RLIMIT_STACK);
        if (!test_thread_flag(TIF_32BIT) ||
            (current->personality & ADDR_COMPAT_LAYOUT) ||
            gap == RLIM_INFINITY ||
            sysctl_legacy_va_layout) {
                mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
                mm->get_unmapped_area = arch_get_unmapped_area;
                mm->unmap_area = arch_unmap_area;
        } else {
                /* We know it's 32-bit */
                unsigned long task_size = STACK_TOP32;

                if (gap < 128 * 1024 * 1024)
                        gap = 128 * 1024 * 1024;
                if (gap > (task_size / 6 * 5))
                        gap = (task_size / 6 * 5);

                mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
                mm->unmap_area = arch_unmap_area_topdown;
        }
}
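
/* Example (editor's sketch): rough numbers for the topdown base computed
 * above, assuming STACK_TOP32 is 4 GB minus one 8 KB page and an 8 MB
 * RLIMIT_STACK with randomisation disabled (both are assumptions made
 * here for illustration only).
 */
#include <stdio.h>

int main(void)
{
        unsigned long task_size = (1UL << 32) - 8192;    /* assumed STACK_TOP32 */
        unsigned long gap = 8UL << 20;                   /* typical 8 MB stack rlimit */

        if (gap < 128UL << 20)
                gap = 128UL << 20;                       /* keep at least 128 MB under the stack */
        printf("mmap_base = %#lx\n", task_size - gap);   /* 0xf7ffe000 */
        return 0;
}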

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way unix traditionally does this, though.
 */
SYSCALL_DEFINE1(sparc_pipe_real, struct pt_regs *, regs)
{
        int fd[2];
        int error;

        error = do_pipe_flags(fd, 0);
        if (error)
                goto out;
        regs->u_regs[UREG_I1] = fd[1];
        error = fd[0];
out:
        return error;
}
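
/* Example (editor's sketch, sparc64-only, hypothetical wrapper): the
 * calling convention this implies for userspace. fd[0] comes back in %o0
 * as the normal return value and fd[1] in %o1, so the kernel never writes
 * to a user buffer. Error handling (the carry-flag convention) is elided,
 * and 42 is assumed to be __NR_pipe on sparc.
 */
static int ex_pipe(int fd[2])
{
        register long g1 __asm__("g1") = 42;    /* assumed __NR_pipe */
        register long o0 __asm__("o0");
        register long o1 __asm__("o1");

        __asm__ volatile("t 0x6d"               /* 64-bit syscall trap */
                         : "=r" (o0), "=r" (o1)
                         : "r" (g1)
                         : "cc", "memory");
        fd[0] = (int) o0;                       /* error check via carry flag elided */
        fd[1] = (int) o1;
        return 0;
}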

/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls.
 *
 * This is really horribly ugly.
 */

SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second,
                unsigned long, third, void __user *, ptr, long, fifth)
{
        long err;

        /* No need for backward compatibility. We can start fresh... */
        if (call <= SEMCTL) {
                switch (call) {
                case SEMOP:
                        err = sys_semtimedop(first, ptr,
                                             (unsigned)second, NULL);
                        goto out;
                case SEMTIMEDOP:
                        err = sys_semtimedop(first, ptr, (unsigned)second,
                                             (const struct timespec __user *)
                                             (unsigned long) fifth);
                        goto out;
                case SEMGET:
                        err = sys_semget(first, (int)second, (int)third);
                        goto out;
                case SEMCTL: {
                        err = sys_semctl(first, second,
                                         (int)third | IPC_64,
                                         (union semun) ptr);
                        goto out;
                }
                default:
                        err = -ENOSYS;
                        goto out;
                }
        }
        if (call <= MSGCTL) {
                switch (call) {
                case MSGSND:
                        err = sys_msgsnd(first, ptr, (size_t)second,
                                         (int)third);
                        goto out;
                case MSGRCV:
                        err = sys_msgrcv(first, ptr, (size_t)second, fifth,
                                         (int)third);
                        goto out;
                case MSGGET:
                        err = sys_msgget((key_t)first, (int)second);
                        goto out;
                case MSGCTL:
                        err = sys_msgctl(first, (int)second | IPC_64, ptr);
                        goto out;
                default:
                        err = -ENOSYS;
                        goto out;
                }
        }
        if (call <= SHMCTL) {
                switch (call) {
                case SHMAT: {
                        ulong raddr;
                        err = do_shmat(first, ptr, (int)second, &raddr, SHMLBA);
                        if (!err) {
                                if (put_user(raddr,
                                             (ulong __user *) third))
                                        err = -EFAULT;
                        }
                        goto out;
                }
                case SHMDT:
                        err = sys_shmdt(ptr);
                        goto out;
                case SHMGET:
                        err = sys_shmget(first, (size_t)second, (int)third);
                        goto out;
                case SHMCTL:
                        err = sys_shmctl(first, (int)second | IPC_64, ptr);
                        goto out;
                default:
                        err = -ENOSYS;
                        goto out;
                }
        } else {
                err = -ENOSYS;
        }
out:
        return err;
}

SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
{
        int ret;

        if (personality(current->personality) == PER_LINUX32 &&
            personality(personality) == PER_LINUX)
                personality |= PER_LINUX32;
        ret = sys_personality(personality);
        if (personality(ret) == PER_LINUX32)
                ret &= ~PER_LINUX32;

        return ret;
}

int sparc_mmap_check(unsigned long addr, unsigned long len)
{
        if (test_thread_flag(TIF_32BIT)) {
                if (len >= STACK_TOP32)
                        return -EINVAL;

                if (addr > STACK_TOP32 - len)
                        return -EINVAL;
        } else {
                if (len >= VA_EXCLUDE_START)
                        return -EINVAL;

                if (invalid_64bit_range(addr, len))
                        return -EINVAL;
        }

        return 0;
}

/* Linux version of mmap */
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
                unsigned long, prot, unsigned long, flags, unsigned long, fd,
                unsigned long, off)
{
        unsigned long retval = -EINVAL;

        if ((off + PAGE_ALIGN(len)) < off)
                goto out;
        if (off & ~PAGE_MASK)
                goto out;
        retval = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
        return retval;
}
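
/* Example (editor's sketch): the overflow and conversion logic above in
 * isolation. A byte offset near the top of the address space plus an
 * aligned length wraps around, which sys_mmap rejects; a valid offset is
 * handed down as a page number. 8 KB pages are assumed for illustration.
 */
#include <stdio.h>

#define EX_PAGE_SIZE     8192UL
#define EX_PAGE_ALIGN(x) (((x) + EX_PAGE_SIZE - 1) & ~(EX_PAGE_SIZE - 1))

int main(void)
{
        unsigned long off = 0xffffffffffffe000UL;
        unsigned long len = 2 * EX_PAGE_SIZE;

        printf("wraps: %d\n", off + EX_PAGE_ALIGN(len) < off);   /* 1: rejected */
        printf("pgoff: %lu\n", 0x10000UL >> 13);                 /* byte offset -> page number */
        return 0;
}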

SYSCALL_DEFINE2(64_munmap, unsigned long, addr, size_t, len)
{
        if (invalid_64bit_range(addr, len))
                return -EINVAL;

        return vm_munmap(addr, len);
}

extern unsigned long do_mremap(unsigned long addr,
                               unsigned long old_len, unsigned long new_len,
                               unsigned long flags, unsigned long new_addr);

SYSCALL_DEFINE5(64_mremap, unsigned long, addr, unsigned long, old_len,
                unsigned long, new_len, unsigned long, flags,
                unsigned long, new_addr)
{
        if (test_thread_flag(TIF_32BIT))
                return -EINVAL;
        return sys_mremap(addr, old_len, new_len, flags, new_addr);
}

/* we come to here via sys_nis_syscall so it can setup the regs argument */
asmlinkage unsigned long c_sys_nis_syscall(struct pt_regs *regs)
{
        static int count;

        /* Don't make the system unusable, if someone goes stuck */
        if (count++ > 5)
                return -ENOSYS;

        printk ("Unimplemented SPARC system call %ld\n",regs->u_regs[1]);
#ifdef DEBUG_UNIMP_SYSCALL
        show_regs (regs);
#endif

        return -ENOSYS;
}

/* #define DEBUG_SPARC_BREAKPOINT */

asmlinkage void sparc_breakpoint(struct pt_regs *regs)
{
        siginfo_t info;

        if (test_thread_flag(TIF_32BIT)) {
                regs->tpc &= 0xffffffff;
                regs->tnpc &= 0xffffffff;
        }
#ifdef DEBUG_SPARC_BREAKPOINT
        printk ("TRAP: Entering kernel PC=%lx, nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
        info.si_signo = SIGTRAP;
        info.si_errno = 0;
        info.si_code = TRAP_BRKPT;
        info.si_addr = (void __user *)regs->tpc;
        info.si_trapno = 0;
        force_sig_info(SIGTRAP, &info, current);
#ifdef DEBUG_SPARC_BREAKPOINT
        printk ("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
}

extern void check_pending(int signum);

SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
{
        int nlen, err;

        if (len < 0)
                return -EINVAL;

        down_read(&uts_sem);

        nlen = strlen(utsname()->domainname) + 1;
        err = -EINVAL;
        if (nlen > len)
                goto out;

        err = -EFAULT;
        if (!copy_to_user(name, utsname()->domainname, nlen))
                err = 0;

out:
        up_read(&uts_sem);
        return err;
}

SYSCALL_DEFINE5(utrap_install, utrap_entry_t, type,
                utrap_handler_t, new_p, utrap_handler_t, new_d,
                utrap_handler_t __user *, old_p,
                utrap_handler_t __user *, old_d)
{
        if (type < UT_INSTRUCTION_EXCEPTION || type > UT_TRAP_INSTRUCTION_31)
                return -EINVAL;
        if (new_p == (utrap_handler_t)(long)UTH_NOCHANGE) {
                if (old_p) {
                        if (!current_thread_info()->utraps) {
                                if (put_user(NULL, old_p))
                                        return -EFAULT;
                        } else {
                                if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
                                        return -EFAULT;
                        }
                }
                if (old_d) {
                        if (put_user(NULL, old_d))
                                return -EFAULT;
                }
                return 0;
        }
        if (!current_thread_info()->utraps) {
                current_thread_info()->utraps =
                        kzalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
                if (!current_thread_info()->utraps)
                        return -ENOMEM;
                current_thread_info()->utraps[0] = 1;
        } else {
                if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p &&
                    current_thread_info()->utraps[0] > 1) {
                        unsigned long *p = current_thread_info()->utraps;

                        current_thread_info()->utraps =
                                kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long),
                                        GFP_KERNEL);
                        if (!current_thread_info()->utraps) {
                                current_thread_info()->utraps = p;
                                return -ENOMEM;
                        }
                        p[0]--;
                        current_thread_info()->utraps[0] = 1;
                        memcpy(current_thread_info()->utraps+1, p+1,
                               UT_TRAP_INSTRUCTION_31*sizeof(long));
                }
        }
        if (old_p) {
                if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
                        return -EFAULT;
        }
        if (old_d) {
                if (put_user(NULL, old_d))
                        return -EFAULT;
        }
        current_thread_info()->utraps[type] = (long)new_p;

        return 0;
}
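
/* Example (editor's sketch, simplified): a user-space model of the
 * copy-on-write scheme above. utraps[0] is a reference count shared by
 * clones; a thread that installs a different handler while the table is
 * shared clones it first, so siblings keep their old handlers. The real
 * code also special-cases UTH_NOCHANGE and identical handlers.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define EX_NSLOTS 4     /* stand-in for UT_TRAP_INSTRUCTION_31 */

static long *ex_install(long *tab, int slot, long handler)
{
        if (tab[0] > 1) {                        /* shared: clone before writing */
                long *copy = malloc((EX_NSLOTS + 1) * sizeof(long));
                memcpy(copy + 1, tab + 1, EX_NSLOTS * sizeof(long));
                tab[0]--;                        /* drop our reference to the old table */
                copy[0] = 1;
                tab = copy;
        }
        tab[slot] = handler;
        return tab;
}

int main(void)
{
        long *shared = calloc(EX_NSLOTS + 1, sizeof(long));
        shared[0] = 2;                           /* two threads share the table */

        long *mine = ex_install(shared, 1, 0xbeef);
        printf("old: refs=%ld slot1=%ld; new: slot1=%#lx\n",
               shared[0], shared[1], (unsigned long) mine[1]);
        free(mine);
        free(shared);
        return 0;
}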

asmlinkage long sparc_memory_ordering(unsigned long model,
                                      struct pt_regs *regs)
{
        if (model >= 3)
                return -EINVAL;
        regs->tstate = (regs->tstate & ~TSTATE_MM) | (model << 14);
        return 0;
}
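
/* Example (editor's sketch): the SPARC V9 memory models selected above.
 * model 0 = TSO, 1 = PSO, 2 = RMO go into the two-bit TSTATE.MM field
 * (hence "model << 14" and the model >= 3 rejection); the 0xc000 mask
 * used here is an assumption matching that shift.
 */
#include <stdio.h>

#define EX_TSTATE_MM 0xc000UL   /* assumed mask for bits 15:14 */

int main(void)
{
        unsigned long tstate = 0x12345678UL;
        unsigned long model  = 2;                /* RMO */

        tstate = (tstate & ~EX_TSTATE_MM) | (model << 14);
        printf("MM field: %lu\n", (tstate & EX_TSTATE_MM) >> 14); /* 2 */
        return 0;
}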

SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act,
                struct sigaction __user *, oact, void __user *, restorer,
                size_t, sigsetsize)
{
        struct k_sigaction new_ka, old_ka;
        int ret;

        /* XXX: Don't preclude handling different sized sigset_t's. */
        if (sigsetsize != sizeof(sigset_t))
                return -EINVAL;

        if (act) {
                new_ka.ka_restorer = restorer;
                if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
                        return -EFAULT;
        }

        ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

        if (!ret && oact) {
                if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
                        return -EFAULT;
        }

        return ret;
}

/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename,
                  const char *const argv[],
                  const char *const envp[])
{
        long __res;
        register long __g1 __asm__ ("g1") = __NR_execve;
        register long __o0 __asm__ ("o0") = (long)(filename);
        register long __o1 __asm__ ("o1") = (long)(argv);
        register long __o2 __asm__ ("o2") = (long)(envp);
        asm volatile ("t 0x6d\n\t"                /* 64-bit syscall trap */
                      "sub %%g0, %%o0, %0\n\t"    /* assume failure: res = -o0 (negated errno) */
                      "movcc %%xcc, %%o0, %0\n\t" /* carry clear means success: res = o0 */
                      : "=r" (__res), "=&r" (__o0)
                      : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__g1)
                      : "cc");
        return __res;
}

asmlinkage long sys_kern_features(void)
{
        return KERN_FEATURE_MIXED_MODE_STACK;
}