Linux Kernel 3.7.1
maccess.c
/*
 * Access kernel memory without faulting -- s390 specific implementation.
 *
 * Copyright IBM Corp. 2009
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 */

#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <asm/ctl_reg.h>

/*
 * This function writes to kernel memory bypassing DAT and possible
 * write protection. It copies one to four bytes from src to dst
 * using the stura instruction.
 * Returns the number of bytes copied or -EFAULT.
 */
static long probe_kernel_write_odd(void *dst, const void *src, size_t size)
{
        unsigned long count, aligned;
        int offset, mask;
        int rc = -EFAULT;

        aligned = (unsigned long) dst & ~3UL;
        offset = (unsigned long) dst & 3;
        count = min_t(unsigned long, 4 - offset, size);
        mask = (0xf << (4 - count)) & 0xf;
        mask >>= offset;
        asm volatile(
                " bras 1,0f\n"
                " icm 0,0,0(%3)\n"
                "0: l 0,0(%1)\n"
                " lra %1,0(%1)\n"
                "1: ex %2,0(1)\n"
                "2: stura 0,%1\n"
                " la %0,0\n"
                "3:\n"
                EX_TABLE(0b,3b) EX_TABLE(1b,3b) EX_TABLE(2b,3b)
                : "+d" (rc), "+a" (aligned)
                : "a" (mask), "a" (src) : "cc", "memory", "0", "1");
        return rc ? rc : count;
}
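
/*
 * Worked example (editor's illustration): suppose dst sits at byte
 * offset 1 within its word and size == 2. Then
 *
 *      count = min(4 - 1, 2) = 2
 *      mask  = (0xf << (4 - 2)) & 0xf = 0xc,  and 0xc >> 1 = 0x6
 *
 * Mask 0x6 (binary 0110) makes the executed icm load two bytes from
 * src into bytes 1 and 2 of register 0, so the following stura
 * rewrites exactly the two target bytes of the aligned word.
 */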

long probe_kernel_write(void *dst, const void *src, size_t size)
{
        long copied = 0;

        while (size) {
                copied = probe_kernel_write_odd(dst, src, size);
                if (copied < 0)
                        break;
                dst += copied;
                src += copied;
                size -= copied;
        }
        return copied < 0 ? -EFAULT : 0;
}
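
/*
 * Minimal usage sketch (editor's illustration, not part of the original
 * file): example_flag and example_patch_flag() are hypothetical. Since
 * stura bypasses DAT and write protection, the write also succeeds if
 * the target page is mapped read-only.
 */
#if 0
static int example_flag;

static long example_patch_flag(int new_val)
{
        return probe_kernel_write(&example_flag, &new_val, sizeof(new_val));
}
#endif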

/*
 * mvcle-based copy loop used by memcpy_real() below; the caller has
 * already disabled DAT. Uses two even/odd register pairs (2/3 for the
 * destination, 4/5 for the source). mvcle may stop after moving only
 * part of the data (condition code 3), hence the "jo 0b" retry.
 */
static int __memcpy_real(void *dest, void *src, size_t count)
{
        register unsigned long _dest asm("2") = (unsigned long) dest;
        register unsigned long _len1 asm("3") = (unsigned long) count;
        register unsigned long _src asm("4") = (unsigned long) src;
        register unsigned long _len2 asm("5") = (unsigned long) count;
        int rc = -EFAULT;

        asm volatile (
                "0: mvcle %1,%2,0x0\n"
                "1: jo 0b\n"
                " lhi %0,0x0\n"
                "2:\n"
                EX_TABLE(1b,2b)
                : "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
                  "+d" (_len2), "=m" (*((long *) dest))
                : "m" (*((long *) src))
                : "cc", "memory");
        return rc;
}

/*
 * Copy memory in real mode (kernel to kernel)
 */
int memcpy_real(void *dest, void *src, size_t count)
{
        unsigned long flags;
        int rc;

        if (!count)
                return 0;
        local_irq_save(flags);
        __arch_local_irq_stnsm(0xfbUL); /* disable DAT */
        rc = __memcpy_real(dest, src, count);
        local_irq_restore(flags);
        return rc;
}
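
/*
 * Usage sketch (editor's illustration): copy a piece of real storage,
 * e.g. a remote CPU's lowcore, into a virtual buffer.
 * example_read_real() and real_addr are hypothetical.
 */
#if 0
static int example_read_real(void *buf, unsigned long real_addr, size_t len)
{
        return memcpy_real(buf, (void *) real_addr, len);
}
#endif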

/*
 * Copy memory in absolute mode (kernel to kernel)
 */
void memcpy_absolute(void *dest, void *src, size_t count)
{
        unsigned long cr0, flags, prefix;

        flags = arch_local_irq_save();
        __ctl_store(cr0, 0, 0);
        __ctl_clear_bit(0, 28); /* disable lowcore protection */
        prefix = store_prefix();
        if (prefix) {
                local_mcck_disable();
                set_prefix(0);
                memcpy(dest, src, count);
                set_prefix(prefix);
                local_mcck_enable();
        } else {
                memcpy(dest, src, count);
        }
        __ctl_load(cr0, 0, 0);
        arch_local_irq_restore(flags);
}
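
/*
 * Usage sketch (editor's illustration): store into the absolute zero
 * page. A plain store through &S390_lowcore would land in this CPU's
 * prefixed lowcore; memcpy_absolute() resets the prefix for the copy so
 * the bytes reach physical address 0. The restart_psw field is used
 * here only as an example target.
 */
#if 0
static void example_set_restart_psw(psw_t psw)
{
        memcpy_absolute(&S390_lowcore.restart_psw, &psw, sizeof(psw));
}
#endif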

/*
 * Copy memory from kernel (real) to user (virtual)
 */
int copy_to_user_real(void __user *dest, void *src, size_t count)
{
        int offs = 0, size, rc;
        char *buf;

        buf = (char *) __get_free_page(GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
        rc = -EFAULT;
        while (offs < count) {
                size = min(PAGE_SIZE, count - offs);
                if (memcpy_real(buf, src + offs, size))
                        goto out;
                if (copy_to_user(dest + offs, buf, size))
                        goto out;
                offs += size;
        }
        rc = 0;
out:
        free_page((unsigned long) buf);
        return rc;
}
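
/*
 * Usage sketch (editor's illustration): a read() handler in the style
 * of a dump interface such as zcore, handing real storage at *ppos to
 * user space. Everything except copy_to_user_real() is hypothetical,
 * and range checking is omitted.
 */
#if 0
static ssize_t example_read(struct file *file, char __user *ubuf,
                            size_t count, loff_t *ppos)
{
        int rc;

        rc = copy_to_user_real(ubuf, (void *) (unsigned long) *ppos, count);
        if (rc)
                return rc;
        *ppos += count;
        return count;
}
#endif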

/*
 * Copy memory from user (virtual) to kernel (real)
 */
int copy_from_user_real(void *dest, void __user *src, size_t count)
{
        int offs = 0, size, rc;
        char *buf;

        buf = (char *) __get_free_page(GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
        rc = -EFAULT;
        while (offs < count) {
                size = min(PAGE_SIZE, count - offs);
                if (copy_from_user(buf, src + offs, size))
                        goto out;
                if (memcpy_real(dest + offs, buf, size))
                        goto out;
                offs += size;
        }
        rc = 0;
out:
        free_page((unsigned long) buf);
        return rc;
}
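
/*
 * The mirror direction, sketched the same way (editor's illustration):
 * write a user buffer into real storage at a hypothetical physical
 * address.
 */
#if 0
static int example_write_real(unsigned long real_addr, void __user *ubuf,
                              size_t count)
{
        return copy_from_user_real((void *) real_addr, ubuf, count);
}
#endif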

/*
 * Check if physical address is within prefix or zero page
 */
static int is_swapped(unsigned long addr)
{
        unsigned long lc;
        int cpu;

        if (addr < sizeof(struct _lowcore))
                return 1;
        for_each_online_cpu(cpu) {
                lc = (unsigned long) lowcore_ptr[cpu];
                if (addr > lc + sizeof(struct _lowcore) - 1 || addr < lc)
                        continue;
                return 1;
        }
        return 0;
}
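
/*
 * Worked example (editor's illustration), assuming an 8 KB lowcore
 * (sizeof(struct _lowcore) == 0x2000) and CPU 1's lowcore remapped to
 * 0x7000: is_swapped(0x10) and is_swapped(0x7010) both return 1 (zero
 * page resp. CPU 1's prefix area), while is_swapped(0x100000) returns 0.
 */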

/*
 * Convert a physical pointer for /dev/mem access
 *
 * For swapped prefix pages a new buffer is returned that contains a
 * copy of the absolute memory. The buffer size is at most one page.
 */
void *xlate_dev_mem_ptr(unsigned long addr)
{
        void *bounce = (void *) addr;
        unsigned long size;

        get_online_cpus();
        preempt_disable();
        if (is_swapped(addr)) {
                size = PAGE_SIZE - (addr & ~PAGE_MASK);
                bounce = (void *) __get_free_page(GFP_ATOMIC);
                if (bounce)
                        memcpy_absolute(bounce, (void *) addr, size);
        }
        preempt_enable();
        put_online_cpus();
        return bounce;
}

/*
 * Free converted buffer for /dev/mem access (if necessary)
 */
void unxlate_dev_mem_ptr(unsigned long addr, void *buf)
{
        if ((void *) addr != buf)
                free_page((unsigned long) buf);
}
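
/*
 * Usage sketch (editor's illustration): the xlate/unxlate pair as a
 * /dev/mem-style read path would use it. Length handling is simplified;
 * a real caller must not read past the single page that
 * xlate_dev_mem_ptr() may have copied.
 */
#if 0
static int example_read_devmem(unsigned long phys, char __user *ubuf,
                               size_t len)
{
        void *ptr = xlate_dev_mem_ptr(phys);
        int rc = 0;

        if (!ptr)
                return -ENOMEM; /* bounce page allocation failed */
        if (copy_to_user(ubuf, ptr, len))
                rc = -EFAULT;
        unxlate_dev_mem_ptr(phys, ptr);
        return rc;
}
#endif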