Linux Kernel 3.7.1
uncached.c
/*
 * Copyright (C) 2001-2008 Silicon Graphics, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * A simple uncached page allocator using the generic allocator. This
 * allocator first utilizes the spare (spill) pages found in the EFI
 * memmap and will then start converting cached pages to uncached ones,
 * a granule at a time. Node awareness is implemented by having a
 * pool of pages per node.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/efi.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <asm/page.h>
#include <asm/pal.h>
#include <asm/pgtable.h>
#include <linux/atomic.h>
#include <asm/tlbflush.h>
#include <asm/sn/arch.h>


extern void __init efi_memmap_walk_uc(efi_freemem_callback_t, void *);

struct uncached_pool {
	struct gen_pool *pool;
	struct mutex add_chunk_mutex;	/* serialize adding a converted chunk */
	int nchunks_added;		/* #of converted chunks added to pool */
	atomic_t status;		/* smp called function's return status */
};

#define MAX_CONVERTED_CHUNKS_PER_NODE	2

struct uncached_pool uncached_pools[MAX_NUMNODES];

static void uncached_ipi_visibility(void *data)
{
	int status;
	struct uncached_pool *uc_pool = (struct uncached_pool *)data;

	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
	if ((status != PAL_VISIBILITY_OK) &&
	    (status != PAL_VISIBILITY_OK_REMOTE_NEEDED))
		atomic_inc(&uc_pool->status);
}


static void uncached_ipi_mc_drain(void *data)
{
	int status;
	struct uncached_pool *uc_pool = (struct uncached_pool *)data;

	status = ia64_pal_mc_drain();
	if (status != PAL_STATUS_SUCCESS)
		atomic_inc(&uc_pool->status);
}


/*
 * Add a new chunk of uncached memory pages to the specified pool.
 *
 * @uc_pool: pool to add new chunk of uncached memory to
 * @nid: node id of node to allocate memory from, or -1
 *
 * This is accomplished by first allocating a granule of cached memory pages
 * and then converting them to uncached memory pages.
 */
static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
{
	struct page *page;
	int status, i, nchunks_added = uc_pool->nchunks_added;
	unsigned long c_addr, uc_addr;

	if (mutex_lock_interruptible(&uc_pool->add_chunk_mutex) != 0)
		return -1;	/* interrupted by a signal */

	if (uc_pool->nchunks_added > nchunks_added) {
		/* someone added a new chunk while we were waiting */
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return 0;
	}

	if (uc_pool->nchunks_added >= MAX_CONVERTED_CHUNKS_PER_NODE) {
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return -1;
	}

	/* attempt to allocate a granule's worth of cached memory pages */

	page = alloc_pages_exact_node(nid,
				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
				IA64_GRANULE_SHIFT-PAGE_SHIFT);
	if (!page) {
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return -1;
	}

	/* convert the memory pages from cached to uncached */

	c_addr = (unsigned long)page_address(page);
	uc_addr = c_addr - PAGE_OFFSET + __IA64_UNCACHED_OFFSET;

	/*
	 * There's a small race here where it's possible for someone to
	 * access the page through /dev/mem halfway through the conversion
	 * to uncached - not sure it's really worth bothering about
	 */
	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		SetPageUncached(&page[i]);

	flush_tlb_kernel_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
	if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
		atomic_set(&uc_pool->status, 0);
		status = smp_call_function(uncached_ipi_visibility, uc_pool, 1);
		if (status || atomic_read(&uc_pool->status))
			goto failed;
	} else if (status != PAL_VISIBILITY_OK)
		goto failed;

	preempt_disable();

	if (ia64_platform_is("sn2"))
		sn_flush_all_caches(uc_addr, IA64_GRANULE_SIZE);
	else
		flush_icache_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

	/* flush the just introduced uncached translation from the TLB */
	local_flush_tlb_all();

	preempt_enable();

	status = ia64_pal_mc_drain();
	if (status != PAL_STATUS_SUCCESS)
		goto failed;
	atomic_set(&uc_pool->status, 0);
	status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 1);
	if (status || atomic_read(&uc_pool->status))
		goto failed;

	/*
	 * The chunk of memory pages has been converted to uncached so now we
	 * can add it to the pool.
	 */
	status = gen_pool_add(uc_pool->pool, uc_addr, IA64_GRANULE_SIZE, nid);
	if (status)
		goto failed;

	uc_pool->nchunks_added++;
	mutex_unlock(&uc_pool->add_chunk_mutex);
	return 0;

	/* failed to convert or add the chunk so give it back to the kernel */
failed:
	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		ClearPageUncached(&page[i]);

	free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT);
	mutex_unlock(&uc_pool->add_chunk_mutex);
	return -1;
}
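
/*
 * Illustrative sketch (not part of the original file): the cached-to-
 * uncached address arithmetic used in uncached_add_chunk() above. Both
 * the cached (PAGE_OFFSET) and uncached (__IA64_UNCACHED_OFFSET) kernel
 * windows are identity mappings of the same physical memory, so
 * re-addressing a granule only swaps the window base; the physical
 * address is unchanged. The helper name example_cached_to_uncached()
 * is hypothetical.
 */
static inline unsigned long example_cached_to_uncached(unsigned long c_addr)
{
	/* strip the cached identity-map base, add the uncached one */
	return c_addr - PAGE_OFFSET + __IA64_UNCACHED_OFFSET;
}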


/*
 * uncached_alloc_page
 *
 * @starting_nid: node id of node to start with, or -1
 * @n_pages: number of contiguous pages to allocate
 *
 * Allocate the specified number of contiguous uncached pages on the
 * requested node. If not enough contiguous uncached pages are available
 * on the requested node, round-robin starting with the next higher node.
 */
unsigned long uncached_alloc_page(int starting_nid, int n_pages)
{
	unsigned long uc_addr;
	struct uncached_pool *uc_pool;
	int nid;

	if (unlikely(starting_nid >= MAX_NUMNODES))
		return 0;

	if (starting_nid < 0)
		starting_nid = numa_node_id();
	nid = starting_nid;

	do {
		if (!node_state(nid, N_HIGH_MEMORY))
			continue;
		uc_pool = &uncached_pools[nid];
		if (uc_pool->pool == NULL)
			continue;
		do {
			uc_addr = gen_pool_alloc(uc_pool->pool,
						 n_pages * PAGE_SIZE);
			if (uc_addr != 0)
				return uc_addr;
		} while (uncached_add_chunk(uc_pool, nid) == 0);

	} while ((nid = (nid + 1) % MAX_NUMNODES) != starting_nid);

	return 0;
}
EXPORT_SYMBOL(uncached_alloc_page);

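/*
 * Illustrative sketch (not part of the original file): how a caller
 * might obtain four contiguous uncached pages, starting on the local
 * node (-1 maps to numa_node_id() above) and letting the allocator
 * round-robin across the remaining nodes. The function name
 * example_alloc_uncached() is hypothetical.
 */
static unsigned long example_alloc_uncached(void)
{
	unsigned long uc_addr = uncached_alloc_page(-1, 4);

	if (uc_addr == 0)
		return 0;	/* every per-node pool was exhausted */

	/* the returned address is already uncached; use it directly */
	return uc_addr;
}
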

/*
 * uncached_free_page
 *
 * @uc_addr: uncached address of first page to free
 * @n_pages: number of contiguous pages to free
 *
 * Free the specified number of uncached pages.
 */
void uncached_free_page(unsigned long uc_addr, int n_pages)
{
	int nid = paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET);
	struct gen_pool *pool = uncached_pools[nid].pool;

	if (unlikely(pool == NULL))
		return;

	if ((uc_addr & (0xfUL << 60)) != __IA64_UNCACHED_OFFSET)
		panic("uncached_free_page invalid address %lx\n", uc_addr);

	gen_pool_free(pool, uc_addr, n_pages * PAGE_SIZE);
}
EXPORT_SYMBOL(uncached_free_page);

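/*
 * Illustrative sketch (not part of the original file): a matching
 * release path for the allocation example above. The page count must
 * equal the count that was passed to uncached_alloc_page(); the
 * function name example_release_uncached() is hypothetical.
 */
static void example_release_uncached(unsigned long uc_addr)
{
	if (uc_addr != 0)
		uncached_free_page(uc_addr, 4);	/* same n_pages as the alloc */
}
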

/*
 * uncached_build_memmap
 *
 * @uc_start: uncached starting address of a chunk of uncached memory
 * @uc_end: uncached ending address of a chunk of uncached memory
 * @arg: ignored (NULL argument passed in on call to efi_memmap_walk_uc())
 *
 * Called at boot time to build a map of pages that can be used for
 * memory special operations.
 */
static int __init uncached_build_memmap(u64 uc_start, u64 uc_end, void *arg)
{
	int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET);
	struct gen_pool *pool = uncached_pools[nid].pool;
	size_t size = uc_end - uc_start;

	touch_softlockup_watchdog();

	if (pool != NULL) {
		memset((char *)uc_start, 0, size);
		(void) gen_pool_add(pool, uc_start, size, nid);
	}
	return 0;
}


static int __init uncached_init(void)
{
	int nid;

	for_each_node_state(nid, N_ONLINE) {
		uncached_pools[nid].pool = gen_pool_create(PAGE_SHIFT, nid);
		mutex_init(&uncached_pools[nid].add_chunk_mutex);
	}

	efi_memmap_walk_uc(uncached_build_memmap, NULL);
	return 0;
}

__initcall(uncached_init);