20 #include <linux/export.h>
27 #include <linux/hash.h>
30 #include <asm/tlbflush.h>
33 #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
/*
 * nr_free_highpages - total count of free pages in highmem zones.
 *
 * NOTE(review): this is an elided fragment; the enclosing loop header,
 * braces, and the final return are not visible here. The visible lines
 * accumulate the ZONE_HIGHMEM free-page counter per node, and also add
 * the ZONE_MOVABLE counter when the movable zone is treated as highmem
 * (the zone_movable_is_highmem() test) — presumably iterating over all
 * online nodes; confirm against the full source.
 */
53 unsigned int nr_free_highpages (
void)
56 unsigned int pages = 0;
/* Per-node ZONE_HIGHMEM contribution (second argument elided). */
59 pages += zone_page_state(&pgdat->
node_zones[ZONE_HIGHMEM],
/* ZONE_MOVABLE counts as highmem only on some configurations. */
61 if (zone_movable_is_highmem())
62 pages += zone_page_state(
71 static unsigned int last_pkmap_nr;
/*
 * kmap_lock wrappers.
 *
 * When the architecture provides an atomic-context getter
 * (ARCH_NEEDS_KMAP_HIGH_GET), the lock must exclude interrupt context,
 * so the _irq/_irqsave spinlock forms are used. Otherwise plain
 * spin_lock suffices and the *_any(flags) variants merely consume the
 * unused flags argument with the (void)(flags) cast.
 */
83 #ifdef ARCH_NEEDS_KMAP_HIGH_GET
84 #define lock_kmap() spin_lock_irq(&kmap_lock)
85 #define unlock_kmap() spin_unlock_irq(&kmap_lock)
86 #define lock_kmap_any(flags) spin_lock_irqsave(&kmap_lock, flags)
87 #define unlock_kmap_any(flags) spin_unlock_irqrestore(&kmap_lock, flags)
/* NOTE(review): the #else directive (original line 88) is elided from
 * this fragment; the definitions below are the non-irq variants. */
89 #define lock_kmap() spin_lock(&kmap_lock)
90 #define unlock_kmap() spin_unlock(&kmap_lock)
91 #define lock_kmap_any(flags) \
92 do { spin_lock(&kmap_lock); (void)(flags); } while (0)
93 #define unlock_kmap_any(flags) \
94 do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
/*
 * Fragment of a vaddr -> struct page lookup (presumably kmap_to_page;
 * the signature and the surrounding range check are elided). The
 * visible lines cast the virtual address to an integer and, for an
 * address inside the pkmap window, return the page recorded in the
 * corresponding pkmap page-table entry.
 */
99 unsigned long addr = (
unsigned long)vaddr;
103 return pte_page(pkmap_page_table[i]);
/*
 * flush_all_zero_pkmaps - reclaim pkmap entries whose refcount has
 * dropped to the "unused but not yet flushed" state.
 *
 * NOTE(review): fragment — the scan loop, the count reset, and the
 * final TLB flush are elided. The visible lines skip entries whose
 * pkmap_count is not exactly 1 (i.e. still mapped or already free),
 * read back the page from the page-table entry, clear that entry, and
 * remove the page's cached virtual-address binding via
 * set_page_address(page, NULL).
 */
109 static void flush_all_zero_pkmaps(
void)
/* Only count==1 entries are reclaimable here. */
125 if (pkmap_count[i] != 1)
139 page =
pte_page(pkmap_page_table[i]);
/* pte_clear(...) — leading arguments elided in this fragment. */
141 &pkmap_page_table[i]);
143 set_page_address(page,
NULL);
156 flush_all_zero_pkmaps();
/*
 * map_new_virtual - find a free pkmap slot and map @page into it,
 * returning the slot's kernel virtual address.
 *
 * NOTE(review): fragment — the scan loop, the sleep-and-retry path for
 * when no slot is free, and the set_pte_at() call are elided. Visible
 * logic: when the slot cursor wraps to 0, stale zero-count entries are
 * reclaimed first (flush_all_zero_pkmaps); a slot is free when its
 * pkmap_count is 0; on success the count is set to 1 and the
 * page -> vaddr binding is cached via set_page_address().
 */
160 static inline unsigned long map_new_virtual(
struct page *page)
/* Cursor wrapped: reclaim before reusing low slots. */
170 if (!last_pkmap_nr) {
171 flush_all_zero_pkmaps();
/* count == 0 means the slot is free. */
174 if (!pkmap_count[last_pkmap_nr])
204 pkmap_count[last_pkmap_nr] = 1;
205 set_page_address(page, (
void *)vaddr);
/*
 * Fragment of the slow mapping path (presumably kmap_high; signature,
 * locking, and the fast already-mapped path are elided). A new pkmap
 * slot is allocated for the page and its virtual address is returned
 * to the caller.
 */
229 vaddr = map_new_virtual(page);
233 return (
void*)
vaddr;
/*
 * Section compiled only when the architecture needs an atomic-context
 * lookup of an existing highmem mapping (presumably kmap_high_get; the
 * function signature and the address/refcount lookup between the lock
 * and unlock are elided). The irqsave lock variants are required here
 * because this path may run in interrupt context — which is exactly
 * why the lock macros above switch to _irq forms under this ifdef.
 */
238 #ifdef ARCH_NEEDS_KMAP_HIGH_GET
253 lock_kmap_any(flags);
259 unlock_kmap_any(flags);
260 return (
void*)
vaddr;
/*
 * Fragment of the unmap path (presumably kunmap_high; signature, the
 * slot-index computation, and the switch arms are elided). Under the
 * kmap lock the slot's refcount is decremented; the switch dispatches
 * on the new value (the 0 case would be a bug, 1 means "unused, flush
 * pending"). Before unlocking, the code checks whether any thread is
 * sleeping in map_new_virtual waiting for a free slot, so the wakeup
 * can be issued after the lock is dropped.
 */
278 lock_kmap_any(flags);
288 switch (--pkmap_count[nr]) {
/* Decide on the wakeup while still holding the lock. */
302 need_wakeup = waitqueue_active(&pkmap_map_wait);
304 unlock_kmap_any(flags);
/*
 * Hash table mapping struct page -> cached kernel virtual address,
 * used when the architecture defines HASHED_PAGE_VIRTUAL (i.e. the
 * address is not stored in struct page itself).
 *
 * NOTE(review): fragment — the struct bodies and the htable array
 * definition are elided.
 */
314 #if defined(HASHED_PAGE_VIRTUAL)
/* 2^7 = 128 hash buckets. */
316 #define PA_HASH_ORDER 7
/* One page<->vaddr binding; lives either in the free pool or in a
 * hash bucket's list. */
321 struct page_address_map {
/* Free list of unused page_address_map entries. */
330 static struct list_head page_address_pool;
/* One hash bucket: a list head plus (presumably) a per-bucket lock —
 * the members are elided here. */
336 static struct page_address_slot {
/*
 * page_slot - hash bucket for @page, chosen by hashing the page
 * pointer itself into one of the 2^PA_HASH_ORDER slots.
 * NOTE(review): the function's braces are elided in this fragment.
 */
341 static struct page_address_slot *page_slot(
const struct page *page)
343 return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
/*
 * Fragment of the page -> virtual-address lookup (presumably
 * page_address; signature, bucket locking, and the list walk are
 * elided). Visible logic: lowmem pages are resolved directly via
 * lowmem_page_address() with no table lookup; otherwise the page's
 * hash bucket is searched for a page_address_map entry whose ->page
 * matches, under the bucket lock (released at original line 375).
 */
356 struct page_address_slot *pas;
/* Fast path: lowmem pages have a direct linear mapping. */
358 if (!PageHighMem(page))
359 return lowmem_page_address(page);
361 pas = page_slot(page);
364 if (!list_empty(&pas->lh)) {
365 struct page_address_map *
pam;
/* Match found in the bucket's list. */
368 if (pam->page == page) {
375 spin_unlock_irqrestore(&pas->lock, flags);
/*
 * set_page_address - record or erase the cached kernel virtual address
 * of a highmem @page in the hash table.
 *
 * NOTE(review): fragment — the set/erase branch structure, list
 * add/del calls, and lock acquisitions are elided; only declarations,
 * assertions, and unlock sites are visible. From what is visible:
 * only highmem pages may be cached here (BUG_ON otherwise); when
 * installing a mapping, a page_address_map entry is taken from
 * page_address_pool (which must not be empty) under pool_lock, filled
 * in with the page's vaddr, and presumably linked into the page's hash
 * bucket under the bucket lock; when erasing, the bucket is searched
 * for the matching entry, which is presumably returned to the pool
 * under pool_lock. Confirm branch details against the full source.
 */
386 void set_page_address(
struct page *page,
void *
virtual)
389 struct page_address_slot *pas;
390 struct page_address_map *
pam;
/* Lowmem pages never go through the hash table. */
392 BUG_ON(!PageHighMem(page));
394 pas = page_slot(page);
/* The pool is statically sized to LAST_PKMAP entries, so it can
 * never run dry while pkmap slots are available. */
396 BUG_ON(list_empty(&page_address_pool));
/* list_entry(..., struct page_address_map, list) — leading
 * arguments elided in this fragment. */
400 struct page_address_map,
list);
402 spin_unlock_irqrestore(&pool_lock, flags);
405 pam->virtual =
virtual;
409 spin_unlock_irqrestore(&pas->lock, flags);
/* Erase path: find the entry for this page in its bucket. */
413 if (pam->page == page) {
415 spin_unlock_irqrestore(&pas->lock, flags);
418 spin_unlock_irqrestore(&pool_lock, flags);
422 spin_unlock_irqrestore(&pas->lock, flags);
428 static struct page_address_map page_address_maps[LAST_PKMAP];
/*
 * page_address_init - boot-time setup of the page->vaddr hash table:
 * seed the free pool with every static page_address_map entry, then
 * initialize each hash bucket's list head (and presumably its lock —
 * that line is elided from this fragment).
 */
430 void __init page_address_init(
void)
434 INIT_LIST_HEAD(&page_address_pool);
/* All LAST_PKMAP map entries start out free. */
435 for (i = 0; i <
ARRAY_SIZE(page_address_maps); i++)
436 list_add(&page_address_maps[i].
list, &page_address_pool);
437 for (i = 0; i <
ARRAY_SIZE(page_address_htable); i++) {
438 INIT_LIST_HEAD(&page_address_htable[i].lh);