Linux Kernel 3.7.1
pagemap.h
1 #ifndef _LINUX_PAGEMAP_H
2 #define _LINUX_PAGEMAP_H
3 
4 /*
5  * Copyright 1995 Linus Torvalds
6  */
7 #include <linux/mm.h>
8 #include <linux/fs.h>
9 #include <linux/list.h>
10 #include <linux/highmem.h>
11 #include <linux/compiler.h>
12 #include <asm/uaccess.h>
13 #include <linux/gfp.h>
14 #include <linux/bitops.h>
15 #include <linux/hardirq.h> /* for in_interrupt() */
16 #include <linux/hugetlb_inline.h>
17 
18 /*
19  * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page
20  * allocation mode flags.
21  */
22 enum mapping_flags {
23  AS_EIO = __GFP_BITS_SHIFT + 0, /* IO error on async write */
24  AS_ENOSPC = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */
25  AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */
26  AS_UNEVICTABLE = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */
27 };
28 
29 static inline void mapping_set_error(struct address_space *mapping, int error)
30 {
31  if (unlikely(error)) {
32  if (error == -ENOSPC)
33  set_bit(AS_ENOSPC, &mapping->flags);
34  else
35  set_bit(AS_EIO, &mapping->flags);
36  }
37 }
38 
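/*
 * Example (illustrative sketch, not part of the header proper): an async
 * writeback completion path records a failure with mapping_set_error(), and
 * a later sync path reports it back to the caller exactly once.  The helper
 * below is hypothetical; mm/filemap.c does the equivalent
 * test_and_clear_bit() sequence.
 */
static inline int example_report_mapping_error(struct address_space *mapping)
{
	int ret = 0;

	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	/* if both bits are set, -EIO wins in this sketch */
	if (test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;
	return ret;
}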
39 static inline void mapping_set_unevictable(struct address_space *mapping)
40 {
41  set_bit(AS_UNEVICTABLE, &mapping->flags);
42 }
43 
44 static inline void mapping_clear_unevictable(struct address_space *mapping)
45 {
46  clear_bit(AS_UNEVICTABLE, &mapping->flags);
47 }
48 
49 static inline int mapping_unevictable(struct address_space *mapping)
50 {
51  if (mapping)
52  return test_bit(AS_UNEVICTABLE, &mapping->flags);
53  return !!mapping;
54 }
55 
56 static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
57 {
58  return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
59 }
60 
61 /*
62  * This is non-atomic. Only to be used before the mapping is activated.
63  * Probably needs a barrier...
64  */
65 static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
66 {
67  m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
68  (__force unsigned long)mask;
69 }
70 
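/*
 * Example (illustrative sketch, not part of the header proper): a filesystem
 * that must not recurse into itself while allocating pagecache pages can
 * clear __GFP_FS from the mapping's allocation mask during inode setup,
 * i.e. before the mapping sees any use (the setter above is non-atomic).
 * The helper name is hypothetical.
 */
static inline void example_disable_fs_reclaim(struct address_space *mapping)
{
	mapping_set_gfp_mask(mapping,
			     mapping_gfp_mask(mapping) & ~__GFP_FS);
}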
71 /*
72  * The page cache can be done in larger chunks than
73  * one page, because it allows for more efficient
74  * throughput (it can then be mapped into user
75  * space in smaller chunks for the same flexibility).
76  *
77  * Or rather, it _will_ be done in larger chunks.
78  */
79 #define PAGE_CACHE_SHIFT PAGE_SHIFT
80 #define PAGE_CACHE_SIZE PAGE_SIZE
81 #define PAGE_CACHE_MASK PAGE_MASK
82 #define PAGE_CACHE_ALIGN(addr) (((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
83 
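/*
 * Example (illustrative sketch, not part of the header proper): converting a
 * file size in bytes into pagecache units.  With the identities above and
 * 4096-byte pages, i_size = 10000 needs 3 pages and PAGE_CACHE_ALIGN()
 * rounds it up to 12288.
 */
static inline pgoff_t example_bytes_to_pages(loff_t i_size)
{
	return (i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
}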
84 #define page_cache_get(page) get_page(page)
85 #define page_cache_release(page) put_page(page)
86 void release_pages(struct page **pages, int nr, int cold);
87 
88 /*
89  * speculatively take a reference to a page.
90  * If the page is free (_count == 0), then _count is untouched, and 0
91  * is returned. Otherwise, _count is incremented by 1 and 1 is returned.
92  *
93  * This function must be called inside the same rcu_read_lock() section as has
94  * been used to lookup the page in the pagecache radix-tree (or page table):
95  * this allows allocators to use a synchronize_rcu() to stabilize _count.
96  *
97  * Unless an RCU grace period has passed, the count of all pages coming out
98  * of the allocator must be considered unstable. page_count may return higher
99  * than expected, and put_page must be able to do the right thing when the
100  * page has been finished with, no matter what it is subsequently allocated
101  * for (because put_page is what is used here to drop an invalid speculative
102  * reference).
103  *
104  * This is the interesting part of the lockless pagecache (and lockless
105  * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
106  * has the following pattern:
107  * 1. find page in radix tree
108  * 2. conditionally increment refcount
109  * 3. check the page is still in pagecache (if no, goto 1)
110  *
111  * Remove-side that cares about stability of _count (eg. reclaim) has the
112  * following (with tree_lock held for write):
113  * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
114  * B. remove page from pagecache
115  * C. free the page
116  *
117  * There are 2 critical interleavings that matter:
118  * - 2 runs before A: in this case, A sees elevated refcount and bails out
119  * - A runs before 2: in this case, 2 sees zero refcount and retries;
120  * subsequently, B will complete and 1 will find no page, causing the
121  * lookup to return NULL.
122  *
123  * It is possible that between 1 and 2, the page is removed then the exact same
124  * page is inserted into the same position in pagecache. That's OK: the
125  * old find_get_page using tree_lock could equally have run before or after
126  * such a re-insertion, depending on order that locks are granted.
127  *
128  * Lookups racing against pagecache insertion isn't a big problem: either 1
129  * will find the page or it will not. Likewise, the old find_get_page could run
130  * either before the insertion or afterwards, depending on timing.
131  */
132 static inline int page_cache_get_speculative(struct page *page)
133 {
134  VM_BUG_ON(in_interrupt());
135 
136 #if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
137 # ifdef CONFIG_PREEMPT_COUNT
138  VM_BUG_ON(!in_atomic());
139 # endif
140  /*
141  * Preempt must be disabled here - we rely on rcu_read_lock doing
142  * this for us.
143  *
144  * Pagecache won't be truncated from interrupt context, so if we have
145  * found a page in the radix tree here, we have pinned its refcount by
146  * disabling preempt, and hence no need for the "speculative get" that
147  * SMP requires.
148  */
149  VM_BUG_ON(page_count(page) == 0);
150  atomic_inc(&page->_count);
151 
152 #else
153  if (unlikely(!get_page_unless_zero(page))) {
154  /*
155  * Either the page has been freed, or will be freed.
156  * In either case, retry here and the caller should
157  * do the right thing (see comments above).
158  */
159  return 0;
160  }
161 #endif
162  VM_BUG_ON(PageTail(page));
163 
164  return 1;
165 }
166 
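/*
 * Example (illustrative sketch, not part of the header proper): the
 * lookup-side 1/2/3 pattern from the comment above, roughly the shape of
 * find_get_page() in mm/filemap.c.  The real code walks a radix-tree slot
 * and handles exceptional entries; this simplified version assumes
 * <linux/radix-tree.h> and ignores those details.
 */
static inline struct page *example_lockless_lookup(struct address_space *mapping,
						   pgoff_t index)
{
	struct page *page;

	rcu_read_lock();
repeat:
	/* 1. find page in radix tree */
	page = radix_tree_lookup(&mapping->page_tree, index);
	if (page) {
		/* 2. conditionally increment refcount */
		if (!page_cache_get_speculative(page))
			goto repeat;
		/* 3. check the page is still in pagecache; if not, drop and retry */
		if (unlikely(page != radix_tree_lookup(&mapping->page_tree, index))) {
			page_cache_release(page);
			goto repeat;
		}
	}
	rcu_read_unlock();
	return page;
}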
167 /*
168  * Same as above, but add instead of inc (could just be merged)
169  */
170 static inline int page_cache_add_speculative(struct page *page, int count)
171 {
172  VM_BUG_ON(in_interrupt());
173 
174 #if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
175 # ifdef CONFIG_PREEMPT_COUNT
176  VM_BUG_ON(!in_atomic());
177 # endif
178  VM_BUG_ON(page_count(page) == 0);
179  atomic_add(count, &page->_count);
180 
181 #else
182  if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
183  return 0;
184 #endif
185  VM_BUG_ON(PageCompound(page) && page != compound_head(page));
186 
187  return 1;
188 }
189 
190 static inline int page_freeze_refs(struct page *page, int count)
191 {
192  return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
193 }
194 
195 static inline void page_unfreeze_refs(struct page *page, int count)
196 {
197  VM_BUG_ON(page_count(page) != 0);
198  VM_BUG_ON(count == 0);
199 
200  atomic_set(&page->_count, count);
201 }
202 
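/*
 * Example (illustrative sketch, not part of the header proper): the remove
 * side A/B/C sequence from the big comment above, loosely modelled on
 * __remove_mapping() in mm/vmscan.c.  The dirty/writeback checks and the
 * swapcache case of the real function are omitted; __delete_from_page_cache()
 * is declared further down in this header.
 */
static inline int example_remove_side(struct address_space *mapping,
				      struct page *page)
{
	spin_lock_irq(&mapping->tree_lock);
	/*
	 * A. atomically check the refcount is the expected value and zero it
	 *    (2 = one ref held by the pagecache, one by the isolating caller)
	 */
	if (!page_freeze_refs(page, 2)) {
		/* a speculative lookup (step 2) won the race; keep the page */
		spin_unlock_irq(&mapping->tree_lock);
		return 0;
	}
	/* B. remove the page from the pagecache */
	__delete_from_page_cache(page);
	spin_unlock_irq(&mapping->tree_lock);
	/* C. the page can now be freed by the caller */
	return 1;
}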
203 #ifdef CONFIG_NUMA
204 extern struct page *__page_cache_alloc(gfp_t gfp);
205 #else
206 static inline struct page *__page_cache_alloc(gfp_t gfp)
207 {
208  return alloc_pages(gfp, 0);
209 }
210 #endif
211 
212 static inline struct page *page_cache_alloc(struct address_space *x)
213 {
214  return __page_cache_alloc(mapping_gfp_mask(x));
215 }
216 
217 static inline struct page *page_cache_alloc_cold(struct address_space *x)
218 {
219  return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
220 }
221 
222 static inline struct page *page_cache_alloc_readahead(struct address_space *x)
223 {
224  return __page_cache_alloc(mapping_gfp_mask(x) |
225    __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN);
226 }
227 
228 typedef int filler_t(void *, struct page *);
229 
230 extern struct page * find_get_page(struct address_space *mapping,
231  pgoff_t index);
232 extern struct page * find_lock_page(struct address_space *mapping,
233  pgoff_t index);
234 extern struct page * find_or_create_page(struct address_space *mapping,
235  pgoff_t index, gfp_t gfp_mask);
236 unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
237  unsigned int nr_pages, struct page **pages);
238 unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
239  unsigned int nr_pages, struct page **pages);
240 unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
241  int tag, unsigned int nr_pages, struct page **pages);
242 
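/*
 * Example (illustrative sketch, not part of the header proper): visiting
 * every page currently cached in [start, end] with find_get_pages().  Each
 * page returned by the gang lookup carries a reference that the caller must
 * drop.  The helper name is hypothetical.
 */
static inline void example_walk_range(struct address_space *mapping,
				      pgoff_t start, pgoff_t end)
{
	struct page *pages[16];
	unsigned int nr, i;

	while (start <= end &&
	       (nr = find_get_pages(mapping, start, 16, pages)) != 0) {
		for (i = 0; i < nr; i++) {
			struct page *page = pages[i];

			/* remember where to continue before dropping our ref */
			start = page->index + 1;
			if (page->index <= end) {
				/* ... inspect the page here ... */
			}
			page_cache_release(page);
		}
	}
}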
243 struct page *grab_cache_page_write_begin(struct address_space *mapping,
244  pgoff_t index, unsigned flags);
245 
246 /*
247  * Returns locked page at given index in given cache, creating it if needed.
248  */
249 static inline struct page *grab_cache_page(struct address_space *mapping,
250  pgoff_t index)
251 {
252  return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
253 }
254 
255 extern struct page * grab_cache_page_nowait(struct address_space *mapping,
256  pgoff_t index);
257 extern struct page * read_cache_page_async(struct address_space *mapping,
258  pgoff_t index, filler_t *filler, void *data);
259 extern struct page * read_cache_page(struct address_space *mapping,
260  pgoff_t index, filler_t *filler, void *data);
261 extern struct page * read_cache_page_gfp(struct address_space *mapping,
262  pgoff_t index, gfp_t gfp_mask);
263 extern int read_cache_pages(struct address_space *mapping,
264  struct list_head *pages, filler_t *filler, void *data);
265 
266 static inline struct page *read_mapping_page_async(
267  struct address_space *mapping,
268  pgoff_t index, void *data)
269 {
270  filler_t *filler = (filler_t *)mapping->a_ops->readpage;
271  return read_cache_page_async(mapping, index, filler, data);
272 }
273 
274 static inline struct page *read_mapping_page(struct address_space *mapping,
275  pgoff_t index, void *data)
276 {
277  filler_t *filler = (filler_t *)mapping->a_ops->readpage;
278  return read_cache_page(mapping, index, filler, data);
279 }
280 
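/*
 * Example (illustrative sketch, not part of the header proper): typical
 * filesystem use of read_mapping_page() to read one page of an inode's data
 * and map it.  On failure these helpers return an ERR_PTR(), not NULL.
 */
static inline int example_read_one_page(struct address_space *mapping,
					pgoff_t index)
{
	struct page *page = read_mapping_page(mapping, index, NULL);
	void *kaddr;

	if (IS_ERR(page))
		return PTR_ERR(page);

	kaddr = kmap(page);		/* <linux/highmem.h> is included above */
	/* ... consume the up-to-date page contents here ... */
	kunmap(page);

	page_cache_release(page);	/* drop the lookup reference */
	return 0;
}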
281 /*
282  * Return byte-offset into filesystem object for page.
283  */
284 static inline loff_t page_offset(struct page *page)
285 {
286  return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
287 }
288 
289 static inline loff_t page_file_offset(struct page *page)
290 {
291  return ((loff_t)page_file_index(page)) << PAGE_CACHE_SHIFT;
292 }
293 
294 extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
295  unsigned long address);
296 
297 static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
298  unsigned long address)
299 {
300  pgoff_t pgoff;
301  if (unlikely(is_vm_hugetlb_page(vma)))
302  return linear_hugepage_index(vma, address);
303  pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
304  pgoff += vma->vm_pgoff;
305  return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
306 }
307 
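/*
 * Worked example (illustrative, not part of the header proper): with
 * 4096-byte pages, a VMA with vm_start = 0x400000 and vm_pgoff = 16 maps the
 * user address 0x403000 to linear_page_index() == (0x3000 >> 12) + 16 == 19,
 * and page_offset() of the page at that index is 19 << PAGE_CACHE_SHIFT ==
 * 0x13000 bytes into the backing object.
 */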
308 extern void __lock_page(struct page *page);
309 extern int __lock_page_killable(struct page *page);
310 extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
311  unsigned int flags);
312 extern void unlock_page(struct page *page);
313 
314 static inline void __set_page_locked(struct page *page)
315 {
316  __set_bit(PG_locked, &page->flags);
317 }
318 
319 static inline void __clear_page_locked(struct page *page)
320 {
321  __clear_bit(PG_locked, &page->flags);
322 }
323 
324 static inline int trylock_page(struct page *page)
325 {
326  return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
327 }
328 
329 /*
330  * lock_page may only be called if we have the page's inode pinned.
331  */
332 static inline void lock_page(struct page *page)
333 {
334  might_sleep();
335  if (!trylock_page(page))
336  __lock_page(page);
337 }
338 
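/*
 * Example (illustrative sketch, not part of the header proper): the common
 * pattern for operating on a locked pagecache page.  Re-checking
 * page->mapping after lock_page() guards against the page having been
 * truncated while we slept; the helper name is hypothetical.
 */
static inline int example_with_locked_page(struct address_space *mapping,
					   pgoff_t index)
{
	struct page *page = find_get_page(mapping, index);

	if (!page)
		return -ENOENT;

	lock_page(page);
	if (page->mapping != mapping) {
		/* truncated or migrated away while we waited for the lock */
		unlock_page(page);
		page_cache_release(page);
		return -EAGAIN;
	}

	/* ... the page is locked and still attached to this mapping ... */

	unlock_page(page);
	page_cache_release(page);
	return 0;
}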
339 /*
340  * lock_page_killable is like lock_page but can be interrupted by fatal
341  * signals. It returns 0 if it locked the page and -EINTR if it was
342  * killed while waiting.
343  */
344 static inline int lock_page_killable(struct page *page)
345 {
346  might_sleep();
347  if (!trylock_page(page))
348  return __lock_page_killable(page);
349  return 0;
350 }
351 
352 /*
353  * lock_page_or_retry - Lock the page, unless this would block and the
354  * caller indicated that it can handle a retry.
355  */
356 static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
357  unsigned int flags)
358 {
359  might_sleep();
360  return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
361 }
362 
363 /*
364  * This is exported only for wait_on_page_locked/wait_on_page_writeback.
365  * Never use this directly!
366  */
367 extern void wait_on_page_bit(struct page *page, int bit_nr);
368 
369 extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
370 
371 static inline int wait_on_page_locked_killable(struct page *page)
372 {
373  if (PageLocked(page))
374  return wait_on_page_bit_killable(page, PG_locked);
375  return 0;
376 }
377 
378 /*
379  * Wait for a page to be unlocked.
380  *
381  * This must be called with the caller "holding" the page,
382  * i.e. with an increased "page->count" so that the page won't
383  * go away during the wait.
384  */
385 static inline void wait_on_page_locked(struct page *page)
386 {
387  if (PageLocked(page))
388  wait_on_page_bit(page, PG_locked);
389 }
390 
391 /*
392  * Wait for a page to complete writeback
393  */
394 static inline void wait_on_page_writeback(struct page *page)
395 {
396  if (PageWriteback(page))
397  wait_on_page_bit(page, PG_writeback);
398 }
399 
400 extern void end_page_writeback(struct page *page);
401 
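/*
 * Example (illustrative sketch, not part of the header proper): code that is
 * about to invalidate or rewrite a page typically locks it and then waits
 * for any writeback already in flight, as the truncate/invalidate paths do.
 * Per the comment above, the caller must already hold a reference on the
 * page.  The helper name is hypothetical.
 */
static inline void example_stabilize_page(struct page *page)
{
	lock_page(page);
	wait_on_page_writeback(page);
	/* ... no other locker, and no I/O in flight, at this point ... */
	unlock_page(page);
}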
402 /*
403  * Add an arbitrary waiter to a page's wait queue
404  */
405 extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);
406 
407 /*
408  * Fault a userspace page into pagetables. Return non-zero on a fault.
409  *
410  * This assumes that two userspace pages are always sufficient. That's
411  * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
412  */
413 static inline int fault_in_pages_writeable(char __user *uaddr, int size)
414 {
415  int ret;
416 
417  if (unlikely(size == 0))
418  return 0;
419 
420  /*
421  * Writing zeroes into userspace here is OK, because we know that if
422  * the zero gets there, we'll be overwriting it.
423  */
424  ret = __put_user(0, uaddr);
425  if (ret == 0) {
426  char __user *end = uaddr + size - 1;
427 
428  /*
429  * If the page was already mapped, this will get a cache miss
430  * for sure, so try to avoid doing it.
431  */
432  if (((unsigned long)uaddr & PAGE_MASK) !=
433  ((unsigned long)end & PAGE_MASK))
434  ret = __put_user(0, end);
435  }
436  return ret;
437 }
438 
439 static inline int fault_in_pages_readable(const char __user *uaddr, int size)
440 {
441  volatile char c;
442  int ret;
443 
444  if (unlikely(size == 0))
445  return 0;
446 
447  ret = __get_user(c, uaddr);
448  if (ret == 0) {
449  const char __user *end = uaddr + size - 1;
450 
451  if (((unsigned long)uaddr & PAGE_MASK) !=
452  ((unsigned long)end & PAGE_MASK)) {
453  ret = __get_user(c, end);
454  (void)c;
455  }
456  }
457  return ret;
458 }
459 
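/*
 * Example (illustrative sketch, not part of the header proper): why the
 * prefault helpers exist.  generic_perform_write() in mm/filemap.c faults
 * the user buffer in *before* taking the page lock, then does a non-faulting
 * copy while the destination page is locked, avoiding a deadlock when source
 * and destination overlap.  A heavily simplified, hypothetical version:
 */
static inline int example_write_one_page(struct address_space *mapping,
					 pgoff_t index,
					 const char __user *buf,
					 unsigned int bytes)
{
	struct page *page;
	char *kaddr;
	unsigned long left;

	if (bytes > PAGE_CACHE_SIZE)
		return -EINVAL;

	/* Prefault the source while no page lock is held. */
	if (fault_in_pages_readable(buf, bytes))
		return -EFAULT;

	page = grab_cache_page(mapping, index);		/* returned locked */
	if (!page)
		return -ENOMEM;

	/*
	 * Only a non-faulting copy is safe under the page lock; the real
	 * code drops the lock, prefaults again and retries if the copy
	 * comes up short.
	 */
	kaddr = kmap_atomic(page);
	left = __copy_from_user_inatomic(kaddr, buf, bytes);
	kunmap_atomic(kaddr);

	unlock_page(page);
	page_cache_release(page);

	return left ? -EFAULT : 0;
}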
460 /*
461  * Multipage variants of the above prefault helpers, useful if more than
462  * PAGE_SIZE of data needs to be prefaulted. These are separate from the above
463  * functions (which only handle up to PAGE_SIZE) to avoid clobbering the
464  * filemap.c hotpaths.
465  */
466 static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
467 {
468  int ret = 0;
469  char __user *end = uaddr + size - 1;
470 
471  if (unlikely(size == 0))
472  return ret;
473 
474  /*
475  * Writing zeroes into userspace here is OK, because we know that if
476  * the zero gets there, we'll be overwriting it.
477  */
478  while (uaddr <= end) {
479  ret = __put_user(0, uaddr);
480  if (ret != 0)
481  return ret;
482  uaddr += PAGE_SIZE;
483  }
484 
485  /* Check whether the range spilled into the next page. */
486  if (((unsigned long)uaddr & PAGE_MASK) ==
487  ((unsigned long)end & PAGE_MASK))
488  ret = __put_user(0, end);
489 
490  return ret;
491 }
492 
493 static inline int fault_in_multipages_readable(const char __user *uaddr,
494  int size)
495 {
496  volatile char c;
497  int ret = 0;
498  const char __user *end = uaddr + size - 1;
499 
500  if (unlikely(size == 0))
501  return ret;
502 
503  while (uaddr <= end) {
504  ret = __get_user(c, uaddr);
505  if (ret != 0)
506  return ret;
507  uaddr += PAGE_SIZE;
508  }
509 
510  /* Check whether the range spilled into the next page. */
511  if (((unsigned long)uaddr & PAGE_MASK) ==
512  ((unsigned long)end & PAGE_MASK)) {
513  ret = __get_user(c, end);
514  (void)c;
515  }
516 
517  return ret;
518 }
519 
520 int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
521  pgoff_t index, gfp_t gfp_mask);
522 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
523  pgoff_t index, gfp_t gfp_mask);
524 extern void delete_from_page_cache(struct page *page);
525 extern void __delete_from_page_cache(struct page *page);
526 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
527 
528 /*
529  * Like add_to_page_cache_locked, but used to add newly allocated pages:
530  * the page is new, so we can just run __set_page_locked() against it.
531  */
532 static inline int add_to_page_cache(struct page *page,
533  struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
534 {
535  int error;
536 
537  __set_page_locked(page);
538  error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
539  if (unlikely(error))
540  __clear_page_locked(page);
541  return error;
542 }
543 
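/*
 * Example (illustrative sketch, not part of the header proper): inserting a
 * freshly allocated page into an inode's pagecache and onto the LRU, the way
 * readahead-style code does it.  On success the caller holds a locked page
 * with an extra reference, just as if find_or_create_page() had created it.
 * The helper name is hypothetical.
 */
static inline struct page *example_add_new_page(struct address_space *mapping,
						pgoff_t index)
{
	struct page *page = page_cache_alloc_cold(mapping);

	if (!page)
		return NULL;

	if (add_to_page_cache_lru(page, mapping, index,
				  mapping_gfp_mask(mapping))) {
		/* probably lost a race with someone else adding this index */
		page_cache_release(page);
		return NULL;
	}
	return page;	/* locked, referenced, and on the LRU */
}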
544 #endif /* _LINUX_PAGEMAP_H */