#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

/*
 * The page cache can be done in larger chunks than one page, so these
 * macros define the page-cache unit independently of PAGE_SIZE.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)

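/*
 * Illustrative usage (a sketch, not part of the original header): every
 * page_cache_get() must be balanced by a page_cache_release() once the
 * caller is done with the page, e.g.:
 *
 *	struct page *page = find_get_page(mapping, index);
 *	if (page) {
 *		... use the page ...
 *		page_cache_release(page);
 *	}
 */
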
/*
 * Speculative page cache references: under RCU, a page found in the radix
 * tree may be freed and reused at any time, so a reference can only be
 * taken if the refcount is not already zero.  On !SMP TREE_RCU kernels,
 * disabled preemption already pins the page, so a plain increment is safe.
 */
static inline int page_cache_get_speculative(struct page *page)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic());
# endif
	/*
	 * Preemption is disabled (rcu_read_lock() guarantees this), and
	 * pagecache is never truncated from interrupt context, so the page
	 * is already pinned: no speculative get needed.
	 */
	VM_BUG_ON(page_count(page) == 0);
	atomic_inc(&page->_count);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * The page has been freed, or is about to be; the caller
		 * should retry the lookup.
		 */
		return 0;
	}
#endif
	VM_BUG_ON(PageCompound(page) && page != compound_head(page));

	return 1;
}

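/*
 * Illustrative sketch (not from the original header): the speculative get
 * is meant for RCU-protected lookups, where the page may be concurrently
 * freed and reallocated:
 *
 *	rcu_read_lock();
 *	page = radix_tree_lookup(&mapping->page_tree, index);
 *	if (page && !page_cache_get_speculative(page))
 *		goto repeat;		(reference not obtained: retry)
 *	rcu_read_unlock();
 *
 * After the reference is obtained, the caller must recheck that the page
 * is still the one it looked up, dropping the reference and retrying if not.
 */
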
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic());
# endif
	VM_BUG_ON(page_count(page) == 0);
	atomic_add(count, &page->_count);

#else
	if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
		return 0;
#endif
	VM_BUG_ON(PageCompound(page) && page != compound_head(page));

	return 1;
}

static inline int page_freeze_refs(struct page *page, int count)
{
	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}

static inline void page_unfreeze_refs(struct page *page, int count)
{
	VM_BUG_ON(page_count(page) != 0);
	VM_BUG_ON(count == 0);

	atomic_set(&page->_count, count);
}

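/*
 * Note (added for clarity): page_freeze_refs() atomically replaces an
 * expected reference count with zero, "freezing" the page against new
 * speculative references; page_unfreeze_refs() restores the count.  This
 * pair is used by page migration and reclaim when a page must not gain
 * new users while it is being moved or freed.
 */
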
#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

static inline struct page *page_cache_alloc_readahead(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x) |
				  __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN);
}

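/*
 * Note (added for clarity): the three allocators above differ only in the
 * gfp bits they add on top of the mapping's mask: _cold hints that the
 * page won't be used soon (__GFP_COLD), and _readahead additionally
 * suppresses retries and allocation-failure warnings
 * (__GFP_NORETRY | __GFP_NOWARN), since readahead is best-effort.
 */
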
typedef int filler_t(void *, struct page *);

extern struct page *find_get_page(struct address_space *mapping,
				pgoff_t index);
extern struct page *find_lock_page(struct address_space *mapping,
				pgoff_t index);
extern struct page *find_or_create_page(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);

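/*
 * Illustrative sketch (not from the original header): gang lookup returns
 * referenced pages, so each one must be released:
 *
 *	struct page *pages[16];
 *	unsigned i, nr = find_get_pages(mapping, start,
 *					ARRAY_SIZE(pages), pages);
 *	for (i = 0; i < nr; i++) {
 *		... inspect pages[i] ...
 *		page_cache_release(pages[i]);
 *	}
 */
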
/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
								pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index);
extern struct page *read_cache_page_async(struct address_space *mapping,
				pgoff_t index, filler_t *filler, void *data);
extern struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler, void *data);

static inline struct page *read_mapping_page_async(
				struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page_async(mapping, index, filler, data);
}

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_file_index(page)) << PAGE_CACHE_SHIFT;
}

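/*
 * Worked example (added for clarity, assuming 4K pages): a page with
 * page->index == 3 maps file bytes [12288, 16384), so page_offset()
 * returns 3 << 12 == 12288.
 */
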
extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

static inline void __set_page_locked(struct page *page)
{
	__set_bit(PG_locked, &page->flags);
}

static inline void __clear_page_locked(struct page *page)
{
	__clear_bit(PG_locked, &page->flags);
}

static inline int trylock_page(struct page *page)
{
	return likely(!test_and_set_bit_lock(PG_locked, &page->flags));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}

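/*
 * Illustrative usage (a sketch, not from the original header):
 *
 *	err = lock_page_killable(page);
 *	if (err) {
 *		page_cache_release(page);
 *		return err;		(fatal signal while waiting)
 *	}
 *	... page is locked ...
 *	unlock_page(page);
 */
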
/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (PageLocked(page))
		return wait_on_page_bit_killable(page, PG_locked);
	return 0;
}

/*
 * Wait for a page to be unlocked.  This must be called with the caller
 * "holding" the page, i.e. with an elevated refcount so the page cannot
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}

/*
 * Wait for a page to complete writeback.
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);

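/*
 * Illustrative pattern (not from the original header): waiting for
 * writeback before reusing or truncating a page:
 *
 *	lock_page(page);
 *	wait_on_page_writeback(page);
 *	... page is locked and not under writeback ...
 *	unlock_page(page);
 */
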
/*
 * Fault a userspace page into pagetables.  Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	if (unlikely(size == 0))
		return 0;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	if (unlikely(size == 0))
		return 0;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		/* Touch the last byte too if the range crosses a page. */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK)) {
			ret = __get_user(c, end);
			(void)c;
		}
	}
	return ret;
}

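/*
 * Illustrative sketch (not from the original header): generic write paths
 * prefault the source buffer before taking page locks, so that a later
 * atomic copy is unlikely to fault:
 *
 *	if (fault_in_pages_readable(buf, bytes))
 *		return -EFAULT;
 *	... lock the destination page, then copy from the user buffer
 *	    with page faults disabled, retrying on a short copy ...
 */
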
/*
 * Multipage variants of the above prefault helpers, useful if more than
 * PAGE_SIZE of data needs to be prefaulted.  These are separate from the
 * single-page variants to avoid clobbering the filemap.c hotpaths.
 */
static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
{
	int ret = 0;
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return ret;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	while (uaddr <= end) {
		ret = __put_user(0, uaddr);
		if (ret != 0)
			return ret;
		uaddr += PAGE_SIZE;
	}

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		ret = __put_user(0, end);

	return ret;
}

static inline int fault_in_multipages_readable(const char __user *uaddr,
					       int size)
{
	volatile char c;
	int ret = 0;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return ret;

	while (uaddr <= end) {
		ret = __get_user(c, uaddr);
		if (ret != 0)
			return ret;
		uaddr += PAGE_SIZE;
	}

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		ret = __get_user(c, end);
		(void)c;
	}

	return ret;
}

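/*
 * Usage sketch for the insertion helpers below (added for clarity, not
 * from the original header): a read path that misses in the cache
 * typically does:
 *
 *	page = page_cache_alloc_cold(mapping);
 *	if (!page)
 *		return -ENOMEM;
 *	err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
 *	if (err) {
 *		page_cache_release(page);
 *		return err;
 *	}
 *	err = mapping->a_ops->readpage(file, page);
 */
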
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __set_page_locked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__set_page_locked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__clear_page_locked(page);
	return error;
}

#endif /* _LINUX_PAGEMAP_H */