addr.c
#include <linux/ceph/ceph_debug.h>

#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>	/* generic_writepages */
#include <linux/slab.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>

#include "super.h"
#include "mds_client.h"
#include <linux/ceph/osd_client.h>
/*
 * Ceph address space ops.
 *
 * There are a few funny things going on here.
 *
 * The page->private field is used to reference a struct
 * ceph_snap_context for _every_ dirty page.  This indicates which
 * snapshot the page was logically dirtied in, and thus which snap
 * context needs to be associated with the osd write during writeback.
 *
 * Similarly, struct ceph_inode_info maintains a set of counters to
 * count dirty pages on the inode.  In the absence of snapshots,
 * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count.
 *
 * When a snapshot is taken (that is, when the client receives
 * notification that a snapshot was taken), each inode with caps and
 * with dirty pages (dirty pages implies there is a cap) gets a new
 * ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending
 * order, new snaps go to the tail).  The i_wrbuffer_ref_head count is
 * moved to capsnap->dirty.  (Unless a sync write is currently in
 * progress.  In that case, the capsnap is said to be "pending", new
 * writes cannot start, and the capsnap isn't "finalized" until the
 * write completes (or fails) and a final size/mtime for the inode for
 * that snap can be settled upon.)  i_wrbuffer_ref_head is reset to 0.
 *
 * On writeback, we must submit writes to the osd IN SNAP ORDER.  So,
 * we look for the first capsnap in i_cap_snaps and write out pages in
 * that snap context _only_.  Then we move on to the next capsnap,
 * eventually reaching the "live" or "head" context (i.e., pages that
 * are not yet snapped) and write out the most recently dirtied pages.
 *
 * Invalidate and so forth must take care to ensure the dirty page
 * accounting is preserved.
 */
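
/*
 * A concrete (hypothetical) example of the accounting above: an inode
 * with three dirty pages and no snaps has i_wrbuffer_ref ==
 * i_wrbuffer_ref_head == 3.  When a snapshot notification arrives, a
 * ceph_cap_snap is appended to i_cap_snaps with capsnap->dirty == 3
 * and i_wrbuffer_ref_head drops to 0; a page dirtied afterward brings
 * the counters to i_wrbuffer_ref == 4, i_wrbuffer_ref_head == 1, and
 * writeback must flush the three snapped pages before the new one.
 */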

#define CONGESTION_ON_THRESH(congestion_kb) (congestion_kb >> (PAGE_SHIFT-10))
#define CONGESTION_OFF_THRESH(congestion_kb)				\
	(CONGESTION_ON_THRESH(congestion_kb) -				\
	 (CONGESTION_ON_THRESH(congestion_kb) >> 2))
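
/*
 * Example (hypothetical numbers, assuming 4 KB pages so PAGE_SHIFT == 12):
 * with congestion_kb = 8192, writeback is flagged congested above
 * 8192 >> 2 == 2048 in-flight dirty pages and uncongested again below
 * 2048 - (2048 >> 2) == 1536, giving the thresholds some hysteresis.
 */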

static inline struct ceph_snap_context *page_snap_context(struct page *page)
{
	if (PagePrivate(page))
		return (void *)page->private;
	return NULL;
}

/*
 * Dirty a page.  Optimistically adjust accounting, on the assumption
 * that we won't race with invalidate.  If we do, readjust.
 */
static int ceph_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct ceph_inode_info *ci;
	int undo = 0;
	struct ceph_snap_context *snapc;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	if (TestSetPageDirty(page)) {
		dout("%p set_page_dirty %p idx %lu -- already dirty\n",
		     mapping->host, page, page->index);
		return 0;
	}

	inode = mapping->host;
	ci = ceph_inode(inode);

	/*
	 * Note that we're grabbing a snapc ref here without holding
	 * any locks!
	 */
	snapc = ceph_get_snap_context(ci->i_snap_realm->cached_context);

	/* dirty the head */
	spin_lock(&ci->i_ceph_lock);
	if (ci->i_head_snapc == NULL)
		ci->i_head_snapc = ceph_get_snap_context(snapc);
	++ci->i_wrbuffer_ref_head;
	if (ci->i_wrbuffer_ref == 0)
		ihold(inode);
	++ci->i_wrbuffer_ref;
	dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d "
	     "snapc %p seq %lld (%d snaps)\n",
	     mapping->host, page, page->index,
	     ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
	     ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
	     snapc, snapc->seq, snapc->num_snaps);
	spin_unlock(&ci->i_ceph_lock);

	/* now adjust page */
	spin_lock_irq(&mapping->tree_lock);
	if (page->mapping) {	/* Race with truncate? */
		WARN_ON_ONCE(!PageUptodate(page));
		account_page_dirtied(page, page->mapping);
		radix_tree_tag_set(&mapping->page_tree,
				   page_index(page), PAGECACHE_TAG_DIRTY);

		/*
		 * Reference snap context in page->private.  Also set
		 * PagePrivate so that we get invalidatepage callback.
		 */
		page->private = (unsigned long)snapc;
		SetPagePrivate(page);
	} else {
		dout("ANON set_page_dirty %p (raced truncate?)\n", page);
		undo = 1;
	}

	spin_unlock_irq(&mapping->tree_lock);

	if (undo)
		/* whoops, we failed to dirty the page */
		ceph_put_wrbuffer_cap_refs(ci, 1, snapc);

	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	BUG_ON(!PageDirty(page));
	return 1;
}

/*
 * If we are truncating the full page (i.e. offset == 0), adjust the
 * dirty page counters appropriately.  Only called if there is private
 * data on the page.
 */
static void ceph_invalidatepage(struct page *page, unsigned long offset)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_snap_context *snapc = page_snap_context(page);

	BUG_ON(!PageLocked(page));
	BUG_ON(!PagePrivate(page));
	BUG_ON(!page->mapping);

	inode = page->mapping->host;

	/*
	 * We can get non-dirty pages here due to races between
	 * set_page_dirty and truncate_complete_page; just spit out a
	 * warning, in case we end up with accounting problems later.
	 */
	if (!PageDirty(page))
		pr_err("%p invalidatepage %p page not dirty\n", inode, page);

	if (offset == 0)
		ClearPageChecked(page);

	ci = ceph_inode(inode);
	if (offset == 0) {
		dout("%p invalidatepage %p idx %lu full dirty page %lu\n",
		     inode, page, page->index, offset);
		ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
		ceph_put_snap_context(snapc);
		page->private = 0;
		ClearPagePrivate(page);
	} else {
		dout("%p invalidatepage %p idx %lu partial dirty page\n",
		     inode, page, page->index);
	}
}

/* just a sanity check */
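/*
 * Note: returning 0 tells try_to_release_page() that we did not free
 * the page's private data, so the page is not released.  Clean ceph
 * pages carry no private data, so this path should never see one.
 */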
static int ceph_releasepage(struct page *page, gfp_t g)
{
	struct inode *inode = page->mapping ? page->mapping->host : NULL;
	dout("%p releasepage %p idx %lu\n", inode, page, page->index);
	WARN_ON(PageDirty(page));
	WARN_ON(PagePrivate(page));
	return 0;
}

/*
 * read a single page, without unlocking it.
 */
static int readpage_nounlock(struct file *filp, struct page *page)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_inode_to_client(inode)->client->osdc;
	int err = 0;
	u64 len = PAGE_CACHE_SIZE;

	dout("readpage inode %p file %p page %p index %lu\n",
	     inode, filp, page, page->index);
	err = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout,
				  (u64) page_offset(page), &len,
				  ci->i_truncate_seq, ci->i_truncate_size,
				  &page, 1, 0);
	if (err == -ENOENT)
		err = 0;
	if (err < 0) {
		SetPageError(page);
		goto out;
	} else if (err < PAGE_CACHE_SIZE) {
		/* zero fill remainder of page */
		zero_user_segment(page, err, PAGE_CACHE_SIZE);
	}
	SetPageUptodate(page);

out:
	return err < 0 ? err : 0;
}

static int ceph_readpage(struct file *filp, struct page *page)
{
	int r = readpage_nounlock(filp, page);
	unlock_page(page);
	return r;
}

/*
 * Finish an async read(ahead) op.
 */
static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg)
{
	struct inode *inode = req->r_inode;
	struct ceph_osd_reply_head *replyhead;
	int rc, bytes;
	int i;

	/* parse reply */
	replyhead = msg->front.iov_base;
	WARN_ON(le32_to_cpu(replyhead->num_ops) == 0);
	rc = le32_to_cpu(replyhead->result);
	bytes = le32_to_cpu(msg->hdr.data_len);

	dout("finish_read %p req %p rc %d bytes %d\n", inode, req, rc, bytes);

	/* unlock all pages, zeroing any data we didn't read */
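	/*
	 * (For example, assuming 4 KB pages: a three-page read that
	 * returned 9000 bytes leaves bytes == 808 by the third page, so
	 * that page is zeroed from offset 808 to the end.)
	 */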
	for (i = 0; i < req->r_num_pages; i++, bytes -= PAGE_CACHE_SIZE) {
		struct page *page = req->r_pages[i];

		if (bytes < (int)PAGE_CACHE_SIZE) {
			/* zero (remainder of) page */
			int s = bytes < 0 ? 0 : bytes;
			zero_user_segment(page, s, PAGE_CACHE_SIZE);
		}
		dout("finish_read %p uptodate %p idx %lu\n", inode, page,
		     page->index);
		flush_dcache_page(page);
		SetPageUptodate(page);
		unlock_page(page);
		page_cache_release(page);
	}
	kfree(req->r_pages);
}

/*
 * start an async read(ahead) operation.  return nr_pages we submitted
 * a read for on success, or negative error code.
 */
static int start_read(struct inode *inode, struct list_head *page_list, int max)
{
	struct ceph_osd_client *osdc =
		&ceph_inode_to_client(inode)->client->osdc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct page *page = list_entry(page_list->prev, struct page, lru);
	struct ceph_osd_request *req;
	u64 off;
	u64 len;
	int i;
	struct page **pages;
	pgoff_t next_index;
	int nr_pages = 0;
	int ret;

	off = (u64) page_offset(page);

	/* count pages */
	next_index = page->index;
	list_for_each_entry_reverse(page, page_list, lru) {
		if (page->index != next_index)
			break;
		nr_pages++;
		next_index++;
		if (max && nr_pages == max)
			break;
	}
	len = nr_pages << PAGE_CACHE_SHIFT;
	dout("start_read %p nr_pages %d is %lld~%lld\n", inode, nr_pages,
	     off, len);

	req = ceph_osdc_new_request(osdc, &ci->i_layout, ceph_vino(inode),
				    off, &len,
				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
				    NULL, 0,
				    ci->i_truncate_seq, ci->i_truncate_size,
				    NULL, false, 1, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* build page vector */
	nr_pages = len >> PAGE_CACHE_SHIFT;
	pages = kmalloc(sizeof(*pages) * nr_pages, GFP_NOFS);
	ret = -ENOMEM;
	if (!pages)
		goto out;
	for (i = 0; i < nr_pages; ++i) {
		page = list_entry(page_list->prev, struct page, lru);
		BUG_ON(PageLocked(page));
		list_del(&page->lru);

		dout("start_read %p adding %p idx %lu\n", inode, page,
		     page->index);
		if (add_to_page_cache_lru(page, &inode->i_data, page->index,
					  GFP_NOFS)) {
			page_cache_release(page);
			dout("start_read %p add_to_page_cache failed %p\n",
			     inode, page);
			nr_pages = i;
			goto out_pages;
		}
		pages[i] = page;
	}
	req->r_pages = pages;
	req->r_num_pages = nr_pages;
	req->r_callback = finish_read;
	req->r_inode = inode;

	dout("start_read %p starting %p %lld~%lld\n", inode, req, off, len);
	ret = ceph_osdc_start_request(osdc, req, false);
	if (ret < 0)
		goto out_pages;
	ceph_osdc_put_request(req);
	return nr_pages;

out_pages:
	ceph_release_page_vector(pages, nr_pages);
out:
	ceph_osdc_put_request(req);
	return ret;
}


/*
 * Read multiple pages.  Leave pages we don't read + unlock in page_list;
 * the caller (VM) cleans them up.
 */
static int ceph_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *page_list, unsigned nr_pages)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	int rc = 0;
	int max = 0;

	if (fsc->mount_options->rsize >= PAGE_CACHE_SIZE)
		max = (fsc->mount_options->rsize + PAGE_CACHE_SIZE - 1)
			>> PAGE_SHIFT;

	dout("readpages %p file %p nr_pages %d max %d\n", inode, file, nr_pages,
	     max);
	while (!list_empty(page_list)) {
		rc = start_read(inode, page_list, max);
		if (rc < 0)
			goto out;
		BUG_ON(rc == 0);
	}
out:
	dout("readpages %p file %p ret %d\n", inode, file, rc);
	return rc;
}

/*
 * Get ref for the oldest snapc for an inode with dirty data... that is, the
 * only snap context we are allowed to write back.
 */
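/*
 * (The reference is the caller's to drop: every caller below pairs
 * this with a ceph_put_snap_context() on the returned context.)
 */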
static struct ceph_snap_context *get_oldest_context(struct inode *inode,
						    u64 *snap_size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc = NULL;
	struct ceph_cap_snap *capsnap = NULL;

	spin_lock(&ci->i_ceph_lock);
	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
		     capsnap->context, capsnap->dirty_pages);
		if (capsnap->dirty_pages) {
			snapc = ceph_get_snap_context(capsnap->context);
			if (snap_size)
				*snap_size = capsnap->size;
			break;
		}
	}
	if (!snapc && ci->i_wrbuffer_ref_head) {
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		dout(" head snapc %p has %d dirty pages\n",
		     snapc, ci->i_wrbuffer_ref_head);
	}
	spin_unlock(&ci->i_ceph_lock);
	return snapc;
}

/*
 * Write a single page, but leave the page locked.
 *
 * If we get a write error, set the page error bit, but still adjust the
 * dirty page accounting (i.e., page is no longer dirty).
 */
static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_fs_client *fsc;
	struct ceph_osd_client *osdc;
	loff_t page_off = page_offset(page);
	int len = PAGE_CACHE_SIZE;
	loff_t i_size;
	int err = 0;
	struct ceph_snap_context *snapc, *oldest;
	u64 snap_size = 0;
	long writeback_stat;

	dout("writepage %p idx %lu\n", page, page->index);

	if (!page->mapping || !page->mapping->host) {
		dout("writepage %p - no mapping\n", page);
		return -EFAULT;
	}
	inode = page->mapping->host;
	ci = ceph_inode(inode);
	fsc = ceph_inode_to_client(inode);
	osdc = &fsc->client->osdc;

	/* verify this is a writeable snap context */
	snapc = page_snap_context(page);
	if (snapc == NULL) {
		dout("writepage %p page %p not dirty?\n", inode, page);
		goto out;
	}
	oldest = get_oldest_context(inode, &snap_size);
	if (snapc->seq > oldest->seq) {
		dout("writepage %p page %p snapc %p not writeable - noop\n",
		     inode, page, snapc);
		/* we should only noop if called by kswapd */
		WARN_ON((current->flags & PF_MEMALLOC) == 0);
		ceph_put_snap_context(oldest);
		goto out;
	}
	ceph_put_snap_context(oldest);

	/* is this a partial page at end of file? */
	if (snap_size)
		i_size = snap_size;
	else
		i_size = i_size_read(inode);
	if (i_size < page_off + len)
		len = i_size - page_off;

	dout("writepage %p page %p index %lu on %llu~%u snapc %p\n",
	     inode, page, page->index, page_off, len, snapc);

	writeback_stat = atomic_long_inc_return(&fsc->writeback_count);
	if (writeback_stat >
	    CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
		set_bdi_congested(&fsc->backing_dev_info, BLK_RW_ASYNC);

	set_page_writeback(page);
	err = ceph_osdc_writepages(osdc, ceph_vino(inode),
				   &ci->i_layout, snapc,
				   page_off, len,
				   ci->i_truncate_seq, ci->i_truncate_size,
				   &inode->i_mtime,
				   &page, 1, 0, 0, true);
	if (err < 0) {
		dout("writepage setting page/mapping error %d %p\n", err, page);
		SetPageError(page);
		mapping_set_error(&inode->i_data, err);
		if (wbc)
			wbc->pages_skipped++;
	} else {
		dout("writepage cleaned page %p\n", page);
		err = 0;  /* vfs expects us to return 0 */
	}
	page->private = 0;
	ClearPagePrivate(page);
	end_page_writeback(page);
	ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
	ceph_put_snap_context(snapc);	/* page's reference */
out:
	return err;
}

static int ceph_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;
	struct inode *inode = page->mapping->host;
	BUG_ON(!inode);
	ihold(inode);
	err = writepage_nounlock(page, wbc);
	unlock_page(page);
	iput(inode);
	return err;
}


/*
 * lame release_pages helper.  release_pages() isn't exported to
 * modules.
 */
static void ceph_release_pages(struct page **pages, int num)
{
	struct pagevec pvec;
	int i;

	pagevec_init(&pvec, 0);
	for (i = 0; i < num; i++) {
		if (pagevec_add(&pvec, pages[i]) == 0)
			pagevec_release(&pvec);
	}
	pagevec_release(&pvec);
}


/*
 * async writeback completion handler.
 *
 * If we get an error, set the mapping error bit, but not the individual
 * page error bits.
 */
static void writepages_finish(struct ceph_osd_request *req,
			      struct ceph_msg *msg)
{
	struct inode *inode = req->r_inode;
	struct ceph_osd_reply_head *replyhead;
	struct ceph_osd_op *op;
	struct ceph_inode_info *ci = ceph_inode(inode);
	unsigned wrote;
	struct page *page;
	int i;
	struct ceph_snap_context *snapc = req->r_snapc;
	struct address_space *mapping = inode->i_mapping;
	__s32 rc = -EIO;
	u64 bytes = 0;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	long writeback_stat;
	unsigned issued = ceph_caps_issued(ci);

	/* parse reply */
	replyhead = msg->front.iov_base;
	WARN_ON(le32_to_cpu(replyhead->num_ops) == 0);
	op = (void *)(replyhead + 1);
	rc = le32_to_cpu(replyhead->result);
	bytes = le64_to_cpu(op->extent.length);

	if (rc >= 0) {
		/*
		 * Assume we wrote the pages we originally sent.  The
		 * osd might reply with fewer pages if our writeback
		 * raced with a truncation and was adjusted at the osd,
		 * so don't believe the reply.
		 */
		wrote = req->r_num_pages;
	} else {
		wrote = 0;
		mapping_set_error(mapping, rc);
	}
	dout("writepages_finish %p rc %d bytes %llu wrote %d (pages)\n",
	     inode, rc, bytes, wrote);

	/* clean all pages */
	for (i = 0; i < req->r_num_pages; i++) {
		page = req->r_pages[i];
		BUG_ON(!page);
		WARN_ON(!PageUptodate(page));

		writeback_stat =
			atomic_long_dec_return(&fsc->writeback_count);
		if (writeback_stat <
		    CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb))
			clear_bdi_congested(&fsc->backing_dev_info,
					    BLK_RW_ASYNC);

		ceph_put_snap_context(page_snap_context(page));
		page->private = 0;
		ClearPagePrivate(page);
		dout("unlocking %d %p\n", i, page);
		end_page_writeback(page);

		/*
		 * We lost the cache cap, need to truncate the page before
		 * it is unlocked, otherwise we'd truncate it later in the
		 * page truncation thread, possibly losing some data that
		 * raced its way in
		 */
		if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0)
			generic_error_remove_page(inode->i_mapping, page);

		unlock_page(page);
	}
	dout("%p wrote+cleaned %d pages\n", inode, wrote);
	ceph_put_wrbuffer_cap_refs(ci, req->r_num_pages, snapc);

	ceph_release_pages(req->r_pages, req->r_num_pages);
	if (req->r_pages_from_pool)
		mempool_free(req->r_pages,
			     ceph_sb_to_client(inode->i_sb)->wb_pagevec_pool);
	else
		kfree(req->r_pages);
	ceph_osdc_put_request(req);
}

/*
 * allocate a page vec, either directly, or if necessary, via the
 * mempool.  we avoid the mempool if we can because req->r_num_pages
 * may be less than the maximum write size.
 */
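/*
 * (mempool_alloc() with a waiting gfp mask like GFP_NOFS never returns
 * NULL, so the WARN_ON below is only a belt-and-braces check.)
 */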
static void alloc_page_vec(struct ceph_fs_client *fsc,
			   struct ceph_osd_request *req)
{
	req->r_pages = kmalloc(sizeof(struct page *) * req->r_num_pages,
			       GFP_NOFS);
	if (!req->r_pages) {
		req->r_pages = mempool_alloc(fsc->wb_pagevec_pool, GFP_NOFS);
		req->r_pages_from_pool = 1;
		WARN_ON(!req->r_pages);
	}
}

/*
 * initiate async writeback
 */
static int ceph_writepages_start(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc;
	pgoff_t index, start, end = -1;
	int range_whole = 0;
	int should_loop = 1;
	pgoff_t max_pages = 0, max_pages_ever = 0;
	struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc;
	struct pagevec pvec;
	int done = 0;
	int rc = 0;
	unsigned wsize = 1 << inode->i_blkbits;
	struct ceph_osd_request *req = NULL;
	int do_sync;
	u64 snap_size = 0;

	/*
	 * Include a 'sync' in the OSD request if this is a data
	 * integrity write (e.g., O_SYNC write or fsync()), or if our
	 * cap is being revoked.
	 */
	do_sync = wbc->sync_mode == WB_SYNC_ALL;
	if (ceph_caps_revoking(ci, CEPH_CAP_FILE_BUFFER))
		do_sync = 1;
	dout("writepages_start %p dosync=%d (mode=%s)\n",
	     inode, do_sync,
	     wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
	     (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));

	fsc = ceph_inode_to_client(inode);
	if (fsc->mount_state == CEPH_MOUNT_SHUTDOWN) {
		pr_warning("writepage_start %p on forced umount\n", inode);
		return -EIO; /* we're in a forced umount, don't write! */
	}
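
	/*
	 * Cap the write size: e.g., with a 4 MB wsize and 4 KB pages,
	 * max_pages_ever below works out to 1024 pages per OSD write
	 * (illustrative numbers, not defaults).
	 */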
	if (fsc->mount_options->wsize && fsc->mount_options->wsize < wsize)
		wsize = fsc->mount_options->wsize;
	if (wsize < PAGE_CACHE_SIZE)
		wsize = PAGE_CACHE_SIZE;
	max_pages_ever = wsize >> PAGE_CACHE_SHIFT;

	pagevec_init(&pvec, 0);

	/* where to start/end? */
	if (wbc->range_cyclic) {
		start = mapping->writeback_index; /* Start from prev offset */
		end = -1;
		dout(" cyclic, start at %lu\n", start);
	} else {
		start = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		should_loop = 0;
		dout(" not cyclic, %lu to %lu\n", start, end);
	}
	index = start;

retry:
	/* find oldest snap context with dirty data */
	ceph_put_snap_context(snapc);
	snapc = get_oldest_context(inode, &snap_size);
	if (!snapc) {
		/* hmm, why does writepages get called when there
		   is no dirty data? */
		dout(" no snap context with dirty data?\n");
		goto out;
	}
	dout(" oldest snapc is %p seq %lld (%d snaps)\n",
	     snapc, snapc->seq, snapc->num_snaps);
	if (last_snapc && snapc != last_snapc) {
		/* if we switched to a newer snapc, restart our scan at the
		 * start of the original file range. */
		dout(" snapc differs from last pass, restarting at %lu\n",
		     index);
		index = start;
	}
	last_snapc = snapc;

	while (!done && index <= end) {
		unsigned i;
		int first;
		pgoff_t next;
		int pvec_pages, locked_pages;
		struct page *page;
		int want;
		u64 offset, len;
		struct ceph_osd_request_head *reqhead;
		struct ceph_osd_op *op;
		long writeback_stat;

		next = 0;
		locked_pages = 0;
		max_pages = max_pages_ever;

get_more_pages:
		first = -1;
		want = min(end - index,
			   min((pgoff_t)PAGEVEC_SIZE,
			       max_pages - (pgoff_t)locked_pages) - 1)
			+ 1;
		pvec_pages = pagevec_lookup_tag(&pvec, mapping, &index,
						PAGECACHE_TAG_DIRTY,
						want);
		dout("pagevec_lookup_tag got %d\n", pvec_pages);
		if (!pvec_pages && !locked_pages)
			break;
		for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) {
			page = pvec.pages[i];
			dout("? %p idx %lu\n", page, page->index);
			if (locked_pages == 0)
				lock_page(page);  /* first page */
			else if (!trylock_page(page))
				break;

			/* only dirty pages, or our accounting breaks */
			if (unlikely(!PageDirty(page)) ||
			    unlikely(page->mapping != mapping)) {
				dout("!dirty or !mapping %p\n", page);
				unlock_page(page);
				break;
			}
			if (!wbc->range_cyclic && page->index > end) {
				dout("end of range %p\n", page);
				done = 1;
				unlock_page(page);
				break;
			}
			if (next && (page->index != next)) {
				dout("not consecutive %p\n", page);
				unlock_page(page);
				break;
			}
			if (wbc->sync_mode != WB_SYNC_NONE) {
				dout("waiting on writeback %p\n", page);
				wait_on_page_writeback(page);
			}
			if ((snap_size && page_offset(page) > snap_size) ||
			    (!snap_size &&
			     page_offset(page) > i_size_read(inode))) {
				dout("%p page eof %llu\n", page, snap_size ?
				     snap_size : i_size_read(inode));
				done = 1;
				unlock_page(page);
				break;
			}
			if (PageWriteback(page)) {
				dout("%p under writeback\n", page);
				unlock_page(page);
				break;
			}

			/* only if matching snap context */
			pgsnapc = page_snap_context(page);
			if (pgsnapc->seq > snapc->seq) {
				dout("page snapc %p %lld > oldest %p %lld\n",
				     pgsnapc, pgsnapc->seq, snapc, snapc->seq);
				unlock_page(page);
				if (!locked_pages)
					continue; /* keep looking for snap */
				break;
			}

			if (!clear_page_dirty_for_io(page)) {
				dout("%p !clear_page_dirty_for_io\n", page);
				unlock_page(page);
				break;
			}

			/* ok */
			if (locked_pages == 0) {
				/* prepare async write request */
				offset = (u64) page_offset(page);
				len = wsize;
				req = ceph_osdc_new_request(&fsc->client->osdc,
					    &ci->i_layout,
					    ceph_vino(inode),
					    offset, &len,
					    CEPH_OSD_OP_WRITE,
					    CEPH_OSD_FLAG_WRITE |
						    CEPH_OSD_FLAG_ONDISK,
					    snapc, do_sync,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    &inode->i_mtime, true, 1, 0);

				if (IS_ERR(req)) {
					rc = PTR_ERR(req);
					unlock_page(page);
					break;
				}

				max_pages = req->r_num_pages;

				alloc_page_vec(fsc, req);
				req->r_callback = writepages_finish;
				req->r_inode = inode;
			}

			/* note position of first page in pvec */
			if (first < 0)
				first = i;
			dout("%p will write page %p idx %lu\n",
			     inode, page, page->index);

			writeback_stat =
				atomic_long_inc_return(&fsc->writeback_count);
			if (writeback_stat > CONGESTION_ON_THRESH(
				    fsc->mount_options->congestion_kb)) {
				set_bdi_congested(&fsc->backing_dev_info,
						  BLK_RW_ASYNC);
			}

			set_page_writeback(page);
			req->r_pages[locked_pages] = page;
			locked_pages++;
			next = page->index + 1;
		}

		/* did we get anything? */
		if (!locked_pages)
			goto release_pvec_pages;
		if (i) {
			int j;
			BUG_ON(!locked_pages || first < 0);

			if (pvec_pages && i == pvec_pages &&
			    locked_pages < max_pages) {
				dout("reached end pvec, trying for more\n");
				pagevec_reinit(&pvec);
				goto get_more_pages;
			}

			/* shift unused pages over in the pvec...  we
			 * will need to release them below. */
			for (j = i; j < pvec_pages; j++) {
				dout(" pvec leftover page %p\n",
				     pvec.pages[j]);
				pvec.pages[j-i+first] = pvec.pages[j];
			}
			pvec.nr -= i-first;
		}

		/* submit the write */
		offset = req->r_pages[0]->index << PAGE_CACHE_SHIFT;
		len = min((snap_size ? snap_size : i_size_read(inode)) - offset,
			  (u64)locked_pages << PAGE_CACHE_SHIFT);
		dout("writepages got %d pages at %llu~%llu\n",
		     locked_pages, offset, len);

		/* revise final length, page count */
		req->r_num_pages = locked_pages;
		reqhead = req->r_request->front.iov_base;
		op = (void *)(reqhead + 1);
		op->extent.length = cpu_to_le64(len);
		op->payload_len = cpu_to_le32(len);
		req->r_request->hdr.data_len = cpu_to_le32(len);

		rc = ceph_osdc_start_request(&fsc->client->osdc, req, true);
		BUG_ON(rc);
		req = NULL;

		/* continue? */
		index = next;
		wbc->nr_to_write -= locked_pages;
		if (wbc->nr_to_write <= 0)
			done = 1;

release_pvec_pages:
		dout("pagevec_release on %d pages (%p)\n", (int)pvec.nr,
		     pvec.nr ? pvec.pages[0] : NULL);
		pagevec_release(&pvec);

		if (locked_pages && !done)
			goto retry;
	}

	if (should_loop && !done) {
		/* more to do; loop back to beginning of file */
		dout("writepages looping back to beginning of file\n");
		should_loop = 0;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

out:
	if (req)
		ceph_osdc_put_request(req);
	ceph_put_snap_context(snapc);
	dout("writepages done, rc = %d\n", rc);
	return rc;
}



/*
 * See if a given @snapc is either writeable, or already written.
 */
static int context_is_writeable_or_written(struct inode *inode,
					   struct ceph_snap_context *snapc)
{
	struct ceph_snap_context *oldest = get_oldest_context(inode, NULL);
	int ret = !oldest || snapc->seq <= oldest->seq;

	ceph_put_snap_context(oldest);
	return ret;
}

/*
 * We are only allowed to write into/dirty the page if the page is
 * clean, or already dirty within the same snap context.
 *
 * called with page locked.
 * return success with page locked,
 * or any failure (incl -EAGAIN) with page unlocked.
 */
static int ceph_update_writeable_page(struct file *file,
				      loff_t pos, unsigned len,
				      struct page *page)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	loff_t page_off = pos & PAGE_CACHE_MASK;
	int pos_in_page = pos & ~PAGE_CACHE_MASK;
	int end_in_page = pos_in_page + len;
	loff_t i_size;
	int r;
	struct ceph_snap_context *snapc, *oldest;

retry_locked:
	/* writepages currently holds page lock, but if we change that later, */
	wait_on_page_writeback(page);

	/* check snap context */
	BUG_ON(!ci->i_snap_realm);
	down_read(&mdsc->snap_rwsem);
	BUG_ON(!ci->i_snap_realm->cached_context);
	snapc = page_snap_context(page);
	if (snapc && snapc != ci->i_head_snapc) {
		/*
		 * this page is already dirty in another (older) snap
		 * context!  is it writeable now?
		 */
		oldest = get_oldest_context(inode, NULL);
		up_read(&mdsc->snap_rwsem);

		if (snapc->seq > oldest->seq) {
			ceph_put_snap_context(oldest);
			dout(" page %p snapc %p not current or oldest\n",
			     page, snapc);
			/*
			 * queue for writeback, and wait for snapc to
			 * be writeable or written
			 */
			snapc = ceph_get_snap_context(snapc);
			unlock_page(page);
			ceph_queue_writeback(inode);
			r = wait_event_interruptible(ci->i_cap_wq,
			       context_is_writeable_or_written(inode, snapc));
			ceph_put_snap_context(snapc);
			if (r == -ERESTARTSYS)
				return r;
			return -EAGAIN;
		}
		ceph_put_snap_context(oldest);

		/* yay, writeable, do it now (without dropping page lock) */
		dout(" page %p snapc %p not current, but oldest\n",
		     page, snapc);
		if (!clear_page_dirty_for_io(page))
			goto retry_locked;
		r = writepage_nounlock(page, NULL);
		if (r < 0)
			goto fail_nosnap;
		goto retry_locked;
	}

	if (PageUptodate(page)) {
		dout(" page %p already uptodate\n", page);
		return 0;
	}

	/* full page? */
	if (pos_in_page == 0 && len == PAGE_CACHE_SIZE)
		return 0;

	/* past end of file? */
	i_size = inode->i_size;   /* caller holds i_mutex */

	if (i_size + len > inode->i_sb->s_maxbytes) {
		/* file is too big */
		r = -EINVAL;
		goto fail;
	}

	if (page_off >= i_size ||
	    (pos_in_page == 0 && (pos+len) >= i_size &&
	     end_in_page - pos_in_page != PAGE_CACHE_SIZE)) {
		dout(" zeroing %p 0 - %d and %d - %d\n",
		     page, pos_in_page, end_in_page, (int)PAGE_CACHE_SIZE);
		zero_user_segments(page,
				   0, pos_in_page,
				   end_in_page, PAGE_CACHE_SIZE);
		return 0;
	}

	/* we need to read it. */
	up_read(&mdsc->snap_rwsem);
	r = readpage_nounlock(file, page);
	if (r < 0)
		goto fail_nosnap;
	goto retry_locked;

fail:
	up_read(&mdsc->snap_rwsem);
fail_nosnap:
	unlock_page(page);
	return r;
}

/*
 * We are only allowed to write into/dirty the page if the page is
 * clean, or already dirty within the same snap context.
 */
static int ceph_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct page *page;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	int r;

	do {
		/* get a page */
		page = grab_cache_page_write_begin(mapping, index, 0);
		if (!page)
			return -ENOMEM;
		*pagep = page;

		dout("write_begin file %p inode %p page %p %d~%d\n", file,
		     inode, page, (int)pos, (int)len);

		r = ceph_update_writeable_page(file, pos, len, page);
	} while (r == -EAGAIN);

	return r;
}

/*
 * we don't do anything in here that simple_write_end doesn't do
 * except adjust dirty page accounting and drop read lock on
 * mdsc->snap_rwsem.
 */
static int ceph_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
	int check_cap = 0;

	dout("write_end file %p inode %p page %p %d~%d (%d)\n", file,
	     inode, page, (int)pos, (int)copied, (int)len);

	/* zero the stale part of the page if we did a short copy */
	if (copied < len)
		zero_user_segment(page, from+copied, len);

	/* did file size increase? */
	/* (no need for i_size_read(); the caller holds i_mutex) */
	if (pos+copied > inode->i_size)
		check_cap = ceph_inode_set_size(inode, pos+copied);

	if (!PageUptodate(page))
		SetPageUptodate(page);

	set_page_dirty(page);

	unlock_page(page);
	up_read(&mdsc->snap_rwsem);
	page_cache_release(page);

	if (check_cap)
		ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL);

	return copied;
}

/*
 * we set .direct_IO to indicate direct io is supported, but since we
 * intercept O_DIRECT reads and writes early, this function should
 * never get called.
 */
static ssize_t ceph_direct_io(int rw, struct kiocb *iocb,
			      const struct iovec *iov,
			      loff_t pos, unsigned long nr_segs)
{
	WARN_ON(1);
	return -EINVAL;
}

const struct address_space_operations ceph_aops = {
	.readpage = ceph_readpage,
	.readpages = ceph_readpages,
	.writepage = ceph_writepage,
	.writepages = ceph_writepages_start,
	.write_begin = ceph_write_begin,
	.write_end = ceph_write_end,
	.set_page_dirty = ceph_set_page_dirty,
	.invalidatepage = ceph_invalidatepage,
	.releasepage = ceph_releasepage,
	.direct_IO = ceph_direct_io,
};


/*
 * vm ops
 */

/*
 * Reuse write_begin here for simplicity.
 */
static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = vma->vm_file->f_dentry->d_inode;
	struct page *page = vmf->page;
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	loff_t off = page_offset(page);
	loff_t size, len;
	int ret;

	/* Update time before taking page lock */
	file_update_time(vma->vm_file);

	size = i_size_read(inode);
	if (off + PAGE_CACHE_SIZE <= size)
		len = PAGE_CACHE_SIZE;
	else
		len = size & ~PAGE_CACHE_MASK;
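	/*
	 * (e.g., assuming 4 KB pages and i_size == 10000: a fault on the
	 * final, partial page gets len == 10000 & 4095 == 1808, while any
	 * earlier page gets the full PAGE_CACHE_SIZE.)
	 */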

	dout("page_mkwrite %p %llu~%llu page %p idx %lu\n", inode,
	     off, len, page, page->index);

	lock_page(page);

	ret = VM_FAULT_NOPAGE;
	if ((off > size) ||
	    (page->mapping != inode->i_mapping))
		goto out;

	ret = ceph_update_writeable_page(vma->vm_file, off, len, page);
	if (ret == 0) {
		/* success.  we'll keep the page locked. */
		set_page_dirty(page);
		up_read(&mdsc->snap_rwsem);
		ret = VM_FAULT_LOCKED;
	} else {
		if (ret == -ENOMEM)
			ret = VM_FAULT_OOM;
		else
			ret = VM_FAULT_SIGBUS;
	}
out:
	dout("page_mkwrite %p %llu~%llu = %d\n", inode, off, len, ret);
	if (ret != VM_FAULT_LOCKED)
		unlock_page(page);
	return ret;
}

static struct vm_operations_struct ceph_vmops = {
	.fault = filemap_fault,
	.page_mkwrite = ceph_page_mkwrite,
	.remap_pages = generic_file_remap_pages,
};

int ceph_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &ceph_vmops;
	return 0;
}