25 #include <linux/slab.h>
26 #include <linux/stat.h>
27 #include <linux/sched.h>
34 #define CACHETAG_LEN 11
/*
 * v9fs_cache_session_get_key - generate the fscache index key for a
 * 9p session cookie.  Copies the session's cachetag string into the
 * caller-supplied buffer; presumably returns the key length (0 when it
 * does not fit in bufmax).
 * NOTE(review): this extraction is missing several lines of the
 * function (rest of the parameter list, bounds check, return) -
 * confirm against the complete file.
 */
58 static uint16_t v9fs_cache_session_get_key(
const void *cookie_netfs_data,
66 v9ses, buffer, bufmax);
/* key length is the cachetag string length (terminating NUL excluded) */
69 klen =
strlen(v9ses->cachetag);
74 memcpy(buffer, v9ses->cachetag, klen);
/* fscache cookie definition for the session-level index; only the
 * .get_key initializer is visible in this extraction. */
82 .get_key = v9fs_cache_session_get_key,
/*
 * Acquire the per-session fscache cookie: generate a random cachetag
 * (presumably only when none was supplied by the user - the guarding
 * condition is elided here), then register the session under the v9fs
 * netfs primary index.
 */
89 v9fs_random_cachetag(v9ses);
91 v9ses->fscache = fscache_acquire_cookie(v9fs_cache_netfs.
primary_index,
92 &v9fs_cache_session_index_def,
/* debug trace of the session and the cookie just acquired */
95 v9ses, v9ses->fscache);
/*
 * Release the session cookie (retire == 0: cached data is kept) and
 * clear the pointer so the stale cookie cannot be reused.
 */
101 v9ses, v9ses->fscache);
102 fscache_relinquish_cookie(v9ses->fscache, 0);
103 v9ses->fscache =
NULL;
/*
 * v9fs_cache_inode_get_key - index key for an inode cookie.  The 9p
 * qid.path uniquely identifies the file on the server, so it serves
 * as the cache key.
 */
107 static uint16_t v9fs_cache_inode_get_key(
const void *cookie_netfs_data,
110 const struct v9fs_inode *v9inode = cookie_netfs_data;
111 memcpy(buffer, &v9inode->
qid.path,
sizeof(v9inode->
qid.path));
/* key length is exactly the size of qid.path */
114 return sizeof(v9inode->
qid.path);
/*
 * v9fs_cache_inode_get_attr - report the cached object's size to
 * fscache, taken from the VFS inode via i_size_read().
 */
117 static void v9fs_cache_inode_get_attr(
const void *cookie_netfs_data,
120 const struct v9fs_inode *v9inode = cookie_netfs_data;
121 *size = i_size_read(&v9inode->
vfs_inode);
/*
 * v9fs_cache_inode_get_aux - auxiliary (coherency) data for the inode
 * cookie: the 9p qid.version, used later by check_aux to detect that
 * the server-side file has changed.
 */
127 static uint16_t v9fs_cache_inode_get_aux(
const void *cookie_netfs_data,
130 const struct v9fs_inode *v9inode = cookie_netfs_data;
131 memcpy(buffer, &v9inode->
qid.version,
sizeof(v9inode->
qid.version));
134 return sizeof(v9inode->
qid.version);
/*
 * Validate stored auxiliary data against the inode's current
 * qid.version: a length or content mismatch means the cached object
 * is obsolete.  NOTE(review): the function signature and the return
 * statements of each branch are elided in this extraction.
 */
142 const struct v9fs_inode *v9inode = cookie_netfs_data;
/* reject aux data of the wrong size */
144 if (buflen !=
sizeof(v9inode->
qid.version))
/* reject aux data whose version differs from the current one */
147 if (
memcmp(buffer, &v9inode->
qid.version,
148 sizeof(v9inode->
qid.version)))
/*
 * v9fs_cache_inode_now_uncached - fscache is dropping its copy of the
 * data; walk the inode's pagecache in pagevec-sized batches and clear
 * the fscache mark on each page.  NOTE(review): the pagecache lookup
 * loop surrounding these lines is elided in this extraction.
 */
154 static void v9fs_cache_inode_now_uncached(
void *cookie_netfs_data)
156 struct v9fs_inode *v9inode = cookie_netfs_data;
161 pagevec_init(&pvec, 0);
171 for (loop = 0; loop < nr_pages; loop++)
/* resume the next lookup after the last page seen in this batch */
174 first = pvec.pages[nr_pages - 1]->index + 1;
177 pagevec_release(&pvec);
/* fscache cookie definition for per-inode data objects; wires up the
 * key/attr/aux/uncache callbacks defined above. */
185 .get_key = v9fs_cache_inode_get_key,
186 .get_attr = v9fs_cache_inode_get_attr,
187 .get_aux = v9fs_cache_inode_get_aux,
188 .check_aux = v9fs_cache_inode_check_aux,
189 .now_uncached = v9fs_cache_inode_now_uncached,
/*
 * Acquire an fscache cookie for an inode, indexed under its session's
 * cookie.  Bails out early when a cookie is already attached.
 */
200 v9inode = V9FS_I(inode);
201 if (v9inode->fscache)
204 v9ses = v9fs_inode2v9ses(inode);
205 v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
206 &v9fs_cache_inode_index_def,
/* debug trace of the inode and the cookie just acquired */
210 inode, v9inode->fscache);
/*
 * Drop the inode cookie without retiring it (retire == 0: the cached
 * data is kept for later reuse); clear the stale pointer afterwards.
 */
217 if (!v9inode->fscache)
220 inode, v9inode->fscache);
222 fscache_relinquish_cookie(v9inode->fscache, 0);
223 v9inode->fscache =
NULL;
/*
 * Retire the inode cookie (retire == 1: discard the cached data) and
 * clear the pointer.  Presumably the "flush" variant - the function
 * header is elided in this extraction; confirm against the full file.
 */
230 if (!v9inode->fscache)
233 inode, v9inode->fscache);
235 fscache_relinquish_cookie(v9inode->fscache, 1);
236 v9inode->fscache =
NULL;
/*
 * Reconcile the inode cookie under fscache_lock (presumably on file
 * open).  NOTE(review): the body between lock and unlock is elided in
 * this extraction.
 */
244 if (!v9inode->fscache)
247 spin_lock(&v9inode->fscache_lock);
254 spin_unlock(&v9inode->fscache_lock);
/*
 * Replace the inode's fscache cookie: retire the old one (its data is
 * no longer valid) and acquire a fresh cookie from the session, all
 * under fscache_lock to serialize against concurrent cookie updates.
 */
263 if (!v9inode->fscache)
/* keep the old cookie pointer for the debug trace below */
266 old = v9inode->fscache;
268 spin_lock(&v9inode->fscache_lock);
/* retire == 1: discard cached data tied to the old cookie */
269 fscache_relinquish_cookie(v9inode->fscache, 1);
271 v9ses = v9fs_inode2v9ses(inode);
272 v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
273 &v9fs_cache_inode_index_def,
/* debug trace of inode, old cookie, and the replacement cookie */
276 inode, old, v9inode->fscache);
278 spin_unlock(&v9inode->fscache_lock);
/*
 * Ask fscache whether the page may be released; calling this with no
 * cookie attached is a bug.
 */
286 BUG_ON(!v9inode->fscache);
288 return fscache_maybe_release_page(v9inode->fscache, page, gfp);
/*
 * Invalidate a page's fscache state: wait for any in-flight cache
 * write to the page, then uncache it.  The page must be locked.
 */
296 BUG_ON(!v9inode->fscache);
299 fscache_wait_on_page_write(v9inode->fscache, page);
300 BUG_ON(!PageLocked(page));
301 fscache_uncache_page(v9inode->fscache, page);
/*
 * Completion callback for asynchronous fscache reads: mark the page
 * up to date on success.  NOTE(review): the error parameter, error
 * branch, and page unlock are elided in this extraction.
 */
305 static void v9fs_vfs_readpage_complete(
struct page *
page,
void *
data,
309 SetPageUptodate(page);
/*
 * Try to satisfy a single-page read from fscache, completing via
 * v9fs_vfs_readpage_complete.  Returns early when the inode has no
 * cookie (the miss/return paths are elided in this extraction).
 */
324 const struct v9fs_inode *v9inode = V9FS_I(inode);
327 if (!v9inode->fscache)
330 ret = fscache_read_or_alloc_page(v9inode->fscache,
332 v9fs_vfs_readpage_complete,
/*
 * Try to satisfy a multi-page read from fscache.  When the cache
 * consumed every page, the caller's page list must be empty -
 * presumably this BUG_ON sits on the full-success branch (the
 * surrounding switch/return logic is elided in this extraction).
 */
362 const struct v9fs_inode *v9inode = V9FS_I(inode);
365 if (!v9inode->fscache)
368 ret = fscache_read_or_alloc_pages(v9inode->fscache,
369 mapping, pages, nr_pages,
370 v9fs_vfs_readpage_complete,
372 mapping_gfp_mask(mapping));
379 BUG_ON(!list_empty(pages));
/*
 * Push a page into fscache; on write failure the page is uncached so
 * the cache does not keep a stale reservation for it.
 */
397 const struct v9fs_inode *v9inode = V9FS_I(inode);
400 ret = fscache_write_page(v9inode->fscache, page,
GFP_KERNEL);
403 v9fs_uncache_page(inode, page);
/*
 * Block until fscache has finished any pending write of this page.
 * NOTE(review): the function header and any surrounding lines run
 * past this extraction - confirm against the full file.
 */
411 const struct v9fs_inode *v9inode = V9FS_I(inode);
414 fscache_wait_on_page_write(v9inode->fscache, page);