Linux Kernel  3.7.1
stat.c
/*
 * linux/fs/stat.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>

/*
 * generic_fillattr - fill a kstat structure from an inode's fields.
 * This is the default behaviour when a filesystem does not provide its
 * own ->getattr inode operation.
 */
void generic_fillattr(struct inode *inode, struct kstat *stat)
{
        stat->dev = inode->i_sb->s_dev;
        stat->ino = inode->i_ino;
        stat->mode = inode->i_mode;
        stat->nlink = inode->i_nlink;
        stat->uid = inode->i_uid;
        stat->gid = inode->i_gid;
        stat->rdev = inode->i_rdev;
        stat->size = i_size_read(inode);
        stat->atime = inode->i_atime;
        stat->mtime = inode->i_mtime;
        stat->ctime = inode->i_ctime;
        stat->blksize = (1 << inode->i_blkbits);
        stat->blocks = inode->i_blocks;
}

EXPORT_SYMBOL(generic_fillattr);

/*
 * vfs_getattr - run the security check, then let the filesystem's
 * ->getattr operation fill @stat, falling back to generic_fillattr().
 */
int vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
{
        struct inode *inode = dentry->d_inode;
        int retval;

        retval = security_inode_getattr(mnt, dentry);
        if (retval)
                return retval;

        if (inode->i_op->getattr)
                return inode->i_op->getattr(mnt, dentry, stat);

        generic_fillattr(inode, stat);
        return 0;
}

EXPORT_SYMBOL(vfs_getattr);

int vfs_fstat(unsigned int fd, struct kstat *stat)
{
        struct fd f = fdget_raw(fd);
        int error = -EBADF;

        if (f.file) {
                error = vfs_getattr(f.file->f_path.mnt, f.file->f_path.dentry,
                                    stat);
                fdput(f);
        }
        return error;
}
EXPORT_SYMBOL(vfs_fstat);

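/*
 * Editor's sketch (not part of fs/stat.c): how a hypothetical in-kernel
 * caller could use vfs_fstat() to read the size of an open descriptor.
 * The function name "example_fd_size" is illustrative only.
 */
#if 0
static loff_t example_fd_size(unsigned int fd)
{
        struct kstat stat;
        int error = vfs_fstat(fd, &stat);

        return error ? error : stat.size;
}
#endif
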
int vfs_fstatat(int dfd, const char __user *filename, struct kstat *stat,
                int flag)
{
        struct path path;
        int error = -EINVAL;
        int lookup_flags = 0;

        if ((flag & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT |
                      AT_EMPTY_PATH)) != 0)
                goto out;

        if (!(flag & AT_SYMLINK_NOFOLLOW))
                lookup_flags |= LOOKUP_FOLLOW;
        if (flag & AT_EMPTY_PATH)
                lookup_flags |= LOOKUP_EMPTY;

        error = user_path_at(dfd, filename, lookup_flags, &path);
        if (error)
                goto out;

        error = vfs_getattr(path.mnt, path.dentry, stat);
        path_put(&path);
out:
        return error;
}
EXPORT_SYMBOL(vfs_fstatat);

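/*
 * Editor's illustration (not part of fs/stat.c): the userspace call that
 * reaches vfs_fstatat() via the newfstatat/fstatat64 syscalls below.
 * AT_SYMLINK_NOFOLLOW suppresses LOOKUP_FOLLOW, so the link itself is
 * reported rather than its target.
 */
#if 0
#include <fcntl.h>              /* AT_FDCWD, AT_SYMLINK_NOFOLLOW */
#include <sys/stat.h>
#include <stdio.h>

int main(void)
{
        struct stat st;

        if (fstatat(AT_FDCWD, "/etc/passwd", &st, AT_SYMLINK_NOFOLLOW) == 0)
                printf("size=%lld\n", (long long)st.st_size);
        return 0;
}
#endif
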
int vfs_stat(const char __user *name, struct kstat *stat)
{
        return vfs_fstatat(AT_FDCWD, name, stat, 0);
}
EXPORT_SYMBOL(vfs_stat);

int vfs_lstat(const char __user *name, struct kstat *stat)
{
        return vfs_fstatat(AT_FDCWD, name, stat, AT_SYMLINK_NOFOLLOW);
}
EXPORT_SYMBOL(vfs_lstat);

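/*
 * Editor's illustration (not part of fs/stat.c): the userspace view of
 * the pair above.  stat(2) follows a trailing symlink (LOOKUP_FOLLOW),
 * while lstat(2) describes the link itself.
 */
#if 0
#include <sys/stat.h>
#include <stdio.h>

int main(int argc, char **argv)
{
        struct stat st;

        if (argc > 1 && lstat(argv[1], &st) == 0)
                printf("lstat: mode %o size %lld\n",
                       (unsigned)st.st_mode, (long long)st.st_size);
        if (argc > 1 && stat(argv[1], &st) == 0)
                printf("stat:  mode %o size %lld\n",
                       (unsigned)st.st_mode, (long long)st.st_size);
        return 0;
}
#endif
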
#ifdef __ARCH_WANT_OLD_STAT

/*
 * For backward compatibility? Maybe this should be moved
 * into arch/i386 instead?
 */
static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user *statbuf)
{
        static int warncount = 5;
        struct __old_kernel_stat tmp;

        if (warncount > 0) {
                warncount--;
                printk(KERN_WARNING "VFS: Warning: %s using old stat() call. Recompile your binary.\n",
                        current->comm);
        } else if (warncount < 0) {
                /* it's laughable, but... */
                warncount = 0;
        }

        memset(&tmp, 0, sizeof(struct __old_kernel_stat));
        tmp.st_dev = old_encode_dev(stat->dev);
        tmp.st_ino = stat->ino;
        if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
                return -EOVERFLOW;
        tmp.st_mode = stat->mode;
        tmp.st_nlink = stat->nlink;
        if (tmp.st_nlink != stat->nlink)
                return -EOVERFLOW;
        SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
        SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
        tmp.st_rdev = old_encode_dev(stat->rdev);
#if BITS_PER_LONG == 32
        if (stat->size > MAX_NON_LFS)
                return -EOVERFLOW;
#endif
        tmp.st_size = stat->size;
        tmp.st_atime = stat->atime.tv_sec;
        tmp.st_mtime = stat->mtime.tv_sec;
        tmp.st_ctime = stat->ctime.tv_sec;
        return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(stat, const char __user *, filename,
                struct __old_kernel_stat __user *, statbuf)
{
        struct kstat stat;
        int error;

        error = vfs_stat(filename, &stat);
        if (error)
                return error;

        return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(lstat, const char __user *, filename,
                struct __old_kernel_stat __user *, statbuf)
{
        struct kstat stat;
        int error;

        error = vfs_lstat(filename, &stat);
        if (error)
                return error;

        return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf)
{
        struct kstat stat;
        int error = vfs_fstat(fd, &stat);

        if (!error)
                error = cp_old_stat(&stat, statbuf);

        return error;
}

#endif /* __ARCH_WANT_OLD_STAT */

#if BITS_PER_LONG == 32
# define choose_32_64(a,b) a
#else
# define choose_32_64(a,b) b
#endif

#define valid_dev(x)  choose_32_64(old_valid_dev,new_valid_dev)(x)
#define encode_dev(x) choose_32_64(old_encode_dev,new_encode_dev)(x)

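/*
 * Editor's note: on a 32-bit build the two helpers above expand to
 * old_valid_dev(x)/old_encode_dev(x); on a 64-bit build they expand to
 * new_valid_dev(x)/new_encode_dev(x).
 */
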
#ifndef INIT_STRUCT_STAT_PADDING
# define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
#endif

static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
{
        struct stat tmp;

        if (!valid_dev(stat->dev) || !valid_dev(stat->rdev))
                return -EOVERFLOW;
#if BITS_PER_LONG == 32
        if (stat->size > MAX_NON_LFS)
                return -EOVERFLOW;
#endif

        INIT_STRUCT_STAT_PADDING(tmp);
        tmp.st_dev = encode_dev(stat->dev);
        tmp.st_ino = stat->ino;
        if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
                return -EOVERFLOW;
        tmp.st_mode = stat->mode;
        tmp.st_nlink = stat->nlink;
        if (tmp.st_nlink != stat->nlink)
                return -EOVERFLOW;
        SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
        SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
        tmp.st_rdev = encode_dev(stat->rdev);
        tmp.st_size = stat->size;
        tmp.st_atime = stat->atime.tv_sec;
        tmp.st_mtime = stat->mtime.tv_sec;
        tmp.st_ctime = stat->ctime.tv_sec;
#ifdef STAT_HAVE_NSEC
        tmp.st_atime_nsec = stat->atime.tv_nsec;
        tmp.st_mtime_nsec = stat->mtime.tv_nsec;
        tmp.st_ctime_nsec = stat->ctime.tv_nsec;
#endif
        tmp.st_blocks = stat->blocks;
        tmp.st_blksize = stat->blksize;
        return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

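/*
 * Editor's sketch (not part of fs/stat.c): the truncation test used by
 * cp_old_stat()/cp_new_stat() in miniature.  Copying a wide inode number
 * into a narrower field and comparing it back detects silent loss.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t wide_ino = 0x100000001ULL;     /* does not fit in 32 bits */
        uint32_t narrow_ino = wide_ino;         /* silently truncated to 1 */

        if (sizeof(narrow_ino) < sizeof(wide_ino) && narrow_ino != wide_ino)
                printf("would return -EOVERFLOW\n");
        return 0;
}
#endif
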
SYSCALL_DEFINE2(newstat, const char __user *, filename,
                struct stat __user *, statbuf)
{
        struct kstat stat;
        int error = vfs_stat(filename, &stat);

        if (error)
                return error;
        return cp_new_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(newlstat, const char __user *, filename,
                struct stat __user *, statbuf)
{
        struct kstat stat;
        int error;

        error = vfs_lstat(filename, &stat);
        if (error)
                return error;

        return cp_new_stat(&stat, statbuf);
}

#if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT)
SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename,
                struct stat __user *, statbuf, int, flag)
{
        struct kstat stat;
        int error;

        error = vfs_fstatat(dfd, filename, &stat, flag);
        if (error)
                return error;
        return cp_new_stat(&stat, statbuf);
}
#endif

SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf)
{
        struct kstat stat;
        int error = vfs_fstat(fd, &stat);

        if (!error)
                error = cp_new_stat(&stat, statbuf);

        return error;
}

SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
                char __user *, buf, int, bufsiz)
{
        struct path path;
        int error;
        int empty = 0;

        if (bufsiz <= 0)
                return -EINVAL;

        error = user_path_at_empty(dfd, pathname, LOOKUP_EMPTY, &path, &empty);
        if (!error) {
                struct inode *inode = path.dentry->d_inode;

                error = empty ? -ENOENT : -EINVAL;
                if (inode->i_op->readlink) {
                        error = security_inode_readlink(path.dentry);
                        if (!error) {
                                touch_atime(&path);
                                error = inode->i_op->readlink(path.dentry,
                                                              buf, bufsiz);
                        }
                }
                path_put(&path);
        }
        return error;
}

SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf,
                int, bufsiz)
{
        return sys_readlinkat(AT_FDCWD, path, buf, bufsiz);
}

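/*
 * Editor's illustration (not part of fs/stat.c): userspace counterpart of
 * the readlinkat()/readlink() syscalls above.  The kernel does not
 * NUL-terminate the buffer, so the returned length must be used.
 */
#if 0
#include <unistd.h>
#include <stdio.h>

int main(void)
{
        char buf[256];
        ssize_t len = readlink("/proc/self/exe", buf, sizeof(buf) - 1);

        if (len >= 0) {
                buf[len] = '\0';
                printf("%s\n", buf);
        }
        return 0;
}
#endif
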
/* ---------- LFS-64 ----------- */
#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)

#ifndef INIT_STRUCT_STAT64_PADDING
# define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st))
#endif

static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
{
        struct stat64 tmp;

        INIT_STRUCT_STAT64_PADDING(tmp);
#ifdef CONFIG_MIPS
        /* mips has weird padding, so we don't get 64 bits there */
        if (!new_valid_dev(stat->dev) || !new_valid_dev(stat->rdev))
                return -EOVERFLOW;
        tmp.st_dev = new_encode_dev(stat->dev);
        tmp.st_rdev = new_encode_dev(stat->rdev);
#else
        tmp.st_dev = huge_encode_dev(stat->dev);
        tmp.st_rdev = huge_encode_dev(stat->rdev);
#endif
        tmp.st_ino = stat->ino;
        if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
                return -EOVERFLOW;
#ifdef STAT64_HAS_BROKEN_ST_INO
        tmp.__st_ino = stat->ino;
#endif
        tmp.st_mode = stat->mode;
        tmp.st_nlink = stat->nlink;
        tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid);
        tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid);
        tmp.st_atime = stat->atime.tv_sec;
        tmp.st_atime_nsec = stat->atime.tv_nsec;
        tmp.st_mtime = stat->mtime.tv_sec;
        tmp.st_mtime_nsec = stat->mtime.tv_nsec;
        tmp.st_ctime = stat->ctime.tv_sec;
        tmp.st_ctime_nsec = stat->ctime.tv_nsec;
        tmp.st_size = stat->size;
        tmp.st_blocks = stat->blocks;
        tmp.st_blksize = stat->blksize;
        return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(stat64, const char __user *, filename,
                struct stat64 __user *, statbuf)
{
        struct kstat stat;
        int error = vfs_stat(filename, &stat);

        if (!error)
                error = cp_new_stat64(&stat, statbuf);

        return error;
}

SYSCALL_DEFINE2(lstat64, const char __user *, filename,
                struct stat64 __user *, statbuf)
{
        struct kstat stat;
        int error = vfs_lstat(filename, &stat);

        if (!error)
                error = cp_new_stat64(&stat, statbuf);

        return error;
}

SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf)
{
        struct kstat stat;
        int error = vfs_fstat(fd, &stat);

        if (!error)
                error = cp_new_stat64(&stat, statbuf);

        return error;
}

SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
                struct stat64 __user *, statbuf, int, flag)
{
        struct kstat stat;
        int error;

        error = vfs_fstatat(dfd, filename, &stat, flag);
        if (error)
                return error;
        return cp_new_stat64(&stat, statbuf);
}
#endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */

/* The caller is responsible for sufficient locking here (i.e. inode->i_lock) */
void __inode_add_bytes(struct inode *inode, loff_t bytes)
{
        inode->i_blocks += bytes >> 9;
        bytes &= 511;
        inode->i_bytes += bytes;
        if (inode->i_bytes >= 512) {
                inode->i_blocks++;
                inode->i_bytes -= 512;
        }
}

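/*
 * Editor's worked example for the accounting above: with i_blocks = 2 and
 * i_bytes = 300, __inode_add_bytes(inode, 700) adds 700 >> 9 = 1 block and
 * keeps 700 & 511 = 188 bytes; 300 + 188 = 488 < 512, so no carry, giving
 * i_blocks = 3 and i_bytes = 488, i.e. 3 * 512 + 488 = 2024 bytes, which
 * matches the original 2 * 512 + 300 plus the 700 bytes added.
 */
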
void inode_add_bytes(struct inode *inode, loff_t bytes)
{
        spin_lock(&inode->i_lock);
        __inode_add_bytes(inode, bytes);
        spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_add_bytes);

void inode_sub_bytes(struct inode *inode, loff_t bytes)
{
        spin_lock(&inode->i_lock);
        inode->i_blocks -= bytes >> 9;
        bytes &= 511;
        if (inode->i_bytes < bytes) {
                inode->i_blocks--;
                inode->i_bytes += 512;
        }
        inode->i_bytes -= bytes;
        spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_sub_bytes);

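/*
 * Editor's worked example for the borrow path above: with i_blocks = 3 and
 * i_bytes = 100, inode_sub_bytes(inode, 200) removes 200 >> 9 = 0 blocks,
 * then, since 100 < (200 & 511), borrows one block (i_blocks = 2,
 * i_bytes = 612) before subtracting, leaving i_bytes = 412; the total
 * 2 * 512 + 412 = 1436 equals 3 * 512 + 100 - 200.
 */
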
loff_t inode_get_bytes(struct inode *inode)
{
        loff_t ret;

        spin_lock(&inode->i_lock);
        ret = (((loff_t)inode->i_blocks) << 9) + inode->i_bytes;
        spin_unlock(&inode->i_lock);
        return ret;
}

EXPORT_SYMBOL(inode_get_bytes);

void inode_set_bytes(struct inode *inode, loff_t bytes)
{
        /* The caller is responsible for sufficient locking
         * (i.e. inode->i_lock) */
        inode->i_blocks = bytes >> 9;
        inode->i_bytes = bytes & 511;
}

EXPORT_SYMBOL(inode_set_bytes);