Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 : /*
3 : * linux/fs/stat.c
4 : *
5 : * Copyright (C) 1991, 1992 Linus Torvalds
6 : */
7 :
8 : #include <linux/blkdev.h>
9 : #include <linux/export.h>
10 : #include <linux/mm.h>
11 : #include <linux/errno.h>
12 : #include <linux/file.h>
13 : #include <linux/highuid.h>
14 : #include <linux/fs.h>
15 : #include <linux/namei.h>
16 : #include <linux/security.h>
17 : #include <linux/cred.h>
18 : #include <linux/syscalls.h>
19 : #include <linux/pagemap.h>
20 : #include <linux/compat.h>
21 : #include <linux/iversion.h>
22 :
23 : #include <linux/uaccess.h>
24 : #include <asm/unistd.h>
25 :
26 : #include "internal.h"
27 : #include "mount.h"
28 :
29 : /**
30 : * generic_fillattr - Fill in the basic attributes from the inode struct
31 : * @idmap: idmap of the mount the inode was found from
32 : * @inode: Inode to use as the source
33 : * @stat: Where to fill in the attributes
34 : *
35 : * Fill in the basic attributes in the kstat structure from data that's to be
36 : * found on the VFS inode structure. This is the default if no getattr inode
37 : * operation is supplied.
38 : *
39 : * If the inode has been found through an idmapped mount the idmap of
40 : * the vfsmount must be passed through @idmap. This function will then
41 : * take care to map the inode according to @idmap before filling in the
42 : * uid and gid fields. On non-idmapped mounts or if permission checking is to be
43 : * performed on the raw inode, simply pass @nop_mnt_idmap.
44 : */
45 0 : void generic_fillattr(struct mnt_idmap *idmap, struct inode *inode,
46 : struct kstat *stat)
47 : {
48 0 : vfsuid_t vfsuid = i_uid_into_vfsuid(idmap, inode);
49 0 : vfsgid_t vfsgid = i_gid_into_vfsgid(idmap, inode);
50 :
51 0 : stat->dev = inode->i_sb->s_dev;
52 0 : stat->ino = inode->i_ino;
53 0 : stat->mode = inode->i_mode;
54 0 : stat->nlink = inode->i_nlink;
55 0 : stat->uid = vfsuid_into_kuid(vfsuid);
56 0 : stat->gid = vfsgid_into_kgid(vfsgid);
57 0 : stat->rdev = inode->i_rdev;
58 0 : stat->size = i_size_read(inode);
59 0 : stat->atime = inode->i_atime;
60 0 : stat->mtime = inode->i_mtime;
61 0 : stat->ctime = inode->i_ctime;
62 0 : stat->blksize = i_blocksize(inode);
63 0 : stat->blocks = inode->i_blocks;
64 0 : }
65 : EXPORT_SYMBOL(generic_fillattr);
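/*
 * Illustrative sketch, not part of fs/stat.c: a filesystem's ->getattr()
 * typically calls generic_fillattr() for the common fields and then
 * overrides only what it tracks itself.  "examplefs" and EXAMPLEFS_I()
 * are hypothetical names.
 */
static int examplefs_getattr(struct mnt_idmap *idmap, const struct path *path,
			     struct kstat *stat, u32 request_mask,
			     unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);

	generic_fillattr(idmap, inode, stat);
	/* report the on-disk allocation instead of the cached i_blocks */
	stat->blocks = EXAMPLEFS_I(inode)->on_disk_bytes >> 9;
	return 0;
}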
66 :
67 : /**
68 : * generic_fill_statx_attr - Fill in the statx attributes from the inode flags
69 : * @inode: Inode to use as the source
70 : * @stat: Where to fill in the attribute flags
71 : *
72 : * Fill in the STATX_ATTR_* flags in the kstat structure for properties of the
73 : * inode that are published on i_flags and enforced by the VFS.
74 : */
75 0 : void generic_fill_statx_attr(struct inode *inode, struct kstat *stat)
76 : {
77 0 : if (inode->i_flags & S_IMMUTABLE)
78 0 : stat->attributes |= STATX_ATTR_IMMUTABLE;
79 0 : if (inode->i_flags & S_APPEND)
80 0 : stat->attributes |= STATX_ATTR_APPEND;
81 0 : stat->attributes_mask |= KSTAT_ATTR_VFS_FLAGS;
82 0 : }
83 : EXPORT_SYMBOL(generic_fill_statx_attr);
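/*
 * Illustrative fragment (hypothetical filesystem code, inside a ->getattr()):
 * filesystem-specific statx attributes are reported by hand, while
 * generic_fill_statx_attr() publishes the VFS-enforced IMMUTABLE/APPEND
 * bits straight from inode->i_flags.
 */
	if (EXAMPLEFS_I(inode)->compressed)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	stat->attributes_mask |= STATX_ATTR_COMPRESSED;
	generic_fill_statx_attr(inode, stat);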
84 :
85 : /**
86 : * vfs_getattr_nosec - getattr without security checks
87 : * @path: file to get attributes from
88 : * @stat: structure to return attributes in
89 : * @request_mask: STATX_xxx flags indicating what the caller wants
90 : * @query_flags: Query mode (AT_STATX_SYNC_TYPE)
91 : *
92 : * Get attributes without calling security_inode_getattr.
93 : *
94 : * Currently the only caller other than vfs_getattr is internal to the
95 : * filehandle lookup code, which uses only the inode number and returns no
96 : * attributes to any user. Any other code probably wants vfs_getattr.
97 : */
98 0 : int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
99 : u32 request_mask, unsigned int query_flags)
100 : {
101 : struct mnt_idmap *idmap;
102 0 : struct inode *inode = d_backing_inode(path->dentry);
103 :
104 0 : memset(stat, 0, sizeof(*stat));
105 0 : stat->result_mask |= STATX_BASIC_STATS;
106 0 : query_flags &= AT_STATX_SYNC_TYPE;
107 :
108 : /* allow the fs to override these if it really wants to */
109 : /* SB_NOATIME means filesystem supplies dummy atime value */
110 0 : if (inode->i_sb->s_flags & SB_NOATIME)
111 0 : stat->result_mask &= ~STATX_ATIME;
112 :
113 : /*
114 : * Note: If you add another clause to set an attribute flag, please
115 : * update attributes_mask below.
116 : */
117 0 : if (IS_AUTOMOUNT(inode))
118 0 : stat->attributes |= STATX_ATTR_AUTOMOUNT;
119 :
120 : if (IS_DAX(inode))
121 : stat->attributes |= STATX_ATTR_DAX;
122 :
123 0 : stat->attributes_mask |= (STATX_ATTR_AUTOMOUNT |
124 : STATX_ATTR_DAX);
125 :
126 0 : if ((request_mask & STATX_CHANGE_COOKIE) && IS_I_VERSION(inode)) {
127 0 : stat->result_mask |= STATX_CHANGE_COOKIE;
128 0 : stat->change_cookie = inode_query_iversion(inode);
129 : }
130 :
131 0 : idmap = mnt_idmap(path->mnt);
132 0 : if (inode->i_op->getattr)
133 0 : return inode->i_op->getattr(idmap, path, stat,
134 : request_mask, query_flags);
135 :
136 0 : generic_fillattr(idmap, inode, stat);
137 0 : return 0;
138 : }
139 : EXPORT_SYMBOL(vfs_getattr_nosec);
140 :
141 : /**
142 : * vfs_getattr - Get the enhanced basic attributes of a file
143 : * @path: The file of interest
144 : * @stat: Where to return the statistics
145 : * @request_mask: STATX_xxx flags indicating what the caller wants
146 : * @query_flags: Query mode (AT_STATX_SYNC_TYPE)
147 : *
148 : * Ask the filesystem for a file's attributes. The caller must set
149 : * request_mask and query_flags to indicate what they want.
150 : *
151 : * If the file is remote, the filesystem can be forced to update the attributes
152 : * from the backing store by passing AT_STATX_FORCE_SYNC in query_flags or can
153 : * suppress the update by passing AT_STATX_DONT_SYNC.
154 : *
155 : * Bits must have been set in request_mask to indicate which attributes the
156 : * caller wants retrieved. Any such attribute not requested may be returned
157 : * anyway, but the value may be approximate, and, if remote, may not have been
158 : * synchronised with the server.
159 : *
160 : * 0 will be returned on success, and a -ve error code if unsuccessful.
161 : */
162 0 : int vfs_getattr(const struct path *path, struct kstat *stat,
163 : u32 request_mask, unsigned int query_flags)
164 : {
165 : int retval;
166 :
167 0 : retval = security_inode_getattr(path);
168 : if (retval)
169 : return retval;
170 0 : return vfs_getattr_nosec(path, stat, request_mask, query_flags);
171 : }
172 : EXPORT_SYMBOL(vfs_getattr);
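/*
 * Illustrative sketch, not part of fs/stat.c: an in-kernel caller that only
 * needs the current size of a file it already holds a struct path for.
 * AT_STATX_SYNC_AS_STAT leaves the sync behaviour to the filesystem.
 */
static int example_path_size(const struct path *path, loff_t *size)
{
	struct kstat stat;
	int error;

	error = vfs_getattr(path, &stat, STATX_SIZE, AT_STATX_SYNC_AS_STAT);
	if (error)
		return error;
	*size = stat.size;
	return 0;
}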
173 :
174 : /**
175 : * vfs_fstat - Get the basic attributes by file descriptor
176 : * @fd: The file descriptor referring to the file of interest
177 : * @stat: The result structure to fill in.
178 : *
179 : * This function is a wrapper around vfs_getattr(). The main difference is
180 : * that it uses a file descriptor to determine the file location.
181 : *
182 : * 0 will be returned on success, and a -ve error code if unsuccessful.
183 : */
184 0 : int vfs_fstat(int fd, struct kstat *stat)
185 : {
186 : struct fd f;
187 : int error;
188 :
189 0 : f = fdget_raw(fd);
190 0 : if (!f.file)
191 : return -EBADF;
192 0 : error = vfs_getattr(&f.file->f_path, stat, STATX_BASIC_STATS, 0);
193 0 : fdput(f);
194 : return error;
195 : }
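/*
 * Illustrative sketch: the same kind of query by file descriptor.  vfs_fstat()
 * resolves the fd itself (fdget_raw() above) and always requests
 * STATX_BASIC_STATS.
 */
static int example_fd_nlink(int fd, unsigned int *nlink)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		*nlink = stat.nlink;
	return error;
}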
196 :
197 0 : int getname_statx_lookup_flags(int flags)
198 : {
199 0 : int lookup_flags = 0;
200 :
201 0 : if (!(flags & AT_SYMLINK_NOFOLLOW))
202 0 : lookup_flags |= LOOKUP_FOLLOW;
203 0 : if (!(flags & AT_NO_AUTOMOUNT))
204 0 : lookup_flags |= LOOKUP_AUTOMOUNT;
205 0 : if (flags & AT_EMPTY_PATH)
206 0 : lookup_flags |= LOOKUP_EMPTY;
207 :
208 0 : return lookup_flags;
209 : }
210 :
211 : /**
212 : * vfs_statx - Get basic and extra attributes by filename
213 : * @dfd: A file descriptor representing the base dir for a relative filename
214 : * @filename: The name of the file of interest
215 : * @flags: Flags to control the query
216 : * @stat: The result structure to fill in.
217 : * @request_mask: STATX_xxx flags indicating what the caller wants
218 : *
219 : * This function is a wrapper around vfs_getattr(). The main difference is
220 : * that it uses a filename and base directory to determine the file location.
221 : * Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink
222 : * at the given name from being referenced.
223 : *
224 : * 0 will be returned on success, and a -ve error code if unsuccessful.
225 : */
226 0 : static int vfs_statx(int dfd, struct filename *filename, int flags,
227 : struct kstat *stat, u32 request_mask)
228 : {
229 : struct path path;
230 0 : unsigned int lookup_flags = getname_statx_lookup_flags(flags);
231 : int error;
232 :
233 0 : if (flags & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT | AT_EMPTY_PATH |
234 : AT_STATX_SYNC_TYPE))
235 : return -EINVAL;
236 :
237 : retry:
238 0 : error = filename_lookup(dfd, filename, lookup_flags, &path, NULL);
239 0 : if (error)
240 : goto out;
241 :
242 0 : error = vfs_getattr(&path, stat, request_mask, flags);
243 :
244 0 : stat->mnt_id = real_mount(path.mnt)->mnt_id;
245 0 : stat->result_mask |= STATX_MNT_ID;
246 :
247 0 : if (path.mnt->mnt_root == path.dentry)
248 0 : stat->attributes |= STATX_ATTR_MOUNT_ROOT;
249 0 : stat->attributes_mask |= STATX_ATTR_MOUNT_ROOT;
250 :
251 : /* Handle STATX_DIOALIGN for block devices. */
252 0 : if (request_mask & STATX_DIOALIGN) {
253 0 : struct inode *inode = d_backing_inode(path.dentry);
254 :
255 0 : if (S_ISBLK(inode->i_mode))
256 0 : bdev_statx_dioalign(inode, stat);
257 : }
258 :
259 0 : path_put(&path);
260 0 : if (retry_estale(error, lookup_flags)) {
261 0 : lookup_flags |= LOOKUP_REVAL;
262 0 : goto retry;
263 : }
264 : out:
265 : return error;
266 : }
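/*
 * Illustrative userspace sketch of the STATX_DIOALIGN query handled above for
 * block devices; needs kernel 6.1+ and libc headers that define the
 * stx_dio_*_align fields.
 */
#include <fcntl.h>
#include <sys/stat.h>

static int example_dio_align(const char *path, unsigned int *mem_align,
			     unsigned int *offset_align)
{
	struct statx stx;

	if (statx(AT_FDCWD, path, 0, STATX_DIOALIGN, &stx) < 0)
		return -1;
	if (!(stx.stx_mask & STATX_DIOALIGN))
		return -1;		/* not supported for this file */
	*mem_align = stx.stx_dio_mem_align;
	*offset_align = stx.stx_dio_offset_align;
	return 0;
}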
267 :
268 0 : int vfs_fstatat(int dfd, const char __user *filename,
269 : struct kstat *stat, int flags)
270 : {
271 : int ret;
272 0 : int statx_flags = flags | AT_NO_AUTOMOUNT;
273 : struct filename *name;
274 :
275 0 : name = getname_flags(filename, getname_statx_lookup_flags(statx_flags), NULL);
276 0 : ret = vfs_statx(dfd, name, statx_flags, stat, STATX_BASIC_STATS);
277 0 : putname(name);
278 :
279 0 : return ret;
280 : }
281 :
282 : #ifdef __ARCH_WANT_OLD_STAT
283 :
284 : /*
285 : * For backward compatibility? Maybe this should be moved
286 : * into arch/i386 instead?
287 : */
288 0 : static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * statbuf)
289 : {
290 : static int warncount = 5;
291 : struct __old_kernel_stat tmp;
292 :
293 0 : if (warncount > 0) {
294 0 : warncount--;
295 0 : printk(KERN_WARNING "VFS: Warning: %s using old stat() call. Recompile your binary.\n",
296 : current->comm);
297 0 : } else if (warncount < 0) {
298 : /* it's laughable, but... */
299 0 : warncount = 0;
300 : }
301 :
302 0 : memset(&tmp, 0, sizeof(struct __old_kernel_stat));
303 0 : tmp.st_dev = old_encode_dev(stat->dev);
304 0 : tmp.st_ino = stat->ino;
305 0 : if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
306 : return -EOVERFLOW;
307 0 : tmp.st_mode = stat->mode;
308 0 : tmp.st_nlink = stat->nlink;
309 0 : if (tmp.st_nlink != stat->nlink)
310 : return -EOVERFLOW;
311 0 : SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
312 0 : SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
313 0 : tmp.st_rdev = old_encode_dev(stat->rdev);
314 : #if BITS_PER_LONG == 32
315 : if (stat->size > MAX_NON_LFS)
316 : return -EOVERFLOW;
317 : #endif
318 0 : tmp.st_size = stat->size;
319 0 : tmp.st_atime = stat->atime.tv_sec;
320 0 : tmp.st_mtime = stat->mtime.tv_sec;
321 0 : tmp.st_ctime = stat->ctime.tv_sec;
322 0 : return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
323 : }
324 :
325 0 : SYSCALL_DEFINE2(stat, const char __user *, filename,
326 : struct __old_kernel_stat __user *, statbuf)
327 : {
328 : struct kstat stat;
329 : int error;
330 :
331 0 : error = vfs_stat(filename, &stat);
332 0 : if (error)
333 0 : return error;
334 :
335 0 : return cp_old_stat(&stat, statbuf);
336 : }
337 :
338 0 : SYSCALL_DEFINE2(lstat, const char __user *, filename,
339 : struct __old_kernel_stat __user *, statbuf)
340 : {
341 : struct kstat stat;
342 : int error;
343 :
344 0 : error = vfs_lstat(filename, &stat);
345 0 : if (error)
346 0 : return error;
347 :
348 0 : return cp_old_stat(&stat, statbuf);
349 : }
350 :
351 0 : SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf)
352 : {
353 : struct kstat stat;
354 0 : int error = vfs_fstat(fd, &stat);
355 :
356 0 : if (!error)
357 0 : error = cp_old_stat(&stat, statbuf);
358 :
359 0 : return error;
360 : }
361 :
362 : #endif /* __ARCH_WANT_OLD_STAT */
363 :
364 : #ifdef __ARCH_WANT_NEW_STAT
365 :
366 : #if BITS_PER_LONG == 32
367 : # define choose_32_64(a,b) a
368 : #else
369 : # define choose_32_64(a,b) b
370 : #endif
371 :
372 : #ifndef INIT_STRUCT_STAT_PADDING
373 : # define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
374 : #endif
375 :
376 0 : static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
377 : {
378 : struct stat tmp;
379 :
380 : if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
381 : return -EOVERFLOW;
382 : if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
383 : return -EOVERFLOW;
384 : #if BITS_PER_LONG == 32
385 : if (stat->size > MAX_NON_LFS)
386 : return -EOVERFLOW;
387 : #endif
388 :
389 0 : INIT_STRUCT_STAT_PADDING(tmp);
390 0 : tmp.st_dev = new_encode_dev(stat->dev);
391 0 : tmp.st_ino = stat->ino;
392 : if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
393 : return -EOVERFLOW;
394 0 : tmp.st_mode = stat->mode;
395 0 : tmp.st_nlink = stat->nlink;
396 : if (tmp.st_nlink != stat->nlink)
397 : return -EOVERFLOW;
398 0 : SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
399 0 : SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
400 0 : tmp.st_rdev = new_encode_dev(stat->rdev);
401 0 : tmp.st_size = stat->size;
402 0 : tmp.st_atime = stat->atime.tv_sec;
403 0 : tmp.st_mtime = stat->mtime.tv_sec;
404 0 : tmp.st_ctime = stat->ctime.tv_sec;
405 : #ifdef STAT_HAVE_NSEC
406 0 : tmp.st_atime_nsec = stat->atime.tv_nsec;
407 0 : tmp.st_mtime_nsec = stat->mtime.tv_nsec;
408 0 : tmp.st_ctime_nsec = stat->ctime.tv_nsec;
409 : #endif
410 0 : tmp.st_blocks = stat->blocks;
411 0 : tmp.st_blksize = stat->blksize;
412 0 : return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
413 : }
414 :
415 0 : SYSCALL_DEFINE2(newstat, const char __user *, filename,
416 : struct stat __user *, statbuf)
417 : {
418 : struct kstat stat;
419 0 : int error = vfs_stat(filename, &stat);
420 :
421 0 : if (error)
422 0 : return error;
423 0 : return cp_new_stat(&stat, statbuf);
424 : }
425 :
426 0 : SYSCALL_DEFINE2(newlstat, const char __user *, filename,
427 : struct stat __user *, statbuf)
428 : {
429 : struct kstat stat;
430 : int error;
431 :
432 0 : error = vfs_lstat(filename, &stat);
433 0 : if (error)
434 0 : return error;
435 :
436 0 : return cp_new_stat(&stat, statbuf);
437 : }
438 :
439 : #if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT)
440 0 : SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename,
441 : struct stat __user *, statbuf, int, flag)
442 : {
443 : struct kstat stat;
444 : int error;
445 :
446 0 : error = vfs_fstatat(dfd, filename, &stat, flag);
447 0 : if (error)
448 0 : return error;
449 0 : return cp_new_stat(&stat, statbuf);
450 : }
451 : #endif
452 :
453 0 : SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf)
454 : {
455 : struct kstat stat;
456 0 : int error = vfs_fstat(fd, &stat);
457 :
458 0 : if (!error)
459 0 : error = cp_new_stat(&stat, statbuf);
460 :
461 0 : return error;
462 : }
463 : #endif
464 :
465 0 : static int do_readlinkat(int dfd, const char __user *pathname,
466 : char __user *buf, int bufsiz)
467 : {
468 : struct path path;
469 : int error;
470 0 : int empty = 0;
471 0 : unsigned int lookup_flags = LOOKUP_EMPTY;
472 :
473 0 : if (bufsiz <= 0)
474 : return -EINVAL;
475 :
476 : retry:
477 0 : error = user_path_at_empty(dfd, pathname, lookup_flags, &path, &empty);
478 0 : if (!error) {
479 0 : struct inode *inode = d_backing_inode(path.dentry);
480 :
481 0 : error = empty ? -ENOENT : -EINVAL;
482 : /*
483 : * AFS mountpoints allow readlink(2) but are not symlinks
484 : */
485 0 : if (d_is_symlink(path.dentry) || inode->i_op->readlink) {
486 0 : error = security_inode_readlink(path.dentry);
487 : if (!error) {
488 0 : touch_atime(&path);
489 0 : error = vfs_readlink(path.dentry, buf, bufsiz);
490 : }
491 : }
492 0 : path_put(&path);
493 0 : if (retry_estale(error, lookup_flags)) {
494 : lookup_flags |= LOOKUP_REVAL;
495 : goto retry;
496 : }
497 : }
498 : return error;
499 : }
500 :
501 0 : SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
502 : char __user *, buf, int, bufsiz)
503 : {
504 0 : return do_readlinkat(dfd, pathname, buf, bufsiz);
505 : }
506 :
507 0 : SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf,
508 : int, bufsiz)
509 : {
510 0 : return do_readlinkat(AT_FDCWD, path, buf, bufsiz);
511 : }
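/*
 * Illustrative userspace sketch: readlink(2)/readlinkat(2) return the number
 * of bytes placed in the buffer and never NUL-terminate it, so callers must
 * add the terminator (and detect truncation) themselves.
 */
#include <unistd.h>

static ssize_t example_readlink(const char *path, char *buf, size_t bufsiz)
{
	ssize_t n = readlink(path, buf, bufsiz - 1);

	if (n >= 0)
		buf[n] = '\0';
	return n;
}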
512 :
513 :
514 : /* ---------- LFS-64 ----------- */
515 : #if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)
516 :
517 : #ifndef INIT_STRUCT_STAT64_PADDING
518 : # define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st))
519 : #endif
520 :
521 : static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
522 : {
523 : struct stat64 tmp;
524 :
525 : INIT_STRUCT_STAT64_PADDING(tmp);
526 : #ifdef CONFIG_MIPS
527 : /* mips has weird padding, so we don't get 64 bits there */
528 : tmp.st_dev = new_encode_dev(stat->dev);
529 : tmp.st_rdev = new_encode_dev(stat->rdev);
530 : #else
531 : tmp.st_dev = huge_encode_dev(stat->dev);
532 : tmp.st_rdev = huge_encode_dev(stat->rdev);
533 : #endif
534 : tmp.st_ino = stat->ino;
535 : if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
536 : return -EOVERFLOW;
537 : #ifdef STAT64_HAS_BROKEN_ST_INO
538 : tmp.__st_ino = stat->ino;
539 : #endif
540 : tmp.st_mode = stat->mode;
541 : tmp.st_nlink = stat->nlink;
542 : tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid);
543 : tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid);
544 : tmp.st_atime = stat->atime.tv_sec;
545 : tmp.st_atime_nsec = stat->atime.tv_nsec;
546 : tmp.st_mtime = stat->mtime.tv_sec;
547 : tmp.st_mtime_nsec = stat->mtime.tv_nsec;
548 : tmp.st_ctime = stat->ctime.tv_sec;
549 : tmp.st_ctime_nsec = stat->ctime.tv_nsec;
550 : tmp.st_size = stat->size;
551 : tmp.st_blocks = stat->blocks;
552 : tmp.st_blksize = stat->blksize;
553 : return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
554 : }
555 :
556 : SYSCALL_DEFINE2(stat64, const char __user *, filename,
557 : struct stat64 __user *, statbuf)
558 : {
559 : struct kstat stat;
560 : int error = vfs_stat(filename, &stat);
561 :
562 : if (!error)
563 : error = cp_new_stat64(&stat, statbuf);
564 :
565 : return error;
566 : }
567 :
568 : SYSCALL_DEFINE2(lstat64, const char __user *, filename,
569 : struct stat64 __user *, statbuf)
570 : {
571 : struct kstat stat;
572 : int error = vfs_lstat(filename, &stat);
573 :
574 : if (!error)
575 : error = cp_new_stat64(&stat, statbuf);
576 :
577 : return error;
578 : }
579 :
580 : SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf)
581 : {
582 : struct kstat stat;
583 : int error = vfs_fstat(fd, &stat);
584 :
585 : if (!error)
586 : error = cp_new_stat64(&stat, statbuf);
587 :
588 : return error;
589 : }
590 :
591 : SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
592 : struct stat64 __user *, statbuf, int, flag)
593 : {
594 : struct kstat stat;
595 : int error;
596 :
597 : error = vfs_fstatat(dfd, filename, &stat, flag);
598 : if (error)
599 : return error;
600 : return cp_new_stat64(&stat, statbuf);
601 : }
602 : #endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */
603 :
604 : static noinline_for_stack int
605 0 : cp_statx(const struct kstat *stat, struct statx __user *buffer)
606 : {
607 : struct statx tmp;
608 :
609 0 : memset(&tmp, 0, sizeof(tmp));
610 :
611 : /* STATX_CHANGE_COOKIE is kernel-only for now */
612 0 : tmp.stx_mask = stat->result_mask & ~STATX_CHANGE_COOKIE;
613 0 : tmp.stx_blksize = stat->blksize;
614 : /* STATX_ATTR_CHANGE_MONOTONIC is kernel-only for now */
615 0 : tmp.stx_attributes = stat->attributes & ~STATX_ATTR_CHANGE_MONOTONIC;
616 0 : tmp.stx_nlink = stat->nlink;
617 0 : tmp.stx_uid = from_kuid_munged(current_user_ns(), stat->uid);
618 0 : tmp.stx_gid = from_kgid_munged(current_user_ns(), stat->gid);
619 0 : tmp.stx_mode = stat->mode;
620 0 : tmp.stx_ino = stat->ino;
621 0 : tmp.stx_size = stat->size;
622 0 : tmp.stx_blocks = stat->blocks;
623 0 : tmp.stx_attributes_mask = stat->attributes_mask;
624 0 : tmp.stx_atime.tv_sec = stat->atime.tv_sec;
625 0 : tmp.stx_atime.tv_nsec = stat->atime.tv_nsec;
626 0 : tmp.stx_btime.tv_sec = stat->btime.tv_sec;
627 0 : tmp.stx_btime.tv_nsec = stat->btime.tv_nsec;
628 0 : tmp.stx_ctime.tv_sec = stat->ctime.tv_sec;
629 0 : tmp.stx_ctime.tv_nsec = stat->ctime.tv_nsec;
630 0 : tmp.stx_mtime.tv_sec = stat->mtime.tv_sec;
631 0 : tmp.stx_mtime.tv_nsec = stat->mtime.tv_nsec;
632 0 : tmp.stx_rdev_major = MAJOR(stat->rdev);
633 0 : tmp.stx_rdev_minor = MINOR(stat->rdev);
634 0 : tmp.stx_dev_major = MAJOR(stat->dev);
635 0 : tmp.stx_dev_minor = MINOR(stat->dev);
636 0 : tmp.stx_mnt_id = stat->mnt_id;
637 0 : tmp.stx_dio_mem_align = stat->dio_mem_align;
638 0 : tmp.stx_dio_offset_align = stat->dio_offset_align;
639 :
640 0 : return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0;
641 : }
642 :
643 0 : int do_statx(int dfd, struct filename *filename, unsigned int flags,
644 : unsigned int mask, struct statx __user *buffer)
645 : {
646 : struct kstat stat;
647 : int error;
648 :
649 0 : if (mask & STATX__RESERVED)
650 : return -EINVAL;
651 0 : if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
652 : return -EINVAL;
653 :
654 : /* STATX_CHANGE_COOKIE is kernel-only for now. Ignore requests
655 : * from userland.
656 : */
657 0 : mask &= ~STATX_CHANGE_COOKIE;
658 :
659 0 : error = vfs_statx(dfd, filename, flags, &stat, mask);
660 0 : if (error)
661 : return error;
662 :
663 0 : return cp_statx(&stat, buffer);
664 : }
665 :
666 : /**
667 : * sys_statx - System call to get enhanced stats
668 : * @dfd: Base directory to pathwalk from *or* fd to stat.
669 : * @filename: File to stat or "" with AT_EMPTY_PATH
670 : * @flags: AT_* flags to control pathwalk.
671 : * @mask: Parts of statx struct actually required.
672 : * @buffer: Result buffer.
673 : *
674 : * Note that fstat() can be emulated by setting dfd to the fd of interest,
675 : * supplying "" as the filename and setting AT_EMPTY_PATH in the flags.
676 : */
677 0 : SYSCALL_DEFINE5(statx,
678 : int, dfd, const char __user *, filename, unsigned, flags,
679 : unsigned int, mask,
680 : struct statx __user *, buffer)
681 : {
682 : int ret;
683 : struct filename *name;
684 :
685 0 : name = getname_flags(filename, getname_statx_lookup_flags(flags), NULL);
686 0 : ret = do_statx(dfd, name, flags, mask, buffer);
687 0 : putname(name);
688 :
689 0 : return ret;
690 : }
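/*
 * Illustrative userspace sketch of the fstat() emulation described above:
 * with an empty path and AT_EMPTY_PATH, statx() operates on @dfd itself.
 * Needs kernel 4.11+ and a libc that exposes statx() (glibc 2.28+).
 */
#include <fcntl.h>
#include <sys/stat.h>

static int example_fstat_via_statx(int fd, struct statx *stx)
{
	return statx(fd, "", AT_EMPTY_PATH | AT_STATX_SYNC_AS_STAT,
		     STATX_BASIC_STATS, stx);
}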
691 :
692 : #if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_STAT)
693 : static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
694 : {
695 : struct compat_stat tmp;
696 :
697 : if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
698 : return -EOVERFLOW;
699 : if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
700 : return -EOVERFLOW;
701 :
702 : memset(&tmp, 0, sizeof(tmp));
703 : tmp.st_dev = new_encode_dev(stat->dev);
704 : tmp.st_ino = stat->ino;
705 : if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
706 : return -EOVERFLOW;
707 : tmp.st_mode = stat->mode;
708 : tmp.st_nlink = stat->nlink;
709 : if (tmp.st_nlink != stat->nlink)
710 : return -EOVERFLOW;
711 : SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
712 : SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
713 : tmp.st_rdev = new_encode_dev(stat->rdev);
714 : if ((u64) stat->size > MAX_NON_LFS)
715 : return -EOVERFLOW;
716 : tmp.st_size = stat->size;
717 : tmp.st_atime = stat->atime.tv_sec;
718 : tmp.st_atime_nsec = stat->atime.tv_nsec;
719 : tmp.st_mtime = stat->mtime.tv_sec;
720 : tmp.st_mtime_nsec = stat->mtime.tv_nsec;
721 : tmp.st_ctime = stat->ctime.tv_sec;
722 : tmp.st_ctime_nsec = stat->ctime.tv_nsec;
723 : tmp.st_blocks = stat->blocks;
724 : tmp.st_blksize = stat->blksize;
725 : return copy_to_user(ubuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
726 : }
727 :
728 : COMPAT_SYSCALL_DEFINE2(newstat, const char __user *, filename,
729 : struct compat_stat __user *, statbuf)
730 : {
731 : struct kstat stat;
732 : int error;
733 :
734 : error = vfs_stat(filename, &stat);
735 : if (error)
736 : return error;
737 : return cp_compat_stat(&stat, statbuf);
738 : }
739 :
740 : COMPAT_SYSCALL_DEFINE2(newlstat, const char __user *, filename,
741 : struct compat_stat __user *, statbuf)
742 : {
743 : struct kstat stat;
744 : int error;
745 :
746 : error = vfs_lstat(filename, &stat);
747 : if (error)
748 : return error;
749 : return cp_compat_stat(&stat, statbuf);
750 : }
751 :
752 : #ifndef __ARCH_WANT_STAT64
753 : COMPAT_SYSCALL_DEFINE4(newfstatat, unsigned int, dfd,
754 : const char __user *, filename,
755 : struct compat_stat __user *, statbuf, int, flag)
756 : {
757 : struct kstat stat;
758 : int error;
759 :
760 : error = vfs_fstatat(dfd, filename, &stat, flag);
761 : if (error)
762 : return error;
763 : return cp_compat_stat(&stat, statbuf);
764 : }
765 : #endif
766 :
767 : COMPAT_SYSCALL_DEFINE2(newfstat, unsigned int, fd,
768 : struct compat_stat __user *, statbuf)
769 : {
770 : struct kstat stat;
771 : int error = vfs_fstat(fd, &stat);
772 :
773 : if (!error)
774 : error = cp_compat_stat(&stat, statbuf);
775 : return error;
776 : }
777 : #endif
778 :
779 : /* Caller is responsible for sufficient locking here (i.e. inode->i_lock) */
780 0 : void __inode_add_bytes(struct inode *inode, loff_t bytes)
781 : {
782 0 : inode->i_blocks += bytes >> 9;
783 0 : bytes &= 511;
784 0 : inode->i_bytes += bytes;
785 0 : if (inode->i_bytes >= 512) {
786 0 : inode->i_blocks++;
787 0 : inode->i_bytes -= 512;
788 : }
789 0 : }
790 : EXPORT_SYMBOL(__inode_add_bytes);
791 :
792 0 : void inode_add_bytes(struct inode *inode, loff_t bytes)
793 : {
794 0 : spin_lock(&inode->i_lock);
795 0 : __inode_add_bytes(inode, bytes);
796 0 : spin_unlock(&inode->i_lock);
797 0 : }
798 :
799 : EXPORT_SYMBOL(inode_add_bytes);
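/*
 * Illustrative sketch, hypothetical filesystem code: i_blocks counts whole
 * 512-byte sectors and i_bytes carries the sub-sector remainder, so mixed-size
 * updates stay exact.  For example, adding 1300 bytes bumps i_blocks by 2 and
 * i_bytes by 276; if i_bytes then reaches 512, one more block is carried over.
 * Callers already holding inode->i_lock use the __inode_add_bytes() form.
 */
static void examplefs_account_alloc(struct inode *inode, loff_t nr_bytes)
{
	spin_lock(&inode->i_lock);
	__inode_add_bytes(inode, nr_bytes);
	spin_unlock(&inode->i_lock);
}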
800 :
801 0 : void __inode_sub_bytes(struct inode *inode, loff_t bytes)
802 : {
803 0 : inode->i_blocks -= bytes >> 9;
804 0 : bytes &= 511;
805 0 : if (inode->i_bytes < bytes) {
806 0 : inode->i_blocks--;
807 0 : inode->i_bytes += 512;
808 : }
809 0 : inode->i_bytes -= bytes;
810 0 : }
811 :
812 : EXPORT_SYMBOL(__inode_sub_bytes);
813 :
814 0 : void inode_sub_bytes(struct inode *inode, loff_t bytes)
815 : {
816 0 : spin_lock(&inode->i_lock);
817 0 : __inode_sub_bytes(inode, bytes);
818 0 : spin_unlock(&inode->i_lock);
819 0 : }
820 :
821 : EXPORT_SYMBOL(inode_sub_bytes);
822 :
823 0 : loff_t inode_get_bytes(struct inode *inode)
824 : {
825 : loff_t ret;
826 :
827 0 : spin_lock(&inode->i_lock);
828 0 : ret = __inode_get_bytes(inode);
829 0 : spin_unlock(&inode->i_lock);
830 0 : return ret;
831 : }
832 :
833 : EXPORT_SYMBOL(inode_get_bytes);
834 :
835 0 : void inode_set_bytes(struct inode *inode, loff_t bytes)
836 : {
837 : /* Caller is responsible for sufficient locking here
838 : * (i.e. inode->i_lock) */
839 0 : inode->i_blocks = bytes >> 9;
840 0 : inode->i_bytes = bytes & 511;
841 0 : }
842 :
843 : EXPORT_SYMBOL(inode_set_bytes);