Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0-only
2 : /*
3 : * (C) 1997 Linus Torvalds
4 : * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
5 : */
6 : #include <linux/export.h>
7 : #include <linux/fs.h>
8 : #include <linux/filelock.h>
9 : #include <linux/mm.h>
10 : #include <linux/backing-dev.h>
11 : #include <linux/hash.h>
12 : #include <linux/swap.h>
13 : #include <linux/security.h>
14 : #include <linux/cdev.h>
15 : #include <linux/memblock.h>
16 : #include <linux/fsnotify.h>
17 : #include <linux/mount.h>
18 : #include <linux/posix_acl.h>
19 : #include <linux/prefetch.h>
20 : #include <linux/buffer_head.h> /* for inode_has_buffers */
21 : #include <linux/ratelimit.h>
22 : #include <linux/list_lru.h>
23 : #include <linux/iversion.h>
24 : #include <trace/events/writeback.h>
25 : #include "internal.h"
26 :
27 : /*
28 : * Inode locking rules:
29 : *
30 : * inode->i_lock protects:
31 : * inode->i_state, inode->i_hash, __iget(), inode->i_io_list
32 : * Inode LRU list locks protect:
33 : * inode->i_sb->s_inode_lru, inode->i_lru
34 : * inode->i_sb->s_inode_list_lock protects:
35 : * inode->i_sb->s_inodes, inode->i_sb_list
36 : * bdi->wb.list_lock protects:
37 : * bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_io_list
38 : * inode_hash_lock protects:
39 : * inode_hashtable, inode->i_hash
40 : *
41 : * Lock ordering:
42 : *
43 : * inode->i_sb->s_inode_list_lock
44 : * inode->i_lock
45 : * Inode LRU list locks
46 : *
47 : * bdi->wb.list_lock
48 : * inode->i_lock
49 : *
50 : * inode_hash_lock
51 : * inode->i_sb->s_inode_list_lock
52 : * inode->i_lock
53 : *
54 : * iunique_lock
55 : * inode_hash_lock
56 : */
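
Read the ordering above as nesting rules. For instance, a walk of a superblock's inode list nests i_lock inside s_inode_list_lock, as evict_inodes() later in this file does; an illustrative fragment (not part of the source):

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);	/* nests inside s_inode_list_lock */
		/* ... examine or update inode->i_state ... */
		spin_unlock(&inode->i_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
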
57 :
58 : static unsigned int i_hash_mask __read_mostly;
59 : static unsigned int i_hash_shift __read_mostly;
60 : static struct hlist_head *inode_hashtable __read_mostly;
61 : static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);
62 :
63 : /*
64 : * Empty aops. Can be used for the cases where the user does not
65 : * define any of the address_space operations.
66 : */
67 : const struct address_space_operations empty_aops = {
68 : };
69 : EXPORT_SYMBOL(empty_aops);
70 :
71 : static DEFINE_PER_CPU(unsigned long, nr_inodes);
72 : static DEFINE_PER_CPU(unsigned long, nr_unused);
73 :
74 : static struct kmem_cache *inode_cachep __read_mostly;
75 :
76 : static long get_nr_inodes(void)
77 : {
78 : int i;
79 17 : long sum = 0;
80 34 : for_each_possible_cpu(i)
81 17 : sum += per_cpu(nr_inodes, i);
82 17 : return sum < 0 ? 0 : sum;
83 : }
84 :
85 : static inline long get_nr_inodes_unused(void)
86 : {
87 : int i;
88 : long sum = 0;
89 17 : for_each_possible_cpu(i)
90 17 : sum += per_cpu(nr_unused, i);
91 17 : return sum < 0 ? 0 : sum;
92 : }
93 :
94 17 : long get_nr_dirty_inodes(void)
95 : {
96 : /* not actually dirty inodes, but a wild approximation */
97 34 : long nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
98 17 : return nr_dirty > 0 ? nr_dirty : 0;
99 : }
100 :
101 : /*
102 : * Handle nr_inode sysctl
103 : */
104 : #ifdef CONFIG_SYSCTL
105 : /*
106 : * Statistics gathering..
107 : */
108 : static struct inodes_stat_t inodes_stat;
109 :
110 0 : static int proc_nr_inodes(struct ctl_table *table, int write, void *buffer,
111 : size_t *lenp, loff_t *ppos)
112 : {
113 0 : inodes_stat.nr_inodes = get_nr_inodes();
114 0 : inodes_stat.nr_unused = get_nr_inodes_unused();
115 0 : return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
116 : }
117 :
118 : static struct ctl_table inodes_sysctls[] = {
119 : {
120 : .procname = "inode-nr",
121 : .data = &inodes_stat,
122 : .maxlen = 2*sizeof(long),
123 : .mode = 0444,
124 : .proc_handler = proc_nr_inodes,
125 : },
126 : {
127 : .procname = "inode-state",
128 : .data = &inodes_stat,
129 : .maxlen = 7*sizeof(long),
130 : .mode = 0444,
131 : .proc_handler = proc_nr_inodes,
132 : },
133 : { }
134 : };
135 :
136 1 : static int __init init_fs_inode_sysctls(void)
137 : {
138 1 : register_sysctl_init("fs", inodes_sysctls);
139 1 : return 0;
140 : }
141 : early_initcall(init_fs_inode_sysctls);
142 : #endif
143 :
144 0 : static int no_open(struct inode *inode, struct file *file)
145 : {
146 0 : return -ENXIO;
147 : }
148 :
149 : /**
150 : * inode_init_always - perform inode structure initialisation
151 : * @sb: superblock inode belongs to
152 : * @inode: inode to initialise
153 : *
154 : * These are initializations that need to be done on every inode
155 : * allocation as the fields are not initialised by slab allocation.
156 : */
157 51 : int inode_init_always(struct super_block *sb, struct inode *inode)
158 : {
159 : static const struct inode_operations empty_iops;
160 : static const struct file_operations no_open_fops = {.open = no_open};
161 51 : struct address_space *const mapping = &inode->i_data;
162 :
163 51 : inode->i_sb = sb;
164 51 : inode->i_blkbits = sb->s_blocksize_bits;
165 51 : inode->i_flags = 0;
166 102 : atomic64_set(&inode->i_sequence, 0);
167 102 : atomic_set(&inode->i_count, 1);
168 51 : inode->i_op = &empty_iops;
169 51 : inode->i_fop = &no_open_fops;
170 51 : inode->i_ino = 0;
171 51 : inode->__i_nlink = 1;
172 51 : inode->i_opflags = 0;
173 51 : if (sb->s_xattr)
174 0 : inode->i_opflags |= IOP_XATTR;
175 51 : i_uid_write(inode, 0);
176 51 : i_gid_write(inode, 0);
177 102 : atomic_set(&inode->i_writecount, 0);
178 51 : inode->i_size = 0;
179 51 : inode->i_write_hint = WRITE_LIFE_NOT_SET;
180 51 : inode->i_blocks = 0;
181 51 : inode->i_bytes = 0;
182 51 : inode->i_generation = 0;
183 51 : inode->i_pipe = NULL;
184 : inode->i_cdev = NULL;
185 : inode->i_link = NULL;
186 : inode->i_dir_seq = 0;
187 51 : inode->i_rdev = 0;
188 51 : inode->dirtied_when = 0;
189 :
190 : #ifdef CONFIG_CGROUP_WRITEBACK
191 : inode->i_wb_frn_winner = 0;
192 : inode->i_wb_frn_avg_time = 0;
193 : inode->i_wb_frn_history = 0;
194 : #endif
195 :
196 51 : spin_lock_init(&inode->i_lock);
197 : lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);
198 :
199 51 : init_rwsem(&inode->i_rwsem);
200 : lockdep_set_class(&inode->i_rwsem, &sb->s_type->i_mutex_key);
201 :
202 102 : atomic_set(&inode->i_dio_count, 0);
203 :
204 51 : mapping->a_ops = &empty_aops;
205 51 : mapping->host = inode;
206 51 : mapping->flags = 0;
207 51 : mapping->wb_err = 0;
208 102 : atomic_set(&mapping->i_mmap_writable, 0);
209 : #ifdef CONFIG_READ_ONLY_THP_FOR_FS
210 : atomic_set(&mapping->nr_thps, 0);
211 : #endif
212 102 : mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
213 51 : mapping->private_data = NULL;
214 51 : mapping->writeback_index = 0;
215 51 : init_rwsem(&mapping->invalidate_lock);
216 : lockdep_set_class_and_name(&mapping->invalidate_lock,
217 : &sb->s_type->invalidate_lock_key,
218 : "mapping.invalidate_lock");
219 51 : inode->i_private = NULL;
220 51 : inode->i_mapping = mapping;
221 51 : INIT_HLIST_HEAD(&inode->i_dentry); /* buggered by rcu freeing */
222 : #ifdef CONFIG_FS_POSIX_ACL
223 : inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
224 : #endif
225 :
226 : #ifdef CONFIG_FSNOTIFY
227 51 : inode->i_fsnotify_mask = 0;
228 : #endif
229 51 : inode->i_flctx = NULL;
230 :
231 51 : if (unlikely(security_inode_alloc(inode)))
232 : return -ENOMEM;
233 153 : this_cpu_inc(nr_inodes);
234 :
235 : return 0;
236 : }
237 : EXPORT_SYMBOL(inode_init_always);
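
For filesystems that embed struct inode in a larger private object and recycle those objects outside the usual allocation path, inode_init_always() resets the VFS part to a pristine state. A minimal sketch, where struct myfs_inode and myfs_reinit_inode() are hypothetical:

	struct myfs_inode {
		unsigned long	flags;		/* hypothetical fs-private state */
		struct inode	vfs_inode;	/* embedded VFS inode */
	};

	static int myfs_reinit_inode(struct super_block *sb, struct myfs_inode *mi)
	{
		/* Re-initialise every per-allocation field; -ENOMEM on failure. */
		return inode_init_always(sb, &mi->vfs_inode);
	}
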
238 :
239 0 : void free_inode_nonrcu(struct inode *inode)
240 : {
241 34 : kmem_cache_free(inode_cachep, inode);
242 0 : }
243 : EXPORT_SYMBOL(free_inode_nonrcu);
244 :
245 34 : static void i_callback(struct rcu_head *head)
246 : {
247 34 : struct inode *inode = container_of(head, struct inode, i_rcu);
248 34 : if (inode->free_inode)
249 0 : inode->free_inode(inode);
250 : else
251 : free_inode_nonrcu(inode);
252 34 : }
253 :
254 51 : static struct inode *alloc_inode(struct super_block *sb)
255 : {
256 51 : const struct super_operations *ops = sb->s_op;
257 : struct inode *inode;
258 :
259 51 : if (ops->alloc_inode)
260 5 : inode = ops->alloc_inode(sb);
261 : else
262 92 : inode = alloc_inode_sb(sb, inode_cachep, GFP_KERNEL);
263 :
264 51 : if (!inode)
265 : return NULL;
266 :
267 51 : if (unlikely(inode_init_always(sb, inode))) {
268 0 : if (ops->destroy_inode) {
269 0 : ops->destroy_inode(inode);
270 0 : if (!ops->free_inode)
271 : return NULL;
272 : }
273 0 : inode->free_inode = ops->free_inode;
274 0 : i_callback(&inode->i_rcu);
275 0 : return NULL;
276 : }
277 :
278 : return inode;
279 : }
280 :
281 34 : void __destroy_inode(struct inode *inode)
282 : {
283 34 : BUG_ON(inode_has_buffers(inode));
284 34 : inode_detach_wb(inode);
285 34 : security_inode_free(inode);
286 34 : fsnotify_inode_delete(inode);
287 34 : locks_free_lock_context(inode);
288 34 : if (!inode->i_nlink) {
289 0 : WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
290 0 : atomic_long_dec(&inode->i_sb->s_remove_count);
291 : }
292 :
293 : #ifdef CONFIG_FS_POSIX_ACL
294 : if (inode->i_acl && !is_uncached_acl(inode->i_acl))
295 : posix_acl_release(inode->i_acl);
296 : if (inode->i_default_acl && !is_uncached_acl(inode->i_default_acl))
297 : posix_acl_release(inode->i_default_acl);
298 : #endif
299 102 : this_cpu_dec(nr_inodes);
300 34 : }
301 : EXPORT_SYMBOL(__destroy_inode);
302 :
303 34 : static void destroy_inode(struct inode *inode)
304 : {
305 34 : const struct super_operations *ops = inode->i_sb->s_op;
306 :
307 68 : BUG_ON(!list_empty(&inode->i_lru));
308 34 : __destroy_inode(inode);
309 34 : if (ops->destroy_inode) {
310 0 : ops->destroy_inode(inode);
311 0 : if (!ops->free_inode)
312 : return;
313 : }
314 34 : inode->free_inode = ops->free_inode;
315 34 : call_rcu(&inode->i_rcu, i_callback);
316 : }
317 :
318 : /**
319 : * drop_nlink - directly drop an inode's link count
320 : * @inode: inode
321 : *
322 : * This is a low-level filesystem helper to replace any
323 : * direct filesystem manipulation of i_nlink. In cases
324 : * where we are attempting to track writes to the
325 : * filesystem, a decrement to zero means an imminent
326 : * write when the file is truncated and actually unlinked
327 : * on the filesystem.
328 : */
329 0 : void drop_nlink(struct inode *inode)
330 : {
331 0 : WARN_ON(inode->i_nlink == 0);
332 0 : inode->__i_nlink--;
333 0 : if (!inode->i_nlink)
334 0 : atomic_long_inc(&inode->i_sb->s_remove_count);
335 0 : }
336 : EXPORT_SYMBOL(drop_nlink);
337 :
338 : /**
339 : * clear_nlink - directly zero an inode's link count
340 : * @inode: inode
341 : *
342 : * This is a low-level filesystem helper to replace any
343 : * direct filesystem manipulation of i_nlink. See
344 : * drop_nlink() for why we care about i_nlink hitting zero.
345 : */
346 0 : void clear_nlink(struct inode *inode)
347 : {
348 0 : if (inode->i_nlink) {
349 0 : inode->__i_nlink = 0;
350 0 : atomic_long_inc(&inode->i_sb->s_remove_count);
351 : }
352 0 : }
353 : EXPORT_SYMBOL(clear_nlink);
354 :
355 : /**
356 : * set_nlink - directly set an inode's link count
357 : * @inode: inode
358 : * @nlink: new nlink (should be non-zero)
359 : *
360 : * This is a low-level filesystem helper to replace any
361 : * direct filesystem manipulation of i_nlink.
362 : */
363 1 : void set_nlink(struct inode *inode, unsigned int nlink)
364 : {
365 1 : if (!nlink) {
366 : clear_nlink(inode);
367 : } else {
368 : /* Yes, some filesystems do change nlink from zero to one */
369 1 : if (inode->i_nlink == 0)
370 0 : atomic_long_dec(&inode->i_sb->s_remove_count);
371 :
372 1 : inode->__i_nlink = nlink;
373 : }
374 1 : }
375 : EXPORT_SYMBOL(set_nlink);
376 :
377 : /**
378 : * inc_nlink - directly increment an inode's link count
379 : * @inode: inode
380 : *
381 : * This is a low-level filesystem helper to replace any
382 : * direct filesystem manipulation of i_nlink. Currently,
383 : * it is only here for parity with dec_nlink().
384 : */
385 6 : void inc_nlink(struct inode *inode)
386 : {
387 6 : if (unlikely(inode->i_nlink == 0)) {
388 0 : WARN_ON(!(inode->i_state & I_LINKABLE));
389 0 : atomic_long_dec(&inode->i_sb->s_remove_count);
390 : }
391 :
392 6 : inode->__i_nlink++;
393 6 : }
394 : EXPORT_SYMBOL(inc_nlink);
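
A sketch of how a simple filesystem might use these helpers for directory link accounting, so that s_remove_count stays coherent (the myfs_* functions are hypothetical):

	static void myfs_mkdir_counts(struct inode *dir, struct inode *new_dir)
	{
		set_nlink(new_dir, 2);	/* "." plus the entry in @dir */
		inc_nlink(dir);		/* the new directory's ".." */
	}

	static void myfs_rmdir_counts(struct inode *dir, struct inode *victim)
	{
		clear_nlink(victim);	/* removed directory: straight to zero */
		drop_nlink(dir);	/* its ".." no longer points at @dir */
	}
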
395 :
396 : static void __address_space_init_once(struct address_space *mapping)
397 : {
398 124 : xa_init_flags(&mapping->i_pages, XA_FLAGS_LOCK_IRQ | XA_FLAGS_ACCOUNT);
399 62 : init_rwsem(&mapping->i_mmap_rwsem);
400 124 : INIT_LIST_HEAD(&mapping->private_list);
401 62 : spin_lock_init(&mapping->private_lock);
402 62 : mapping->i_mmap = RB_ROOT_CACHED;
403 : }
404 :
405 0 : void address_space_init_once(struct address_space *mapping)
406 : {
407 0 : memset(mapping, 0, sizeof(*mapping));
408 0 : __address_space_init_once(mapping);
409 0 : }
410 : EXPORT_SYMBOL(address_space_init_once);
411 :
412 : /*
413 : * These are initializations that only need to be done
414 : * once, because the fields are idempotent across use
415 : * of the inode, so let the slab be aware of that.
416 : */
417 62 : void inode_init_once(struct inode *inode)
418 : {
419 62 : memset(inode, 0, sizeof(*inode));
420 124 : INIT_HLIST_NODE(&inode->i_hash);
421 124 : INIT_LIST_HEAD(&inode->i_devices);
422 124 : INIT_LIST_HEAD(&inode->i_io_list);
423 124 : INIT_LIST_HEAD(&inode->i_wb_list);
424 124 : INIT_LIST_HEAD(&inode->i_lru);
425 124 : INIT_LIST_HEAD(&inode->i_sb_list);
426 124 : __address_space_init_once(&inode->i_data);
427 : i_size_ordered_init(inode);
428 62 : }
429 : EXPORT_SYMBOL(inode_init_once);
430 :
431 14 : static void init_once(void *foo)
432 : {
433 14 : struct inode *inode = (struct inode *) foo;
434 :
435 14 : inode_init_once(inode);
436 14 : }
437 :
438 : /*
439 : * inode->i_lock must be held
440 : */
441 0 : void __iget(struct inode *inode)
442 : {
443 0 : atomic_inc(&inode->i_count);
444 0 : }
445 :
446 : /*
447 : * get additional reference to inode; caller must already hold one.
448 : */
449 0 : void ihold(struct inode *inode)
450 : {
451 0 : WARN_ON(atomic_inc_return(&inode->i_count) < 2);
452 0 : }
453 : EXPORT_SYMBOL(ihold);
454 :
455 0 : static void __inode_add_lru(struct inode *inode, bool rotate)
456 : {
457 0 : if (inode->i_state & (I_DIRTY_ALL | I_SYNC | I_FREEING | I_WILL_FREE))
458 : return;
459 0 : if (atomic_read(&inode->i_count))
460 : return;
461 0 : if (!(inode->i_sb->s_flags & SB_ACTIVE))
462 : return;
463 0 : if (!mapping_shrinkable(&inode->i_data))
464 : return;
465 :
466 0 : if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru))
467 0 : this_cpu_inc(nr_unused);
468 0 : else if (rotate)
469 0 : inode->i_state |= I_REFERENCED;
470 : }
471 :
472 : /*
473 : * Add inode to LRU if needed (inode is unused and clean).
474 : *
475 : * Needs inode->i_lock held.
476 : */
477 0 : void inode_add_lru(struct inode *inode)
478 : {
479 0 : __inode_add_lru(inode, false);
480 0 : }
481 :
482 0 : static void inode_lru_list_del(struct inode *inode)
483 : {
484 0 : if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru))
485 0 : this_cpu_dec(nr_unused);
486 0 : }
487 :
488 : /**
489 : * inode_sb_list_add - add inode to the superblock list of inodes
490 : * @inode: inode to add
491 : */
492 0 : void inode_sb_list_add(struct inode *inode)
493 : {
494 64 : spin_lock(&inode->i_sb->s_inode_list_lock);
495 64 : list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
496 64 : spin_unlock(&inode->i_sb->s_inode_list_lock);
497 0 : }
498 : EXPORT_SYMBOL_GPL(inode_sb_list_add);
499 :
500 : static inline void inode_sb_list_del(struct inode *inode)
501 : {
502 68 : if (!list_empty(&inode->i_sb_list)) {
503 34 : spin_lock(&inode->i_sb->s_inode_list_lock);
504 34 : list_del_init(&inode->i_sb_list);
505 17 : spin_unlock(&inode->i_sb->s_inode_list_lock);
506 : }
507 : }
508 :
509 : static unsigned long hash(struct super_block *sb, unsigned long hashval)
510 : {
511 : unsigned long tmp;
512 :
513 0 : tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
514 : L1_CACHE_BYTES;
515 0 : tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
516 0 : return tmp & i_hash_mask;
517 : }
518 :
519 : /**
520 : * __insert_inode_hash - hash an inode
521 : * @inode: unhashed inode
522 : * @hashval: unsigned long value used to locate this object in the
523 : * inode_hashtable.
524 : *
525 : * Add an inode to the inode hash for this superblock.
526 : */
527 0 : void __insert_inode_hash(struct inode *inode, unsigned long hashval)
528 : {
529 0 : struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);
530 :
531 0 : spin_lock(&inode_hash_lock);
532 0 : spin_lock(&inode->i_lock);
533 0 : hlist_add_head_rcu(&inode->i_hash, b);
534 0 : spin_unlock(&inode->i_lock);
535 0 : spin_unlock(&inode_hash_lock);
536 0 : }
537 : EXPORT_SYMBOL(__insert_inode_hash);
538 :
539 : /**
540 : * __remove_inode_hash - remove an inode from the hash
541 : * @inode: inode to unhash
542 : *
543 : * Remove an inode from the inode hash of its superblock.
544 : */
545 0 : void __remove_inode_hash(struct inode *inode)
546 : {
547 0 : spin_lock(&inode_hash_lock);
548 0 : spin_lock(&inode->i_lock);
549 0 : hlist_del_init_rcu(&inode->i_hash);
550 0 : spin_unlock(&inode->i_lock);
551 0 : spin_unlock(&inode_hash_lock);
552 0 : }
553 : EXPORT_SYMBOL(__remove_inode_hash);
554 :
555 0 : void dump_mapping(const struct address_space *mapping)
556 : {
557 : struct inode *host;
558 : const struct address_space_operations *a_ops;
559 : struct hlist_node *dentry_first;
560 : struct dentry *dentry_ptr;
561 : struct dentry dentry;
562 : unsigned long ino;
563 :
564 : /*
565 : * If mapping is an invalid pointer, we don't want to crash
566 : * accessing it, so probe everything depending on it carefully.
567 : */
568 0 : if (get_kernel_nofault(host, &mapping->host) ||
569 0 : get_kernel_nofault(a_ops, &mapping->a_ops)) {
570 0 : pr_warn("invalid mapping:%px\n", mapping);
571 0 : return;
572 : }
573 :
574 0 : if (!host) {
575 0 : pr_warn("aops:%ps\n", a_ops);
576 0 : return;
577 : }
578 :
579 0 : if (get_kernel_nofault(dentry_first, &host->i_dentry.first) ||
580 0 : get_kernel_nofault(ino, &host->i_ino)) {
581 0 : pr_warn("aops:%ps invalid inode:%px\n", a_ops, host);
582 0 : return;
583 : }
584 :
585 0 : if (!dentry_first) {
586 0 : pr_warn("aops:%ps ino:%lx\n", a_ops, ino);
587 0 : return;
588 : }
589 :
590 0 : dentry_ptr = container_of(dentry_first, struct dentry, d_u.d_alias);
591 0 : if (get_kernel_nofault(dentry, dentry_ptr)) {
592 0 : pr_warn("aops:%ps ino:%lx invalid dentry:%px\n",
593 : a_ops, ino, dentry_ptr);
594 0 : return;
595 : }
596 :
597 : /*
598 : * if dentry is corrupted, the %pd handler may still crash,
599 : * but it's unlikely that we reach here with a corrupt mapping
600 : */
601 0 : pr_warn("aops:%ps ino:%lx dentry name:\"%pd\"\n", a_ops, ino, &dentry);
602 : }
603 :
604 34 : void clear_inode(struct inode *inode)
605 : {
606 : /*
607 : * We have to cycle the i_pages lock here because reclaim can be in the
608 : * process of removing the last page (in __filemap_remove_folio())
609 : * and we must not free the mapping under it.
610 : */
611 68 : xa_lock_irq(&inode->i_data.i_pages);
612 34 : BUG_ON(inode->i_data.nrpages);
613 : /*
614 : * Almost always, mapping_empty(&inode->i_data) here; but there are
615 : * two known and long-standing ways in which nodes may get left behind
616 : * (when deep radix-tree node allocation failed partway; or when THP
617 : * collapse_file() failed). Until those two known cases are cleaned up,
618 : * or a cleanup function is called here, do not BUG_ON(!mapping_empty),
619 : * nor even WARN_ON(!mapping_empty).
620 : */
621 68 : xa_unlock_irq(&inode->i_data.i_pages);
622 68 : BUG_ON(!list_empty(&inode->i_data.private_list));
623 34 : BUG_ON(!(inode->i_state & I_FREEING));
624 34 : BUG_ON(inode->i_state & I_CLEAR);
625 68 : BUG_ON(!list_empty(&inode->i_wb_list));
626 : /* don't need i_lock here, no concurrent mods to i_state */
627 34 : inode->i_state = I_FREEING | I_CLEAR;
628 34 : }
629 : EXPORT_SYMBOL(clear_inode);
630 :
631 : /*
632 : * Free the inode passed in, removing it from the lists it is still connected
633 : * to. We remove any pages still attached to the inode and wait for any IO that
634 : * is still in progress before finally destroying the inode.
635 : *
636 : * An inode must already be marked I_FREEING so that we avoid the inode being
637 : * moved back onto lists if we race with other code that manipulates the lists
638 : * (e.g. writeback_single_inode). The caller is responsible for setting this.
639 : *
640 : * An inode must already be removed from the LRU list before being evicted from
641 : * the cache. This should occur atomically with setting the I_FREEING state
642 : * flag, so no inodes here should ever be on the LRU when being evicted.
643 : */
644 34 : static void evict(struct inode *inode)
645 : {
646 34 : const struct super_operations *op = inode->i_sb->s_op;
647 :
648 34 : BUG_ON(!(inode->i_state & I_FREEING));
649 68 : BUG_ON(!list_empty(&inode->i_lru));
650 :
651 68 : if (!list_empty(&inode->i_io_list))
652 0 : inode_io_list_del(inode);
653 :
654 34 : inode_sb_list_del(inode);
655 :
656 : /*
657 : * Wait for flusher thread to be done with the inode so that filesystem
658 : * does not start destroying it while writeback is still running. Since
659 : * the inode has I_FREEING set, flusher thread won't start new work on
660 : * the inode. We just have to wait for running writeback to finish.
661 : */
662 34 : inode_wait_for_writeback(inode);
663 :
664 34 : if (op->evict_inode) {
665 0 : op->evict_inode(inode);
666 : } else {
667 34 : truncate_inode_pages_final(&inode->i_data);
668 34 : clear_inode(inode);
669 : }
670 34 : if (S_ISCHR(inode->i_mode) && inode->i_cdev)
671 0 : cd_forget(inode);
672 :
673 34 : remove_inode_hash(inode);
674 :
675 68 : spin_lock(&inode->i_lock);
676 34 : wake_up_bit(&inode->i_state, __I_NEW);
677 34 : BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
678 68 : spin_unlock(&inode->i_lock);
679 :
680 34 : destroy_inode(inode);
681 34 : }
682 :
683 : /*
684 : * dispose_list - dispose of the contents of a local list
685 : * @head: the head of the list to free
686 : *
687 : * Dispose-list gets a local list with local inodes in it, so it doesn't
688 : * need to worry about list corruption and SMP locks.
689 : */
690 17 : static void dispose_list(struct list_head *head)
691 : {
692 34 : while (!list_empty(head)) {
693 : struct inode *inode;
694 :
695 0 : inode = list_first_entry(head, struct inode, i_lru);
696 0 : list_del_init(&inode->i_lru);
697 :
698 0 : evict(inode);
699 0 : cond_resched();
700 : }
701 17 : }
702 :
703 : /**
704 : * evict_inodes - evict all evictable inodes for a superblock
705 : * @sb: superblock to operate on
706 : *
707 : * Make sure that no inodes with zero refcount are retained. This is
708 : * called by superblock shutdown after having SB_ACTIVE flag removed,
709 : * so any inode reaching zero refcount during or after that call will
710 : * be immediately evicted.
711 : */
712 17 : void evict_inodes(struct super_block *sb)
713 : {
714 : struct inode *inode, *next;
715 17 : LIST_HEAD(dispose);
716 :
717 : again:
718 34 : spin_lock(&sb->s_inode_list_lock);
719 17 : list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
720 0 : if (atomic_read(&inode->i_count))
721 0 : continue;
722 :
723 0 : spin_lock(&inode->i_lock);
724 0 : if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
725 0 : spin_unlock(&inode->i_lock);
726 0 : continue;
727 : }
728 :
729 0 : inode->i_state |= I_FREEING;
730 0 : inode_lru_list_del(inode);
731 0 : spin_unlock(&inode->i_lock);
732 0 : list_add(&inode->i_lru, &dispose);
733 :
734 : /*
735 : * We can have a ton of inodes to evict at unmount time given
736 : * enough memory, check to see if we need to go to sleep for a
737 : * bit so we don't livelock.
738 : */
739 0 : if (need_resched()) {
740 0 : spin_unlock(&sb->s_inode_list_lock);
741 0 : cond_resched();
742 0 : dispose_list(&dispose);
743 0 : goto again;
744 : }
745 : }
746 34 : spin_unlock(&sb->s_inode_list_lock);
747 :
748 17 : dispose_list(&dispose);
749 17 : }
750 : EXPORT_SYMBOL_GPL(evict_inodes);
751 :
752 : /**
753 : * invalidate_inodes - attempt to free all inodes on a superblock
754 : * @sb: superblock to operate on
755 : * @kill_dirty: flag to guide handling of dirty inodes
756 : *
757 : * Attempts to free all inodes for a given superblock. If there were any
758 : * busy inodes return a non-zero value, else zero.
759 : * If @kill_dirty is set, discard dirty inodes too, otherwise treat
760 : * them as busy.
761 : */
762 0 : int invalidate_inodes(struct super_block *sb, bool kill_dirty)
763 : {
764 0 : int busy = 0;
765 : struct inode *inode, *next;
766 0 : LIST_HEAD(dispose);
767 :
768 : again:
769 0 : spin_lock(&sb->s_inode_list_lock);
770 0 : list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
771 0 : spin_lock(&inode->i_lock);
772 0 : if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
773 0 : spin_unlock(&inode->i_lock);
774 0 : continue;
775 : }
776 0 : if (inode->i_state & I_DIRTY_ALL && !kill_dirty) {
777 0 : spin_unlock(&inode->i_lock);
778 0 : busy = 1;
779 0 : continue;
780 : }
781 0 : if (atomic_read(&inode->i_count)) {
782 0 : spin_unlock(&inode->i_lock);
783 0 : busy = 1;
784 0 : continue;
785 : }
786 :
787 0 : inode->i_state |= I_FREEING;
788 0 : inode_lru_list_del(inode);
789 0 : spin_unlock(&inode->i_lock);
790 0 : list_add(&inode->i_lru, &dispose);
791 0 : if (need_resched()) {
792 0 : spin_unlock(&sb->s_inode_list_lock);
793 0 : cond_resched();
794 0 : dispose_list(&dispose);
795 0 : goto again;
796 : }
797 : }
798 0 : spin_unlock(&sb->s_inode_list_lock);
799 :
800 0 : dispose_list(&dispose);
801 :
802 0 : return busy;
803 : }
804 :
805 : /*
806 : * Isolate the inode from the LRU in preparation for freeing it.
807 : *
808 : * If the inode has the I_REFERENCED flag set, then it means that it has been
809 : * used recently - the flag is set in iput_final(). When we encounter such an
810 : * inode, clear the flag and move it to the back of the LRU so it gets another
811 : * pass through the LRU before it gets reclaimed. This is necessary because of
812 : * the fact we are doing lazy LRU updates to minimise lock contention so the
813 : * LRU does not have strict ordering. Hence we don't want to reclaim inodes
814 : * with this flag set because they are the inodes that are out of order.
815 : */
816 0 : static enum lru_status inode_lru_isolate(struct list_head *item,
817 : struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
818 : {
819 0 : struct list_head *freeable = arg;
820 0 : struct inode *inode = container_of(item, struct inode, i_lru);
821 :
822 : /*
823 : * We are inverting the lru lock/inode->i_lock here, so use a
824 : * trylock. If we fail to get the lock, just skip it.
825 : */
826 0 : if (!spin_trylock(&inode->i_lock))
827 : return LRU_SKIP;
828 :
829 : /*
830 : * Inodes can get referenced, redirtied, or repopulated while
831 : * they're already on the LRU, and this can make them
832 : * unreclaimable for a while. Remove them lazily here; iput,
833 : * sync, or the last page cache deletion will requeue them.
834 : */
835 0 : if (atomic_read(&inode->i_count) ||
836 0 : (inode->i_state & ~I_REFERENCED) ||
837 0 : !mapping_shrinkable(&inode->i_data)) {
838 0 : list_lru_isolate(lru, &inode->i_lru);
839 0 : spin_unlock(&inode->i_lock);
840 0 : this_cpu_dec(nr_unused);
841 0 : return LRU_REMOVED;
842 : }
843 :
844 : /* Recently referenced inodes get one more pass */
845 0 : if (inode->i_state & I_REFERENCED) {
846 0 : inode->i_state &= ~I_REFERENCED;
847 0 : spin_unlock(&inode->i_lock);
848 0 : return LRU_ROTATE;
849 : }
850 :
851 : /*
852 : * On highmem systems, mapping_shrinkable() permits dropping
853 : * page cache in order to free up struct inodes: lowmem might
854 : * be under pressure before the cache inside the highmem zone.
855 : */
856 0 : if (inode_has_buffers(inode) || !mapping_empty(&inode->i_data)) {
857 0 : __iget(inode);
858 0 : spin_unlock(&inode->i_lock);
859 0 : spin_unlock(lru_lock);
860 0 : if (remove_inode_buffers(inode)) {
861 : unsigned long reap;
862 0 : reap = invalidate_mapping_pages(&inode->i_data, 0, -1);
863 0 : if (current_is_kswapd())
864 0 : __count_vm_events(KSWAPD_INODESTEAL, reap);
865 : else
866 0 : __count_vm_events(PGINODESTEAL, reap);
867 0 : if (current->reclaim_state)
868 0 : current->reclaim_state->reclaimed_slab += reap;
869 : }
870 0 : iput(inode);
871 0 : spin_lock(lru_lock);
872 0 : return LRU_RETRY;
873 : }
874 :
875 0 : WARN_ON(inode->i_state & I_NEW);
876 0 : inode->i_state |= I_FREEING;
877 0 : list_lru_isolate_move(lru, &inode->i_lru, freeable);
878 0 : spin_unlock(&inode->i_lock);
879 :
880 0 : this_cpu_dec(nr_unused);
881 0 : return LRU_REMOVED;
882 : }
883 :
884 : /*
885 : * Walk the superblock inode LRU for freeable inodes and attempt to free them.
886 : * This is called from the superblock shrinker function with a number of inodes
887 : * to trim from the LRU. Inodes to be freed are moved to a temporary list and
888 : * then are freed outside inode_lock by dispose_list().
889 : */
890 0 : long prune_icache_sb(struct super_block *sb, struct shrink_control *sc)
891 : {
892 0 : LIST_HEAD(freeable);
893 : long freed;
894 :
895 0 : freed = list_lru_shrink_walk(&sb->s_inode_lru, sc,
896 : inode_lru_isolate, &freeable);
897 0 : dispose_list(&freeable);
898 0 : return freed;
899 : }
900 :
901 : static void __wait_on_freeing_inode(struct inode *inode);
902 : /*
903 : * Called with the inode lock held.
904 : */
905 0 : static struct inode *find_inode(struct super_block *sb,
906 : struct hlist_head *head,
907 : int (*test)(struct inode *, void *),
908 : void *data)
909 : {
910 0 : struct inode *inode = NULL;
911 :
912 : repeat:
913 0 : hlist_for_each_entry(inode, head, i_hash) {
914 0 : if (inode->i_sb != sb)
915 0 : continue;
916 0 : if (!test(inode, data))
917 0 : continue;
918 0 : spin_lock(&inode->i_lock);
919 0 : if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
920 0 : __wait_on_freeing_inode(inode);
921 0 : goto repeat;
922 : }
923 0 : if (unlikely(inode->i_state & I_CREATING)) {
924 0 : spin_unlock(&inode->i_lock);
925 0 : return ERR_PTR(-ESTALE);
926 : }
927 0 : __iget(inode);
928 0 : spin_unlock(&inode->i_lock);
929 0 : return inode;
930 : }
931 : return NULL;
932 : }
933 :
934 : /*
935 : * find_inode_fast is the fast path version of find_inode, see the comment at
936 : * iget_locked for details.
937 : */
938 0 : static struct inode *find_inode_fast(struct super_block *sb,
939 : struct hlist_head *head, unsigned long ino)
940 : {
941 0 : struct inode *inode = NULL;
942 :
943 : repeat:
944 0 : hlist_for_each_entry(inode, head, i_hash) {
945 0 : if (inode->i_ino != ino)
946 0 : continue;
947 0 : if (inode->i_sb != sb)
948 0 : continue;
949 0 : spin_lock(&inode->i_lock);
950 0 : if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
951 0 : __wait_on_freeing_inode(inode);
952 0 : goto repeat;
953 : }
954 0 : if (unlikely(inode->i_state & I_CREATING)) {
955 0 : spin_unlock(&inode->i_lock);
956 0 : return ERR_PTR(-ESTALE);
957 : }
958 0 : __iget(inode);
959 0 : spin_unlock(&inode->i_lock);
960 0 : return inode;
961 : }
962 : return NULL;
963 : }
964 :
965 : /*
966 : * Each cpu owns a range of LAST_INO_BATCH numbers.
967 : * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
968 : * to renew the exhausted range.
969 : *
970 : * This does not significantly increase overflow rate because every CPU can
971 : * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
972 : * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
973 : * 2^32 range, and is a worst-case. Even a 50% wastage would only increase
974 : * overflow rate by 2x, which does not seem too significant.
975 : *
976 : * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
977 : * error if st_ino won't fit in target struct field. Use 32bit counter
978 : * here to attempt to avoid that.
979 : */
980 : #define LAST_INO_BATCH 1024
981 : static DEFINE_PER_CPU(unsigned int, last_ino);
982 :
983 23 : unsigned int get_next_ino(void)
984 : {
985 23 : unsigned int *p = &get_cpu_var(last_ino);
986 23 : unsigned int res = *p;
987 :
988 : #ifdef CONFIG_SMP
989 : if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
990 : static atomic_t shared_last_ino;
991 : int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
992 :
993 : res = next - LAST_INO_BATCH;
994 : }
995 : #endif
996 :
997 23 : res++;
998 : /* get_next_ino should not provide a 0 inode number */
999 23 : if (unlikely(!res))
1000 0 : res++;
1001 23 : *p = res;
1002 23 : put_cpu_var(last_ino);
1003 23 : return res;
1004 : }
1005 : EXPORT_SYMBOL(get_next_ino);
1006 :
1007 : /**
1008 : * new_inode_pseudo - obtain an inode
1009 : * @sb: superblock
1010 : *
1011 : * Allocates a new inode for the given superblock.
1012 : * The inode won't be chained into the superblock's s_inodes list.
1013 : * This means:
1014 : * - the fs can't be unmounted
1015 : * - quotas, fsnotify and writeback can't work
1016 : */
1017 19 : struct inode *new_inode_pseudo(struct super_block *sb)
1018 : {
1019 51 : struct inode *inode = alloc_inode(sb);
1020 :
1021 51 : if (inode) {
1022 102 : spin_lock(&inode->i_lock);
1023 51 : inode->i_state = 0;
1024 51 : spin_unlock(&inode->i_lock);
1025 : }
1026 19 : return inode;
1027 : }
1028 :
1029 : /**
1030 : * new_inode - obtain an inode
1031 : * @sb: superblock
1032 : *
1033 : * Allocates a new inode for the given superblock. The default gfp_mask
1034 : * for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
1035 : * If HIGHMEM pages are unsuitable or it is known that pages allocated
1036 : * for the page cache are not reclaimable or migratable,
1037 : * mapping_set_gfp_mask() must be called with suitable flags on the
1038 : * newly created inode's mapping.
1039 : *
1040 : */
1041 32 : struct inode *new_inode(struct super_block *sb)
1042 : {
1043 : struct inode *inode;
1044 :
1045 32 : spin_lock_prefetch(&sb->s_inode_list_lock);
1046 :
1047 32 : inode = new_inode_pseudo(sb);
1048 32 : if (inode)
1049 : inode_sb_list_add(inode);
1050 32 : return inode;
1051 : }
1052 : EXPORT_SYMBOL(new_inode);
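
In-memory filesystems typically pair new_inode() with get_next_ino() above, since they have no stable on-disk numbering. A minimal sketch (myfs_get_inode() is hypothetical):

	static struct inode *myfs_get_inode(struct super_block *sb, umode_t mode)
	{
		struct inode *inode = new_inode(sb);

		if (!inode)
			return NULL;
		inode->i_ino = get_next_ino();	/* batched per-CPU, never zero */
		inode->i_mode = mode;
		return inode;
	}
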
1053 :
1054 : #ifdef CONFIG_DEBUG_LOCK_ALLOC
1055 : void lockdep_annotate_inode_mutex_key(struct inode *inode)
1056 : {
1057 : if (S_ISDIR(inode->i_mode)) {
1058 : struct file_system_type *type = inode->i_sb->s_type;
1059 :
1060 : /* Set new key only if filesystem hasn't already changed it */
1061 : if (lockdep_match_class(&inode->i_rwsem, &type->i_mutex_key)) {
1062 : /*
1063 : * ensure nobody is actually holding i_mutex
1064 : */
1065 : // mutex_destroy(&inode->i_mutex);
1066 : init_rwsem(&inode->i_rwsem);
1067 : lockdep_set_class(&inode->i_rwsem,
1068 : &type->i_mutex_dir_key);
1069 : }
1070 : }
1071 : }
1072 : EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);
1073 : #endif
1074 :
1075 : /**
1076 : * unlock_new_inode - clear the I_NEW state and wake up any waiters
1077 : * @inode: new inode to unlock
1078 : *
1079 : * Called when the inode is fully initialised to clear the new state of the
1080 : * inode and wake up anyone waiting for the inode to finish initialisation.
1081 : */
1082 0 : void unlock_new_inode(struct inode *inode)
1083 : {
1084 0 : lockdep_annotate_inode_mutex_key(inode);
1085 0 : spin_lock(&inode->i_lock);
1086 0 : WARN_ON(!(inode->i_state & I_NEW));
1087 0 : inode->i_state &= ~I_NEW & ~I_CREATING;
1088 0 : smp_mb();
1089 0 : wake_up_bit(&inode->i_state, __I_NEW);
1090 0 : spin_unlock(&inode->i_lock);
1091 0 : }
1092 : EXPORT_SYMBOL(unlock_new_inode);
1093 :
1094 0 : void discard_new_inode(struct inode *inode)
1095 : {
1096 0 : lockdep_annotate_inode_mutex_key(inode);
1097 0 : spin_lock(&inode->i_lock);
1098 0 : WARN_ON(!(inode->i_state & I_NEW));
1099 0 : inode->i_state &= ~I_NEW;
1100 0 : smp_mb();
1101 0 : wake_up_bit(&inode->i_state, __I_NEW);
1102 0 : spin_unlock(&inode->i_lock);
1103 0 : iput(inode);
1104 0 : }
1105 : EXPORT_SYMBOL(discard_new_inode);
1106 :
1107 : /**
1108 : * lock_two_nondirectories - take two i_mutexes on non-directory objects
1109 : *
1110 : * Lock any non-NULL argument that is not a directory.
1111 : * Zero, one or two objects may be locked by this function.
1112 : *
1113 : * @inode1: first inode to lock
1114 : * @inode2: second inode to lock
1115 : */
1116 0 : void lock_two_nondirectories(struct inode *inode1, struct inode *inode2)
1117 : {
1118 0 : if (inode1 > inode2)
1119 0 : swap(inode1, inode2);
1120 :
1121 0 : if (inode1 && !S_ISDIR(inode1->i_mode))
1122 : inode_lock(inode1);
1123 0 : if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
1124 0 : inode_lock_nested(inode2, I_MUTEX_NONDIR2);
1125 0 : }
1126 : EXPORT_SYMBOL(lock_two_nondirectories);
1127 :
1128 : /**
1129 : * unlock_two_nondirectories - release locks from lock_two_nondirectories()
1130 : * @inode1: first inode to unlock
1131 : * @inode2: second inode to unlock
1132 : */
1133 0 : void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2)
1134 : {
1135 0 : if (inode1 && !S_ISDIR(inode1->i_mode))
1136 : inode_unlock(inode1);
1137 0 : if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
1138 : inode_unlock(inode2);
1139 0 : }
1140 : EXPORT_SYMBOL(unlock_two_nondirectories);
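
A sketch of the intended pairing, for an operation that needs both non-directory inodes held stable at once:

	lock_two_nondirectories(src, dst);	/* address order avoids deadlock */
	/* ... operate on both inodes ... */
	unlock_two_nondirectories(src, dst);
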
1141 :
1142 : /**
1143 : * inode_insert5 - obtain an inode from a mounted file system
1144 : * @inode: pre-allocated inode to use for insert to cache
1145 : * @hashval: hash value (usually inode number) to get
1146 : * @test: callback used for comparisons between inodes
1147 : * @set: callback used to initialize a new struct inode
1148 : * @data: opaque data pointer to pass to @test and @set
1149 : *
1150 : * Search for the inode specified by @hashval and @data in the inode cache,
1151 : * and if present return it with an increased reference count. This is
1152 : * a variant of iget5_locked() for callers that don't want to fail on memory
1153 : * allocation of inode.
1154 : *
1155 : * If the inode is not in cache, insert the pre-allocated inode to cache and
1156 : * return it locked, hashed, and with the I_NEW flag set. The file system gets
1157 : * to fill it in before unlocking it via unlock_new_inode().
1158 : *
1159 : * Note both @test and @set are called with the inode_hash_lock held, so can't
1160 : * sleep.
1161 : */
1162 0 : struct inode *inode_insert5(struct inode *inode, unsigned long hashval,
1163 : int (*test)(struct inode *, void *),
1164 : int (*set)(struct inode *, void *), void *data)
1165 : {
1166 0 : struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval);
1167 : struct inode *old;
1168 :
1169 : again:
1170 0 : spin_lock(&inode_hash_lock);
1171 0 : old = find_inode(inode->i_sb, head, test, data);
1172 0 : if (unlikely(old)) {
1173 : /*
1174 : * Uhhuh, somebody else created the same inode under us.
1175 : * Use the old inode instead of the preallocated one.
1176 : */
1177 0 : spin_unlock(&inode_hash_lock);
1178 0 : if (IS_ERR(old))
1179 : return NULL;
1180 0 : wait_on_inode(old);
1181 0 : if (unlikely(inode_unhashed(old))) {
1182 0 : iput(old);
1183 0 : goto again;
1184 : }
1185 : return old;
1186 : }
1187 :
1188 0 : if (set && unlikely(set(inode, data))) {
1189 : inode = NULL;
1190 : goto unlock;
1191 : }
1192 :
1193 : /*
1194 : * Return the locked inode with I_NEW set, the
1195 : * caller is responsible for filling in the contents
1196 : */
1197 0 : spin_lock(&inode->i_lock);
1198 0 : inode->i_state |= I_NEW;
1199 0 : hlist_add_head_rcu(&inode->i_hash, head);
1200 0 : spin_unlock(&inode->i_lock);
1201 :
1202 : /*
1203 : * Add inode to the sb list if it's not already. It has I_NEW at this
1204 : * point, so it should be safe to test i_sb_list locklessly.
1205 : */
1206 0 : if (list_empty(&inode->i_sb_list))
1207 : inode_sb_list_add(inode);
1208 : unlock:
1209 0 : spin_unlock(&inode_hash_lock);
1210 :
1211 0 : return inode;
1212 : }
1213 : EXPORT_SYMBOL(inode_insert5);
1214 :
1215 : /**
1216 : * iget5_locked - obtain an inode from a mounted file system
1217 : * @sb: super block of file system
1218 : * @hashval: hash value (usually inode number) to get
1219 : * @test: callback used for comparisons between inodes
1220 : * @set: callback used to initialize a new struct inode
1221 : * @data: opaque data pointer to pass to @test and @set
1222 : *
1223 : * Search for the inode specified by @hashval and @data in the inode cache,
1224 : * and if present return it with an increased reference count. This is
1225 : * a generalized version of iget_locked() for file systems where the inode
1226 : * number is not sufficient for unique identification of an inode.
1227 : *
1228 : * If the inode is not in cache, allocate a new inode and return it locked,
1229 : * hashed, and with the I_NEW flag set. The file system gets to fill it in
1230 : * before unlocking it via unlock_new_inode().
1231 : *
1232 : * Note both @test and @set are called with the inode_hash_lock held, so can't
1233 : * sleep.
1234 : */
1235 0 : struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
1236 : int (*test)(struct inode *, void *),
1237 : int (*set)(struct inode *, void *), void *data)
1238 : {
1239 0 : struct inode *inode = ilookup5(sb, hashval, test, data);
1240 :
1241 0 : if (!inode) {
1242 0 : struct inode *new = alloc_inode(sb);
1243 :
1244 0 : if (new) {
1245 0 : new->i_state = 0;
1246 0 : inode = inode_insert5(new, hashval, test, set, data);
1247 0 : if (unlikely(inode != new))
1248 0 : destroy_inode(new);
1249 : }
1250 : }
1251 0 : return inode;
1252 : }
1253 : EXPORT_SYMBOL(iget5_locked);
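
A sketch for a filesystem whose identity is an opaque cookie rather than an inode number, stashed here in i_private for simplicity; myfs_test(), myfs_set() and myfs_iget5() are hypothetical:

	static int myfs_test(struct inode *inode, void *data)
	{
		return inode->i_private == data;	/* compare identity cookie */
	}

	static int myfs_set(struct inode *inode, void *data)
	{
		inode->i_private = data;	/* must not sleep: hash lock held */
		return 0;
	}

	static struct inode *myfs_iget5(struct super_block *sb,
					unsigned long hashval, void *cookie)
	{
		return iget5_locked(sb, hashval, myfs_test, myfs_set, cookie);
	}
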
1254 :
1255 : /**
1256 : * iget_locked - obtain an inode from a mounted file system
1257 : * @sb: super block of file system
1258 : * @ino: inode number to get
1259 : *
1260 : * Search for the inode specified by @ino in the inode cache and if present
1261 : * return it with an increased reference count. This is for file systems
1262 : * where the inode number is sufficient for unique identification of an inode.
1263 : *
1264 : * If the inode is not in cache, allocate a new inode and return it locked,
1265 : * hashed, and with the I_NEW flag set. The file system gets to fill it in
1266 : * before unlocking it via unlock_new_inode().
1267 : */
1268 0 : struct inode *iget_locked(struct super_block *sb, unsigned long ino)
1269 : {
1270 0 : struct hlist_head *head = inode_hashtable + hash(sb, ino);
1271 : struct inode *inode;
1272 : again:
1273 0 : spin_lock(&inode_hash_lock);
1274 0 : inode = find_inode_fast(sb, head, ino);
1275 0 : spin_unlock(&inode_hash_lock);
1276 0 : if (inode) {
1277 0 : if (IS_ERR(inode))
1278 : return NULL;
1279 0 : wait_on_inode(inode);
1280 0 : if (unlikely(inode_unhashed(inode))) {
1281 0 : iput(inode);
1282 0 : goto again;
1283 : }
1284 : return inode;
1285 : }
1286 :
1287 0 : inode = alloc_inode(sb);
1288 0 : if (inode) {
1289 : struct inode *old;
1290 :
1291 0 : spin_lock(&inode_hash_lock);
1292 : /* We released the lock, so.. */
1293 0 : old = find_inode_fast(sb, head, ino);
1294 0 : if (!old) {
1295 0 : inode->i_ino = ino;
1296 0 : spin_lock(&inode->i_lock);
1297 0 : inode->i_state = I_NEW;
1298 0 : hlist_add_head_rcu(&inode->i_hash, head);
1299 0 : spin_unlock(&inode->i_lock);
1300 0 : inode_sb_list_add(inode);
1301 0 : spin_unlock(&inode_hash_lock);
1302 :
1303 : /* Return the locked inode with I_NEW set, the
1304 : * caller is responsible for filling in the contents
1305 : */
1306 0 : return inode;
1307 : }
1308 :
1309 : /*
1310 : * Uhhuh, somebody else created the same inode under
1311 : * us. Use the old inode instead of the one we just
1312 : * allocated.
1313 : */
1314 0 : spin_unlock(&inode_hash_lock);
1315 0 : destroy_inode(inode);
1316 0 : if (IS_ERR(old))
1317 : return NULL;
1318 0 : inode = old;
1319 0 : wait_on_inode(inode);
1320 0 : if (unlikely(inode_unhashed(inode))) {
1321 0 : iput(inode);
1322 0 : goto again;
1323 : }
1324 : }
1325 : return inode;
1326 : }
1327 : EXPORT_SYMBOL(iget_locked);
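
The classic consumer pattern: look up by number, and fill the inode in only when I_NEW says this call won the allocation. A sketch in which myfs_iget() and myfs_read_ondisk() are hypothetical (iget_failed() is the stock helper for abandoning a half-built inode):

	static struct inode *myfs_iget(struct super_block *sb, unsigned long ino)
	{
		struct inode *inode = iget_locked(sb, ino);

		if (!inode)
			return ERR_PTR(-ENOMEM);
		if (!(inode->i_state & I_NEW))
			return inode;		/* cache hit, ready to use */

		if (myfs_read_ondisk(inode) < 0) {	/* hypothetical fill-in */
			iget_failed(inode);	/* mark bad, unlock and release */
			return ERR_PTR(-EIO);
		}
		unlock_new_inode(inode);	/* clear I_NEW, wake waiters */
		return inode;
	}
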
1328 :
1329 : /*
1330 : * search the inode cache for a matching inode number.
1331 : * If we find one, then the inode number we are trying to
1332 : * allocate is not unique and so we should not use it.
1333 : *
1334 : * Returns 1 if the inode number is unique, 0 if it is not.
1335 : */
1336 0 : static int test_inode_iunique(struct super_block *sb, unsigned long ino)
1337 : {
1338 0 : struct hlist_head *b = inode_hashtable + hash(sb, ino);
1339 : struct inode *inode;
1340 :
1341 0 : hlist_for_each_entry_rcu(inode, b, i_hash) {
1342 0 : if (inode->i_ino == ino && inode->i_sb == sb)
1343 : return 0;
1344 : }
1345 : return 1;
1346 : }
1347 :
1348 : /**
1349 : * iunique - get a unique inode number
1350 : * @sb: superblock
1351 : * @max_reserved: highest reserved inode number
1352 : *
1353 : * Obtain an inode number that is unique on the system for a given
1354 : * superblock. This is used by file systems that have no natural
1355 : * permanent inode numbering system. An inode number is returned that
1356 : * is higher than the reserved limit but unique.
1357 : *
1358 : * BUGS:
1359 : * With a large number of inodes live on the file system this function
1360 : * currently becomes quite slow.
1361 : */
1362 0 : ino_t iunique(struct super_block *sb, ino_t max_reserved)
1363 : {
1364 : /*
1365 : * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
1366 : * error if st_ino won't fit in target struct field. Use 32bit counter
1367 : * here to attempt to avoid that.
1368 : */
1369 : static DEFINE_SPINLOCK(iunique_lock);
1370 : static unsigned int counter;
1371 : ino_t res;
1372 :
1373 0 : rcu_read_lock();
1374 : spin_lock(&iunique_lock);
1375 : do {
1376 0 : if (counter <= max_reserved)
1377 0 : counter = max_reserved + 1;
1378 0 : res = counter++;
1379 0 : } while (!test_inode_iunique(sb, res));
1380 0 : spin_unlock(&iunique_lock);
1381 : rcu_read_unlock();
1382 :
1383 0 : return res;
1384 : }
1385 : EXPORT_SYMBOL(iunique);
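
Sketch of a typical call, reserving a handful of low numbers for fixed objects (the 16 is arbitrary):

	struct inode *inode = new_inode(sb);

	if (inode)
		inode->i_ino = iunique(sb, 16);	/* unique on @sb, always > 16 */

Note that iunique() can only see inodes that are in the inode hash, so filesystems relying on it are expected to hash their inodes.
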
1386 :
1387 0 : struct inode *igrab(struct inode *inode)
1388 : {
1389 0 : spin_lock(&inode->i_lock);
1390 0 : if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
1391 0 : __iget(inode);
1392 0 : spin_unlock(&inode->i_lock);
1393 : } else {
1394 0 : spin_unlock(&inode->i_lock);
1395 : /*
1396 : * Handle the case where s_op->clear_inode has not been
1397 : * called yet, and somebody is calling igrab
1398 : * while the inode is getting freed.
1399 : */
1400 0 : inode = NULL;
1401 : }
1402 0 : return inode;
1403 : }
1404 : EXPORT_SYMBOL(igrab);
1405 :
1406 : /**
1407 : * ilookup5_nowait - search for an inode in the inode cache
1408 : * @sb: super block of file system to search
1409 : * @hashval: hash value (usually inode number) to search for
1410 : * @test: callback used for comparisons between inodes
1411 : * @data: opaque data pointer to pass to @test
1412 : *
1413 : * Search for the inode specified by @hashval and @data in the inode cache.
1414 : * If the inode is in the cache, the inode is returned with an incremented
1415 : * reference count.
1416 : *
1417 : * Note: I_NEW is not waited upon so you have to be very careful what you do
1418 : * with the returned inode. You probably should be using ilookup5() instead.
1419 : *
1420 : * Note2: @test is called with the inode_hash_lock held, so can't sleep.
1421 : */
1422 0 : struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
1423 : int (*test)(struct inode *, void *), void *data)
1424 : {
1425 0 : struct hlist_head *head = inode_hashtable + hash(sb, hashval);
1426 : struct inode *inode;
1427 :
1428 0 : spin_lock(&inode_hash_lock);
1429 0 : inode = find_inode(sb, head, test, data);
1430 0 : spin_unlock(&inode_hash_lock);
1431 :
1432 0 : return IS_ERR(inode) ? NULL : inode;
1433 : }
1434 : EXPORT_SYMBOL(ilookup5_nowait);
1435 :
1436 : /**
1437 : * ilookup5 - search for an inode in the inode cache
1438 : * @sb: super block of file system to search
1439 : * @hashval: hash value (usually inode number) to search for
1440 : * @test: callback used for comparisons between inodes
1441 : * @data: opaque data pointer to pass to @test
1442 : *
1443 : * Search for the inode specified by @hashval and @data in the inode cache,
1444 : * and if the inode is in the cache, return the inode with an incremented
1445 : * reference count. Waits on I_NEW before returning the inode.
1447 : *
1448 : * This is a generalized version of ilookup() for file systems where the
1449 : * inode number is not sufficient for unique identification of an inode.
1450 : *
1451 : * Note: @test is called with the inode_hash_lock held, so can't sleep.
1452 : */
1453 0 : struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
1454 : int (*test)(struct inode *, void *), void *data)
1455 : {
1456 : struct inode *inode;
1457 : again:
1458 0 : inode = ilookup5_nowait(sb, hashval, test, data);
1459 0 : if (inode) {
1460 0 : wait_on_inode(inode);
1461 0 : if (unlikely(inode_unhashed(inode))) {
1462 0 : iput(inode);
1463 0 : goto again;
1464 : }
1465 : }
1466 0 : return inode;
1467 : }
1468 : EXPORT_SYMBOL(ilookup5);
1469 :
1470 : /**
1471 : * ilookup - search for an inode in the inode cache
1472 : * @sb: super block of file system to search
1473 : * @ino: inode number to search for
1474 : *
1475 : * Search for the inode @ino in the inode cache, and if the inode is in the
1476 : * cache, the inode is returned with an incremented reference count.
1477 : */
1478 0 : struct inode *ilookup(struct super_block *sb, unsigned long ino)
1479 : {
1480 0 : struct hlist_head *head = inode_hashtable + hash(sb, ino);
1481 : struct inode *inode;
1482 : again:
1483 0 : spin_lock(&inode_hash_lock);
1484 0 : inode = find_inode_fast(sb, head, ino);
1485 0 : spin_unlock(&inode_hash_lock);
1486 :
1487 0 : if (inode) {
1488 0 : if (IS_ERR(inode))
1489 : return NULL;
1490 0 : wait_on_inode(inode);
1491 0 : if (unlikely(inode_unhashed(inode))) {
1492 0 : iput(inode);
1493 0 : goto again;
1494 : }
1495 : }
1496 : return inode;
1497 : }
1498 : EXPORT_SYMBOL(ilookup);
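
Sketch of safe use: a hit returns a referenced inode that the caller must eventually release with iput():

	struct inode *inode = ilookup(sb, ino);

	if (inode) {
		/* ... use the inode: we hold a reference ... */
		iput(inode);
	}
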
1499 :
1500 : /**
1501 : * find_inode_nowait - find an inode in the inode cache
1502 : * @sb: super block of file system to search
1503 : * @hashval: hash value (usually inode number) to search for
1504 : * @match: callback used for comparisons between inodes
1505 : * @data: opaque data pointer to pass to @match
1506 : *
1507 : * Search for the inode specified by @hashval and @data in the inode
1508 : * cache, where the helper function @match will return 0 if the inode
1509 : * does not match, 1 if the inode does match, and -1 if the search
1510 : * should be stopped. The @match function must be responsible for
1511 : * taking the i_lock spin_lock and checking i_state for an inode being
1512 : * freed or being initialized, and incrementing the reference count
1513 : * before returning 1. It also must not sleep, since it is called with
1514 : * the inode_hash_lock spinlock held.
1515 : *
1516 : * This is an even more generalized version of ilookup5() for when the
1517 : * function must never block --- find_inode() can block in
1518 : * __wait_on_freeing_inode() --- or when the caller can not increment
1519 : * the reference count because the resulting iput() might cause an
1520 : * inode eviction. The tradeoff is that the @match function must be
1521 : * very carefully implemented.
1522 : */
1523 0 : struct inode *find_inode_nowait(struct super_block *sb,
1524 : unsigned long hashval,
1525 : int (*match)(struct inode *, unsigned long,
1526 : void *),
1527 : void *data)
1528 : {
1529 0 : struct hlist_head *head = inode_hashtable + hash(sb, hashval);
1530 0 : struct inode *inode, *ret_inode = NULL;
1531 : int mval;
1532 :
1533 0 : spin_lock(&inode_hash_lock);
1534 0 : hlist_for_each_entry(inode, head, i_hash) {
1535 0 : if (inode->i_sb != sb)
1536 0 : continue;
1537 0 : mval = match(inode, hashval, data);
1538 0 : if (mval == 0)
1539 0 : continue;
1540 0 : if (mval == 1)
1541 0 : ret_inode = inode;
1542 : goto out;
1543 : }
1544 : out:
1545 0 : spin_unlock(&inode_hash_lock);
1546 0 : return ret_inode;
1547 : }
1548 : EXPORT_SYMBOL(find_inode_nowait);
1549 :
1550 : /**
1551 : * find_inode_rcu - find an inode in the inode cache
1552 : * @sb: Super block of file system to search
1553 : * @hashval: Key to hash
1554 : * @test: Function to test match on an inode
1555 : * @data: Data for test function
1556 : *
1557 : * Search for the inode specified by @hashval and @data in the inode cache,
1558 : * where the helper function @test will return 0 if the inode does not match
1559 : * and 1 if it does. The @test function must be responsible for taking the
1560 : * i_lock spin_lock and checking i_state for an inode being freed or being
1561 : * initialized.
1562 : *
1563 : * If successful, this will return the inode for which the @test function
1564 : * returned 1 and NULL otherwise.
1565 : *
1566 : * The @test function is not permitted to take a ref on any inode presented.
1567 : * It is also not permitted to sleep.
1568 : *
1569 : * The caller must hold the RCU read lock.
1570 : */
1571 0 : struct inode *find_inode_rcu(struct super_block *sb, unsigned long hashval,
1572 : int (*test)(struct inode *, void *), void *data)
1573 : {
1574 0 : struct hlist_head *head = inode_hashtable + hash(sb, hashval);
1575 : struct inode *inode;
1576 :
1577 : RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
1578 : "suspicious find_inode_rcu() usage");
1579 :
1580 0 : hlist_for_each_entry_rcu(inode, head, i_hash) {
1581 0 : if (inode->i_sb == sb &&
1582 0 : !(READ_ONCE(inode->i_state) & (I_FREEING | I_WILL_FREE)) &&
1583 0 : test(inode, data))
1584 : return inode;
1585 : }
1586 : return NULL;
1587 : }
1588 : EXPORT_SYMBOL(find_inode_rcu);
1589 :
1590 : /**
1591 : * find_inode_by_ino_rcu - Find an inode in the inode cache
1592 : * @sb: Super block of file system to search
1593 : * @ino: The inode number to match
1594 : *
1595 : * Search for the inode with number @ino in the inode cache of the
1596 : * superblock @sb, skipping inodes that are being freed.
1597 : *
1598 : * If successful, this will return the matching inode and NULL otherwise.
1606 : *
1607 : * The caller must hold the RCU read lock.
1608 : */
1609 0 : struct inode *find_inode_by_ino_rcu(struct super_block *sb,
1610 : unsigned long ino)
1611 : {
1612 0 : struct hlist_head *head = inode_hashtable + hash(sb, ino);
1613 : struct inode *inode;
1614 :
1615 : RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
1616 : "suspicious find_inode_by_ino_rcu() usage");
1617 :
1618 0 : hlist_for_each_entry_rcu(inode, head, i_hash) {
1619 0 : if (inode->i_ino == ino &&
1620 0 : inode->i_sb == sb &&
1621 0 : !(READ_ONCE(inode->i_state) & (I_FREEING | I_WILL_FREE)))
1622 : return inode;
1623 : }
1624 : return NULL;
1625 : }
1626 : EXPORT_SYMBOL(find_inode_by_ino_rcu);
1627 :
1628 0 : int insert_inode_locked(struct inode *inode)
1629 : {
1630 0 : struct super_block *sb = inode->i_sb;
1631 0 : ino_t ino = inode->i_ino;
1632 0 : struct hlist_head *head = inode_hashtable + hash(sb, ino);
1633 :
1634 0 : while (1) {
1635 0 : struct inode *old = NULL;
1636 0 : spin_lock(&inode_hash_lock);
1637 0 : hlist_for_each_entry(old, head, i_hash) {
1638 0 : if (old->i_ino != ino)
1639 0 : continue;
1640 0 : if (old->i_sb != sb)
1641 0 : continue;
1642 0 : spin_lock(&old->i_lock);
1643 0 : if (old->i_state & (I_FREEING|I_WILL_FREE)) {
1644 0 : spin_unlock(&old->i_lock);
1645 0 : continue;
1646 : }
1647 : break;
1648 : }
1649 0 : if (likely(!old)) {
1650 0 : spin_lock(&inode->i_lock);
1651 0 : inode->i_state |= I_NEW | I_CREATING;
1652 0 : hlist_add_head_rcu(&inode->i_hash, head);
1653 0 : spin_unlock(&inode->i_lock);
1654 0 : spin_unlock(&inode_hash_lock);
1655 0 : return 0;
1656 : }
1657 0 : if (unlikely(old->i_state & I_CREATING)) {
1658 0 : spin_unlock(&old->i_lock);
1659 0 : spin_unlock(&inode_hash_lock);
1660 0 : return -EBUSY;
1661 : }
1662 0 : __iget(old);
1663 0 : spin_unlock(&old->i_lock);
1664 0 : spin_unlock(&inode_hash_lock);
1665 0 : wait_on_inode(old);
1666 0 : if (unlikely(!inode_unhashed(old))) {
1667 0 : iput(old);
1668 0 : return -EBUSY;
1669 : }
1670 0 : iput(old);
1671 : }
1672 : }
1673 : EXPORT_SYMBOL(insert_inode_locked);
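
Sketch of the create side, after allocating a fresh inode and choosing its number; error handling is simplified (real filesystems do more cleanup here):

	inode->i_ino = ino;
	if (insert_inode_locked(inode) < 0) {
		/* lost a race: that number is live in the cache */
		iput(inode);
		return ERR_PTR(-EBUSY);
	}
	/*
	 * Now hashed with I_NEW | I_CREATING set: fill in the inode,
	 * then publish it with unlock_new_inode().
	 */
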
1674 :
1675 0 : int insert_inode_locked4(struct inode *inode, unsigned long hashval,
1676 : int (*test)(struct inode *, void *), void *data)
1677 : {
1678 : struct inode *old;
1679 :
1680 0 : inode->i_state |= I_CREATING;
1681 0 : old = inode_insert5(inode, hashval, test, NULL, data);
1682 :
1683 0 : if (old != inode) {
1684 0 : iput(old);
1685 0 : return -EBUSY;
1686 : }
1687 : return 0;
1688 : }
1689 : EXPORT_SYMBOL(insert_inode_locked4);
1690 :
1691 :
1692 0 : int generic_delete_inode(struct inode *inode)
1693 : {
1694 0 : return 1;
1695 : }
1696 : EXPORT_SYMBOL(generic_delete_inode);
1697 :
1698 : /*
1699 : * Called when we're dropping the last reference
1700 : * to an inode.
1701 : *
1702 : * Call the FS "drop_inode()" function, defaulting to
1703 : * the legacy UNIX filesystem behaviour. If it tells
1704 : * us to evict the inode, do so. Otherwise, retain the
1705 : * inode in the cache if the fs is alive, or sync and
1706 : * evict it if the fs is shutting down.
1707 : */
1708 34 : static void iput_final(struct inode *inode)
1709 : {
1710 34 : struct super_block *sb = inode->i_sb;
1711 34 : const struct super_operations *op = inode->i_sb->s_op;
1712 : unsigned long state;
1713 : int drop;
1714 :
1715 34 : WARN_ON(inode->i_state & I_NEW);
1716 :
1717 34 : if (op->drop_inode)
1718 0 : drop = op->drop_inode(inode);
1719 : else
1720 : drop = generic_drop_inode(inode);
1721 :
1722 34 : if (!drop &&
1723 0 : !(inode->i_state & I_DONTCACHE) &&
1724 0 : (sb->s_flags & SB_ACTIVE)) {
1725 0 : __inode_add_lru(inode, true);
1726 0 : spin_unlock(&inode->i_lock);
1727 : return;
1728 : }
1729 :
1730 34 : state = inode->i_state;
1731 34 : if (!drop) {
1732 0 : WRITE_ONCE(inode->i_state, state | I_WILL_FREE);
1733 0 : spin_unlock(&inode->i_lock);
1734 :
1735 0 : write_inode_now(inode, 1);
1736 :
1737 0 : spin_lock(&inode->i_lock);
1738 0 : state = inode->i_state;
1739 0 : WARN_ON(state & I_NEW);
1740 0 : state &= ~I_WILL_FREE;
1741 : }
1742 :
1743 34 : WRITE_ONCE(inode->i_state, state | I_FREEING);
1744 68 : if (!list_empty(&inode->i_lru))
1745 0 : inode_lru_list_del(inode);
1746 68 : spin_unlock(&inode->i_lock);
1747 :
1748 34 : evict(inode);
1749 : }
1750 :
1751 : /**
1752 : * iput - put an inode
1753 : * @inode: inode to put
1754 : *
1755 : * Puts an inode, dropping its usage count. If the inode use count hits
1756 : * zero, the inode is then freed and may also be destroyed.
1757 : *
1758 : * Consequently, iput() can sleep.
1759 : */
1760 51 : void iput(struct inode *inode)
1761 : {
1762 51 : if (!inode)
1763 : return;
1764 34 : BUG_ON(inode->i_state & I_CLEAR);
1765 : retry:
1766 34 : if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock)) {
1767 34 : if (inode->i_nlink && (inode->i_state & I_DIRTY_TIME)) {
1768 0 : atomic_inc(&inode->i_count);
1769 0 : spin_unlock(&inode->i_lock);
1770 0 : trace_writeback_lazytime_iput(inode);
1771 : mark_inode_dirty_sync(inode);
1772 : goto retry;
1773 : }
1774 34 : iput_final(inode);
1775 : }
1776 : }
1777 : EXPORT_SYMBOL(iput);
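/*
 * Example (hypothetical helper names): code that stashes an inode for
 * deferred work must pin it first and drop the pin with iput() when
 * done. Since the final iput() can write back and evict, call it in
 * sleepable context with no spinlocks held.
 *
 *	struct inode *inode = igrab(candidate);
 *
 *	if (inode) {
 *		example_do_deferred_work(inode);
 *		iput(inode);
 *	}
 */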
1778 :
1779 : #ifdef CONFIG_BLOCK
1780 : /**
1781 : * bmap - find a block number in a file
1782 : * @inode: inode owning the block number being requested
1783 : * @block: pointer containing the block to find
1784 : *
1785 : * Replaces the value in ``*block`` with the number of the block, relative
1786 : * to the start of the device, that holds the corresponding block of the
1787 : * file. That is, asked for block 4 of inode 1, the function replaces the
1788 : * 4 in ``*block`` with the disk block that holds block 4 of that file.
1790 : *
1791 : * Returns -EINVAL in case of error, 0 otherwise. If mapping falls into a
1792 : * hole, returns 0 and ``*block`` is also set to 0.
1793 : */
1794 0 : int bmap(struct inode *inode, sector_t *block)
1795 : {
1796 0 : if (!inode->i_mapping->a_ops->bmap)
1797 : return -EINVAL;
1798 :
1799 0 : *block = inode->i_mapping->a_ops->bmap(inode->i_mapping, *block);
1800 0 : return 0;
1801 : }
1802 : EXPORT_SYMBOL(bmap);
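/*
 * Example (a minimal sketch): mapping file block 4 of an inode whose
 * filesystem implements ->bmap.
 *
 *	sector_t blk = 4;
 *
 *	if (bmap(inode, &blk) == 0 && blk != 0)
 *		pr_debug("file block 4 is disk block %llu\n",
 *			 (unsigned long long)blk);
 *	// blk == 0 after success means the range is a hole
 */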
1803 : #endif
1804 :
1805 : /*
1806 : * With relative atime, only update atime if the previous atime is
1807 : * earlier than either the ctime or mtime or if at least a day has
1808 : * passed since the last atime update.
1809 : */
1810 0 : static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
1811 : struct timespec64 now)
1812 : {
1813 :
1814 0 : if (!(mnt->mnt_flags & MNT_RELATIME))
1815 : return 1;
1816 : /*
1817 : * Is mtime younger than atime? If yes, update atime:
1818 : */
1819 0 : if (timespec64_compare(&inode->i_mtime, &inode->i_atime) >= 0)
1820 : return 1;
1821 : /*
1822 : * Is ctime younger than atime? If yes, update atime:
1823 : */
1824 0 : if (timespec64_compare(&inode->i_ctime, &inode->i_atime) >= 0)
1825 : return 1;
1826 :
1827 : /*
1828 : * Is the previous atime value older than a day? If yes,
1829 : * update atime:
1830 : */
1831 0 : if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
1832 : return 1;
1833 : /*
1834 : * Good, we can skip the atime update:
1835 : */
1836 : return 0;
1837 : }
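/*
 * Worked example (hypothetical timestamps, MNT_RELATIME set): a file with
 * atime=50 that is written at t=100 (mtime=100) gets its atime bumped on
 * the next read, since mtime >= atime. Reads shortly after that are
 * skipped, and stay skipped until the file changes again or a full day
 * has passed since the last atime update.
 */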
1838 :
1839 0 : int generic_update_time(struct inode *inode, struct timespec64 *time, int flags)
1840 : {
1841 0 : int dirty_flags = 0;
1842 :
1843 0 : if (flags & (S_ATIME | S_CTIME | S_MTIME)) {
1844 0 : if (flags & S_ATIME)
1845 0 : inode->i_atime = *time;
1846 0 : if (flags & S_CTIME)
1847 0 : inode->i_ctime = *time;
1848 0 : if (flags & S_MTIME)
1849 0 : inode->i_mtime = *time;
1850 :
1851 0 : if (inode->i_sb->s_flags & SB_LAZYTIME)
1852 : dirty_flags |= I_DIRTY_TIME;
1853 : else
1854 0 : dirty_flags |= I_DIRTY_SYNC;
1855 : }
1856 :
1857 0 : if ((flags & S_VERSION) && inode_maybe_inc_iversion(inode, false))
1858 0 : dirty_flags |= I_DIRTY_SYNC;
1859 :
1860 0 : __mark_inode_dirty(inode, dirty_flags);
1861 0 : return 0;
1862 : }
1863 : EXPORT_SYMBOL(generic_update_time);
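/*
 * Example of a filesystem hooking ->update_time (examplefs names are
 * hypothetical): do private bookkeeping, then fall through to the
 * generic helper so the dirtying rules above stay in one place.
 *
 *	static int examplefs_update_time(struct inode *inode,
 *					 struct timespec64 *time, int flags)
 *	{
 *		examplefs_note_activity(inode);
 *		return generic_update_time(inode, time, flags);
 *	}
 */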
1864 :
1865 : /*
1866 : * This does the actual work of updating an inode's time or version. The
1867 : * caller must have called mnt_want_write() before calling this.
1868 : */
1869 0 : int inode_update_time(struct inode *inode, struct timespec64 *time, int flags)
1870 : {
1871 0 : if (inode->i_op->update_time)
1872 0 : return inode->i_op->update_time(inode, time, flags);
1873 0 : return generic_update_time(inode, time, flags);
1874 : }
1875 : EXPORT_SYMBOL(inode_update_time);
1876 :
1877 : /**
1878 : * atime_needs_update - check whether the access time needs updating
1879 : * @path: the &struct path that was accessed
1880 : * @inode: inode that was accessed
1881 : *
1882 : * Decide whether an atime update is required. This automatically handles
1883 : * read-only file systems and media, the "noatime" and "nodiratime" mount
1884 : * options, inode-specific "noatime" markers and relative atime. Callers
1885 : * that get true should go on to update atime, as touch_atime() does.
1885 : */
1886 0 : bool atime_needs_update(const struct path *path, struct inode *inode)
1887 : {
1888 0 : struct vfsmount *mnt = path->mnt;
1889 : struct timespec64 now;
1890 :
1891 0 : if (inode->i_flags & S_NOATIME)
1892 : return false;
1893 :
1894 : /* Atime updates will likely cause i_uid and i_gid to be written
1895 : * back improperly if their true value is unknown to the vfs.
1896 : */
1897 0 : if (HAS_UNMAPPED_ID(mnt_idmap(mnt), inode))
1898 : return false;
1899 :
1900 0 : if (IS_NOATIME(inode))
1901 : return false;
1902 0 : if ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode))
1903 : return false;
1904 :
1905 0 : if (mnt->mnt_flags & MNT_NOATIME)
1906 : return false;
1907 0 : if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
1908 : return false;
1909 :
1910 0 : now = current_time(inode);
1911 :
1912 0 : if (!relatime_need_update(mnt, inode, now))
1913 : return false;
1914 :
1915 0 : if (timespec64_equal(&inode->i_atime, &now))
1916 : return false;
1917 :
1918 0 : return true;
1919 : }
1920 :
1921 0 : void touch_atime(const struct path *path)
1922 : {
1923 0 : struct vfsmount *mnt = path->mnt;
1924 0 : struct inode *inode = d_inode(path->dentry);
1925 : struct timespec64 now;
1926 :
1927 0 : if (!atime_needs_update(path, inode))
1928 0 : return;
1929 :
1930 0 : if (!sb_start_write_trylock(inode->i_sb))
1931 : return;
1932 :
1933 0 : if (__mnt_want_write(mnt) != 0)
1934 : goto skip_update;
1935 : /*
1936 : * File systems can error out when updating inodes if they need to
1937 : * allocate new space to modify an inode (such is the case for
1938 : * Btrfs), but since we touch atime while walking down the path we
1939 : * really don't care if we failed to update the atime of the file,
1940 : * so just ignore the return value.
1941 : * We may also fail on filesystems that have the ability to make parts
1942 : * of the fs read only, e.g. subvolumes in Btrfs.
1943 : */
1944 0 : now = current_time(inode);
1945 0 : inode_update_time(inode, &now, S_ATIME);
1946 0 : __mnt_drop_write(mnt);
1947 : skip_update:
1948 0 : sb_end_write(inode->i_sb);
1949 : }
1950 : EXPORT_SYMBOL(touch_atime);
1951 :
1952 : /*
1953 : * Return mask of changes for notify_change() that need to be done as a
1954 : * response to write or truncate. Return 0 if nothing has to be changed.
1955 : * Negative value on error (change should be denied).
1956 : */
1957 0 : int dentry_needs_remove_privs(struct mnt_idmap *idmap,
1958 : struct dentry *dentry)
1959 : {
1960 0 : struct inode *inode = d_inode(dentry);
1961 0 : int mask = 0;
1962 : int ret;
1963 :
1964 0 : if (IS_NOSEC(inode))
1965 : return 0;
1966 :
1967 0 : mask = setattr_should_drop_suidgid(idmap, inode);
1968 0 : ret = security_inode_need_killpriv(dentry);
1969 0 : if (ret < 0)
1970 : return ret;
1971 0 : if (ret)
1972 0 : mask |= ATTR_KILL_PRIV;
1973 : return mask;
1974 : }
1975 :
1976 : static int __remove_privs(struct mnt_idmap *idmap,
1977 : struct dentry *dentry, int kill)
1978 : {
1979 : struct iattr newattrs;
1980 :
1981 0 : newattrs.ia_valid = ATTR_FORCE | kill;
1982 : /*
1983 : * Note we call this on write, so notify_change will not
1984 : * encounter any conflicting delegations:
1985 : */
1986 0 : return notify_change(idmap, dentry, &newattrs, NULL);
1987 : }
1988 :
1989 0 : static int __file_remove_privs(struct file *file, unsigned int flags)
1990 : {
1991 0 : struct dentry *dentry = file_dentry(file);
1992 0 : struct inode *inode = file_inode(file);
1993 0 : int error = 0;
1994 : int kill;
1995 :
1996 0 : if (IS_NOSEC(inode) || !S_ISREG(inode->i_mode))
1997 : return 0;
1998 :
1999 0 : kill = dentry_needs_remove_privs(file_mnt_idmap(file), dentry);
2000 0 : if (kill < 0)
2001 : return kill;
2002 :
2003 0 : if (kill) {
2004 0 : if (flags & IOCB_NOWAIT)
2005 : return -EAGAIN;
2006 :
2007 0 : error = __remove_privs(file_mnt_idmap(file), dentry, kill);
2008 : }
2009 :
2010 0 : if (!error)
2011 : inode_has_no_xattr(inode);
2012 : return error;
2013 : }
2014 :
2015 : /**
2016 : * file_remove_privs - remove special file privileges (suid, capabilities)
2017 : * @file: file to remove privileges from
2018 : *
2019 : * When a file is modified by a write or truncation, ensure that special
2020 : * file privileges are removed.
2021 : *
2022 : * Return: 0 on success, negative errno on failure.
2023 : */
2024 0 : int file_remove_privs(struct file *file)
2025 : {
2026 0 : return __file_remove_privs(file, 0);
2027 : }
2028 : EXPORT_SYMBOL(file_remove_privs);
2029 :
2030 0 : static int inode_needs_update_time(struct inode *inode, struct timespec64 *now)
2031 : {
2032 0 : int sync_it = 0;
2033 :
2034 : /* First try to exhaust all avenues to not sync */
2035 0 : if (IS_NOCMTIME(inode))
2036 : return 0;
2037 :
2038 0 : if (!timespec64_equal(&inode->i_mtime, now))
2039 0 : sync_it = S_MTIME;
2040 :
2041 0 : if (!timespec64_equal(&inode->i_ctime, now))
2042 0 : sync_it |= S_CTIME;
2043 :
2044 0 : if (IS_I_VERSION(inode) && inode_iversion_need_inc(inode))
2045 0 : sync_it |= S_VERSION;
2046 :
2047 : return sync_it;
2048 : }
2049 :
2050 0 : static int __file_update_time(struct file *file, struct timespec64 *now,
2051 : int sync_mode)
2052 : {
2053 0 : int ret = 0;
2054 0 : struct inode *inode = file_inode(file);
2055 :
2056 : /* try to update time settings */
2057 0 : if (!__mnt_want_write_file(file)) {
2058 0 : ret = inode_update_time(inode, now, sync_mode);
2059 0 : __mnt_drop_write_file(file);
2060 : }
2061 :
2062 0 : return ret;
2063 : }
2064 :
2065 : /**
2066 : * file_update_time - update mtime and ctime
2067 : * @file: file accessed
2068 : *
2069 : * Update the mtime and ctime members of an inode and mark the inode for
2070 : * writeback. Note that this function is meant exclusively for usage in
2071 : * the file write path of filesystems, and filesystems may choose to
2072 : * explicitly ignore updates via this function with the S_NOCMTIME inode
2073 : * flag, e.g. for network filesystems where these timestamps are handled
2074 : * by the server. This can return an error for file systems that need to
2075 : * allocate space in order to update an inode.
2076 : *
2077 : * Return: 0 on success, negative errno on failure.
2078 : */
2079 0 : int file_update_time(struct file *file)
2080 : {
2081 : int ret;
2082 0 : struct inode *inode = file_inode(file);
2083 0 : struct timespec64 now = current_time(inode);
2084 :
2085 0 : ret = inode_needs_update_time(inode, &now);
2086 0 : if (ret <= 0)
2087 : return ret;
2088 :
2089 0 : return __file_update_time(file, &now, ret);
2090 : }
2091 : EXPORT_SYMBOL(file_update_time);
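/*
 * Example (a sketch of the classic ->page_mkwrite user; examplefs names
 * are hypothetical): bump mtime/ctime before a shared-writable mapping
 * is allowed to dirty a page.
 *
 *	static vm_fault_t examplefs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct file *file = vmf->vma->vm_file;
 *
 *		sb_start_pagefault(file_inode(file)->i_sb);
 *		file_update_time(file);
 *		// ... lock the folio and mark it dirty ...
 *		sb_end_pagefault(file_inode(file)->i_sb);
 *		return VM_FAULT_LOCKED;
 *	}
 */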
2092 :
2093 : /**
2094 : * file_modified_flags - handle mandated vfs changes when modifying a file
2095 : * @file: file that was modified
2096 : * @flags: kiocb flags
2097 : *
2098 : * When a file has been modified, ensure that special
2099 : * file privileges are removed and its timestamps are updated.
2100 : *
2101 : * If IOCB_NOWAIT is set and privileges or timestamps would have to
2102 : * change, nothing is changed and -EAGAIN is returned instead.
2103 : *
2104 : * Context: Caller must hold the file's inode lock.
2105 : *
2106 : * Return: 0 on success, negative errno on failure.
2107 : */
2108 0 : static int file_modified_flags(struct file *file, int flags)
2109 : {
2110 : int ret;
2111 0 : struct inode *inode = file_inode(file);
2112 0 : struct timespec64 now = current_time(inode);
2113 :
2114 : /*
2115 : * Clear the security bits if the process is not being run by root.
2116 : * This keeps people from modifying setuid and setgid binaries.
2117 : */
2118 0 : ret = __file_remove_privs(file, flags);
2119 0 : if (ret)
2120 : return ret;
2121 :
2122 0 : if (unlikely(file->f_mode & FMODE_NOCMTIME))
2123 : return 0;
2124 :
2125 0 : ret = inode_needs_update_time(inode, &now);
2126 0 : if (ret <= 0)
2127 : return ret;
2128 0 : if (flags & IOCB_NOWAIT)
2129 : return -EAGAIN;
2130 :
2131 0 : return __file_update_time(file, &now, ret);
2132 : }
2133 :
2134 : /**
2135 : * file_modified - handle mandated vfs changes when modifying a file
2136 : * @file: file that was modified
2137 : *
2138 : * When file has been modified ensure that special
2139 : * file privileges are removed and time settings are updated.
2140 : *
2141 : * Context: Caller must hold the file's inode lock.
2142 : *
2143 : * Return: 0 on success, negative errno on failure.
2144 : */
2145 0 : int file_modified(struct file *file)
2146 : {
2147 0 : return file_modified_flags(file, 0);
2148 : }
2149 : EXPORT_SYMBOL(file_modified);
2150 :
2151 : /**
2152 : * kiocb_modified - handle mandated vfs changes when modifying a file
2153 : * @iocb: iocb of the file that was modified
2154 : *
2155 : * When a file has been modified, ensure that special
2156 : * file privileges are removed and its timestamps are updated.
2157 : *
2158 : * Context: Caller must hold the file's inode lock.
2159 : *
2160 : * Return: 0 on success, negative errno on failure.
2161 : */
2162 0 : int kiocb_modified(struct kiocb *iocb)
2163 : {
2164 0 : return file_modified_flags(iocb->ki_filp, iocb->ki_flags);
2165 : }
2166 : EXPORT_SYMBOL_GPL(kiocb_modified);
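/*
 * Example write path (a sketch; examplefs names are hypothetical): strip
 * privileges and update timestamps in one call before performing the
 * write, honouring IOCB_NOWAIT automatically.
 *
 *	static ssize_t examplefs_write_iter(struct kiocb *iocb,
 *					    struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock(inode);
 *		ret = kiocb_modified(iocb);
 *		if (!ret)
 *			ret = generic_perform_write(iocb, from);
 *		inode_unlock(inode);
 *		return ret;
 *	}
 */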
2167 :
2168 0 : int inode_needs_sync(struct inode *inode)
2169 : {
2170 0 : if (IS_SYNC(inode))
2171 : return 1;
2172 0 : if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
2173 : return 1;
2174 0 : return 0;
2175 : }
2176 : EXPORT_SYMBOL(inode_needs_sync);
2177 :
2178 : /*
2179 : * If we try to find an inode in the inode hash while it is being
2180 : * deleted, we have to wait until the filesystem completes its
2181 : * deletion before reporting that it isn't found. This function waits
2182 : * until the deletion _might_ have completed. Callers are responsible
2183 : * to recheck inode state.
2184 : *
2185 : * It doesn't matter if I_NEW is not set initially, a call to
2186 : * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
2187 : * will DTRT.
2188 : */
2189 0 : static void __wait_on_freeing_inode(struct inode *inode)
2190 : {
2191 : wait_queue_head_t *wq;
2192 0 : DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
2193 0 : wq = bit_waitqueue(&inode->i_state, __I_NEW);
2194 0 : prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
2195 0 : spin_unlock(&inode->i_lock);
2196 0 : spin_unlock(&inode_hash_lock);
2197 0 : schedule();
2198 0 : finish_wait(wq, &wait.wq_entry);
2199 0 : spin_lock(&inode_hash_lock);
2200 0 : }
2201 :
2202 : static __initdata unsigned long ihash_entries;
2203 0 : static int __init set_ihash_entries(char *str)
2204 : {
2205 0 : if (!str)
2206 : return 0;
2207 0 : ihash_entries = simple_strtoul(str, &str, 0);
2208 0 : return 1;
2209 : }
2210 : __setup("ihash_entries=", set_ihash_entries);
2211 :
2212 : /*
2213 : * Initialize the waitqueues and inode hash table.
2214 : */
2215 1 : void __init inode_init_early(void)
2216 : {
2217 : /* If hashes are distributed across NUMA nodes, defer
2218 : * hash allocation until vmalloc space is available.
2219 : */
2220 : if (hashdist)
2221 : return;
2222 :
2223 1 : inode_hashtable =
2224 1 : alloc_large_system_hash("Inode-cache",
2225 : sizeof(struct hlist_head),
2226 : ihash_entries,
2227 : 14,
2228 : HASH_EARLY | HASH_ZERO,
2229 : &i_hash_shift,
2230 : &i_hash_mask,
2231 : 0,
2232 : 0);
2233 : }
2234 :
2235 1 : void __init inode_init(void)
2236 : {
2237 : /* inode slab cache */
2238 1 : inode_cachep = kmem_cache_create("inode_cache",
2239 : sizeof(struct inode),
2240 : 0,
2241 : (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
2242 : SLAB_MEM_SPREAD|SLAB_ACCOUNT),
2243 : init_once);
2244 :
2245 : /* Hash may have been set up in inode_init_early */
2246 : if (!hashdist)
2247 : return;
2248 :
2249 : inode_hashtable =
2250 : alloc_large_system_hash("Inode-cache",
2251 : sizeof(struct hlist_head),
2252 : ihash_entries,
2253 : 14,
2254 : HASH_ZERO,
2255 : &i_hash_shift,
2256 : &i_hash_mask,
2257 : 0,
2258 : 0);
2259 : }
2260 :
2261 1 : void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
2262 : {
2263 1 : inode->i_mode = mode;
2264 1 : if (S_ISCHR(mode)) {
2265 1 : inode->i_fop = &def_chr_fops;
2266 1 : inode->i_rdev = rdev;
2267 0 : } else if (S_ISBLK(mode)) {
2268 0 : inode->i_fop = &def_blk_fops;
2269 0 : inode->i_rdev = rdev;
2270 0 : } else if (S_ISFIFO(mode))
2271 0 : inode->i_fop = &pipefifo_fops;
2272 0 : else if (S_ISSOCK(mode))
2273 : ; /* leave it no_open_fops */
2274 : else
2275 0 : printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
2276 : " inode %s:%lu\n", mode, inode->i_sb->s_id,
2277 : inode->i_ino);
2278 1 : }
2279 : EXPORT_SYMBOL(init_special_inode);
2280 :
2281 : /**
2282 : * inode_init_owner - Init uid,gid,mode for new inode according to posix standards
2283 : * @idmap: idmap of the mount the inode was created from
2284 : * @inode: New inode
2285 : * @dir: Directory inode
2286 : * @mode: mode of the new inode
2287 : *
2288 : * If the inode has been created through an idmapped mount the idmap of
2289 : * the vfsmount must be passed through @idmap. This function will then take
2290 : * care to map the inode according to @idmap before checking permissions
2291 : * and initializing i_uid and i_gid. On non-idmapped mounts or if permission
2292 : * checking is to be performed on the raw inode simply pass @nop_mnt_idmap.
2293 : */
2294 5 : void inode_init_owner(struct mnt_idmap *idmap, struct inode *inode,
2295 : const struct inode *dir, umode_t mode)
2296 : {
2297 5 : inode_fsuid_set(inode, idmap);
2298 5 : if (dir && dir->i_mode & S_ISGID) {
2299 0 : inode->i_gid = dir->i_gid;
2300 :
2301 : /* Directories are special, and always inherit S_ISGID */
2302 0 : if (S_ISDIR(mode))
2303 0 : mode |= S_ISGID;
2304 : } else
2305 5 : inode_fsgid_set(inode, idmap);
2306 5 : inode->i_mode = mode;
2307 5 : }
2308 : EXPORT_SYMBOL(inode_init_owner);
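/*
 * Example tying inode_init_owner() and init_special_inode() together (a
 * sketch of a minimal ->mknod; examplefs is hypothetical):
 *
 *	static int examplefs_mknod(struct mnt_idmap *idmap, struct inode *dir,
 *				   struct dentry *dentry, umode_t mode,
 *				   dev_t rdev)
 *	{
 *		struct inode *inode = new_inode(dir->i_sb);
 *
 *		if (!inode)
 *			return -ENOMEM;
 *		inode_init_owner(idmap, inode, dir, mode);
 *		init_special_inode(inode, inode->i_mode, rdev);
 *		d_instantiate(dentry, inode);
 *		return 0;
 *	}
 */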
2309 :
2310 : /**
2311 : * inode_owner_or_capable - check current task permissions to inode
2312 : * @idmap: idmap of the mount the inode was found from
2313 : * @inode: inode being checked
2314 : *
2315 : * Return true if current either has CAP_FOWNER in a namespace with the
2316 : * inode owner uid mapped, or owns the file.
2317 : *
2318 : * If the inode has been found through an idmapped mount the idmap of
2319 : * the vfsmount must be passed through @idmap. This function will then take
2320 : * care to map the inode according to @idmap before checking permissions.
2321 : * On non-idmapped mounts or if permission checking is to be performed on the
2322 : * raw inode simply pass @nop_mnt_idmap.
2323 : */
2324 0 : bool inode_owner_or_capable(struct mnt_idmap *idmap,
2325 : const struct inode *inode)
2326 : {
2327 : vfsuid_t vfsuid;
2328 : struct user_namespace *ns;
2329 :
2330 0 : vfsuid = i_uid_into_vfsuid(idmap, inode);
2331 0 : if (vfsuid_eq_kuid(vfsuid, current_fsuid()))
2332 : return true;
2333 :
2334 0 : ns = current_user_ns();
2335 0 : if (vfsuid_has_mapping(ns, vfsuid) && ns_capable(ns, CAP_FOWNER))
2336 : return true;
2337 : return false;
2338 : }
2339 : EXPORT_SYMBOL(inode_owner_or_capable);
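/*
 * Example (hypothetical ioctl handler): gate a per-inode flag change on
 * ownership or capability.
 *
 *	if (!inode_owner_or_capable(file_mnt_idmap(file), inode))
 *		return -EPERM;
 */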
2340 :
2341 : /*
2342 : * Direct i/o helper functions
2343 : */
2344 0 : static void __inode_dio_wait(struct inode *inode)
2345 : {
2346 0 : wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
2347 0 : DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);
2348 :
2349 : do {
2350 0 : prepare_to_wait(wq, &q.wq_entry, TASK_UNINTERRUPTIBLE);
2351 0 : if (atomic_read(&inode->i_dio_count))
2352 0 : schedule();
2353 0 : } while (atomic_read(&inode->i_dio_count));
2354 0 : finish_wait(wq, &q.wq_entry);
2355 0 : }
2356 :
2357 : /**
2358 : * inode_dio_wait - wait for outstanding DIO requests to finish
2359 : * @inode: inode to wait for
2360 : *
2361 : * Waits for all pending direct I/O requests to finish so that we can
2362 : * proceed with a truncate or equivalent operation.
2363 : *
2364 : * Must be called under a lock that serializes taking new references
2365 : * to i_dio_count, usually by inode->i_mutex.
2366 : */
2367 0 : void inode_dio_wait(struct inode *inode)
2368 : {
2369 0 : if (atomic_read(&inode->i_dio_count))
2370 0 : __inode_dio_wait(inode);
2371 0 : }
2372 : EXPORT_SYMBOL(inode_dio_wait);
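/*
 * Example (a truncate-style sketch; examplefs_setsize is hypothetical):
 * holding i_rwsem keeps new direct I/O from starting, inode_dio_wait()
 * drains what is already in flight.
 *
 *	inode_lock(inode);
 *	inode_dio_wait(inode);
 *	examplefs_setsize(inode, newsize);
 *	inode_unlock(inode);
 */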
2373 :
2374 : /*
2375 : * inode_set_flags - atomically set some inode flags
2376 : *
2377 : * Note: the caller should be holding i_mutex, or else be sure that
2378 : * they have exclusive access to the inode structure (i.e., while the
2379 : * inode is being instantiated). The reason for the cmpxchg() loop
2380 : * --- which wouldn't be necessary if all code paths which modify
2381 : * i_flags actually followed this rule, is that there is at least one
2382 : * code path which doesn't today so we use cmpxchg() out of an abundance
2383 : * of caution.
2384 : *
2385 : * In the long run, i_mutex is overkill, and we should probably look
2386 : * at using the i_lock spinlock to protect i_flags, and then make sure
2387 : * it is so documented in include/linux/fs.h and that all code follows
2388 : * the locking convention!!
2389 : */
2390 0 : void inode_set_flags(struct inode *inode, unsigned int flags,
2391 : unsigned int mask)
2392 : {
2393 0 : WARN_ON_ONCE(flags & ~mask);
2394 0 : set_mask_bits(&inode->i_flags, mask, flags);
2395 0 : }
2396 : EXPORT_SYMBOL(inode_set_flags);
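/*
 * Example (hypothetical flag mix): set NOATIME and IMMUTABLE while
 * clearing APPEND, leaving every other i_flags bit untouched.
 *
 *	inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
 *			S_NOATIME | S_IMMUTABLE | S_APPEND);
 */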
2397 :
2398 0 : void inode_nohighmem(struct inode *inode)
2399 : {
2400 0 : mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
2401 0 : }
2402 : EXPORT_SYMBOL(inode_nohighmem);
2403 :
2404 : /**
2405 : * timestamp_truncate - Truncate timespec to a granularity
2406 : * @t: Timespec
2407 : * @inode: inode being updated
2408 : *
2409 : * Truncate a timespec to the granularity supported by the fs
2410 : * containing the inode. Always rounds down. gran must
2411 : * not be 0 nor greater than a second (NSEC_PER_SEC, or 10^9 ns).
2412 : */
2413 54 : struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode)
2414 : {
2415 54 : struct super_block *sb = inode->i_sb;
2416 54 : unsigned int gran = sb->s_time_gran;
2417 :
2418 54 : t.tv_sec = clamp(t.tv_sec, sb->s_time_min, sb->s_time_max);
2419 54 : if (unlikely(t.tv_sec == sb->s_time_max || t.tv_sec == sb->s_time_min))
2420 0 : t.tv_nsec = 0;
2421 :
2422 : /* Avoid division in the common cases 1 ns and 1 s. */
2423 54 : if (gran == 1)
2424 : ; /* nothing */
2425 0 : else if (gran == NSEC_PER_SEC)
2426 : t.tv_nsec = 0;
2427 0 : else if (gran > 1 && gran < NSEC_PER_SEC)
2428 0 : t.tv_nsec -= t.tv_nsec % gran;
2429 : else
2430 0 : WARN(1, "invalid file time granularity: %u", gran);
2431 54 : return t;
2432 : }
2433 : EXPORT_SYMBOL(timestamp_truncate);
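/*
 * Worked example: with s_time_gran = 1000000 (1 ms), a timestamp of
 * { .tv_sec = 5, .tv_nsec = 1234567 } truncates to tv_nsec = 1000000,
 * since 1234567 - (1234567 % 1000000) = 1000000. With s_time_gran ==
 * NSEC_PER_SEC, tv_nsec simply becomes 0.
 */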
2434 :
2435 : /**
2436 : * current_time - Return FS time
2437 : * @inode: inode.
2438 : *
2439 : * Return the current time truncated to the time granularity supported by
2440 : * the fs.
2441 : *
2442 : * Note that @inode must not be NULL, and inode->i_sb is expected to be
2443 : * set; if i_sb is missing, the function warns and returns the time
2444 : * without truncation.
2444 : */
2445 54 : struct timespec64 current_time(struct inode *inode)
2446 : {
2447 : struct timespec64 now;
2448 :
2449 54 : ktime_get_coarse_real_ts64(&now);
2450 :
2451 54 : if (unlikely(!inode->i_sb)) {
2452 0 : WARN(1, "current_time() called with uninitialized super_block in the inode");
2453 0 : return now;
2454 : }
2455 :
2456 54 : return timestamp_truncate(now, inode);
2457 : }
2458 : EXPORT_SYMBOL(current_time);
2459 :
2460 : /**
2461 : * in_group_or_capable - check group membership or CAP_FSETID privilege
2462 : * @idmap: idmap of the mount @inode was found from
2463 : * @inode: inode to check
2464 : * @vfsgid: the new/current vfsgid of @inode
2465 : *
2466 : * Check whether @vfsgid is in the caller's group list or if the caller is
2467 : * privileged with CAP_FSETID over @inode. This can be used to determine
2468 : * whether the setgid bit can be kept or must be dropped.
2469 : *
2470 : * Return: true if the caller is sufficiently privileged, false if not.
2471 : */
2472 0 : bool in_group_or_capable(struct mnt_idmap *idmap,
2473 : const struct inode *inode, vfsgid_t vfsgid)
2474 : {
2475 0 : if (vfsgid_in_group_p(vfsgid))
2476 : return true;
2477 0 : if (capable_wrt_inode_uidgid(idmap, inode, CAP_FSETID))
2478 : return true;
2479 0 : return false;
2480 : }
2481 :
2482 : /**
2483 : * mode_strip_sgid - handle the sgid bit for non-directories
2484 : * @idmap: idmap of the mount the inode was created from
2485 : * @dir: parent directory inode
2486 : * @mode: mode of the file to be created in @dir
2487 : *
2488 : * If the @mode of the new file has both the S_ISGID and S_IXGRP bit
2489 : * raised and @dir has the S_ISGID bit raised ensure that the caller is
2490 : * either in the group of the parent directory or they have CAP_FSETID
2491 : * in their user namespace and are privileged over the parent directory.
2492 : * In all other cases, strip the S_ISGID bit from @mode.
2493 : *
2494 : * Return: the new mode to use for the file
2495 : */
2496 3 : umode_t mode_strip_sgid(struct mnt_idmap *idmap,
2497 : const struct inode *dir, umode_t mode)
2498 : {
2499 3 : if ((mode & (S_ISGID | S_IXGRP)) != (S_ISGID | S_IXGRP))
2500 : return mode;
2501 0 : if (S_ISDIR(mode) || !dir || !(dir->i_mode & S_ISGID))
2502 : return mode;
2503 0 : if (in_group_or_capable(idmap, dir, i_gid_into_vfsgid(idmap, dir)))
2504 : return mode;
2505 0 : return mode & ~S_ISGID;
2506 : }
2507 : EXPORT_SYMBOL(mode_strip_sgid);
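/*
 * Worked example: creating a mode-02770 file (S_ISGID and S_IXGRP both
 * set) in a setgid directory, as a caller who is neither in the
 * directory's group nor CAP_FSETID privileged, yields mode 0770.
 * Directories, and files lacking either bit, pass through unchanged.
 */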
|