LCOV - code coverage report
Current view: top level - fs - super.c
Test: coverage.info
Date: 2023-04-06 08:38:28

               Hit      Total    Coverage
Lines:         155        736      21.1 %
Functions:      14         63      22.2 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  *  linux/fs/super.c
       4             :  *
       5             :  *  Copyright (C) 1991, 1992  Linus Torvalds
       6             :  *
       7             :  *  super.c contains code to handle: - mount structures
       8             :  *                                   - super-block tables
       9             :  *                                   - filesystem drivers list
      10             :  *                                   - mount system call
      11             :  *                                   - umount system call
      12             :  *                                   - ustat system call
      13             :  *
      14             :  * GK 2/5/95  -  Changed to support mounting the root fs via NFS
      15             :  *
      16             :  *  Added kerneld support: Jacques Gelinas and Bjorn Ekwall
      17             :  *  Added change_root: Werner Almesberger & Hans Lermen, Feb '96
      18             :  *  Added options to /proc/mounts:
      19             :  *    Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
      20             :  *  Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
      21             :  *  Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
      22             :  */
      23             : 
      24             : #include <linux/export.h>
      25             : #include <linux/slab.h>
      26             : #include <linux/blkdev.h>
      27             : #include <linux/mount.h>
      28             : #include <linux/security.h>
      29             : #include <linux/writeback.h>              /* for the emergency remount stuff */
      30             : #include <linux/idr.h>
      31             : #include <linux/mutex.h>
      32             : #include <linux/backing-dev.h>
      33             : #include <linux/rculist_bl.h>
      34             : #include <linux/fscrypt.h>
      35             : #include <linux/fsnotify.h>
      36             : #include <linux/lockdep.h>
      37             : #include <linux/user_namespace.h>
      38             : #include <linux/fs_context.h>
      39             : #include <uapi/linux/mount.h>
      40             : #include "internal.h"
      41             : 
      42             : static int thaw_super_locked(struct super_block *sb);
      43             : 
      44             : static LIST_HEAD(super_blocks);
      45             : static DEFINE_SPINLOCK(sb_lock);
      46             : 
      47             : static char *sb_writers_name[SB_FREEZE_LEVELS] = {
      48             :         "sb_writers",
      49             :         "sb_pagefaults",
      50             :         "sb_internal",
      51             : };
      52             : 
      53             : /*
      54             :  * One thing we have to be careful of with a per-sb shrinker is that we don't
      55             :  * drop the last active reference to the superblock from within the shrinker.
      56             :  * If that happens we could trigger unregistering the shrinker from within the
      57             :  * shrinker path and that leads to deadlock on the shrinker_rwsem. Hence we
      58             :  * take a passive reference to the superblock to avoid this from occurring.
      59             :  */
      60           0 : static unsigned long super_cache_scan(struct shrinker *shrink,
      61             :                                       struct shrink_control *sc)
      62             : {
      63             :         struct super_block *sb;
      64           0 :         long    fs_objects = 0;
      65             :         long    total_objects;
      66           0 :         long    freed = 0;
      67             :         long    dentries;
      68             :         long    inodes;
      69             : 
      70           0 :         sb = container_of(shrink, struct super_block, s_shrink);
      71             : 
      72             :         /*
      73             :          * Deadlock avoidance.  We may hold various FS locks, and we don't want
      74             :          * to recurse into the FS that called us in clear_inode() and friends..
      75             :          */
      76           0 :         if (!(sc->gfp_mask & __GFP_FS))
      77             :                 return SHRINK_STOP;
      78             : 
      79           0 :         if (!trylock_super(sb))
      80             :                 return SHRINK_STOP;
      81             : 
      82           0 :         if (sb->s_op->nr_cached_objects)
      83           0 :                 fs_objects = sb->s_op->nr_cached_objects(sb, sc);
      84             : 
      85           0 :         inodes = list_lru_shrink_count(&sb->s_inode_lru, sc);
      86           0 :         dentries = list_lru_shrink_count(&sb->s_dentry_lru, sc);
      87           0 :         total_objects = dentries + inodes + fs_objects + 1;
      88           0 :         if (!total_objects)
      89           0 :                 total_objects = 1;
      90             : 
      91             :         /* proportion the scan between the caches */
      92           0 :         dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
      93           0 :         inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);
      94           0 :         fs_objects = mult_frac(sc->nr_to_scan, fs_objects, total_objects);
      95             : 
      96             :         /*
      97             :          * prune the dcache first as the icache is pinned by it, then
      98             :          * prune the icache, followed by the filesystem specific caches
      99             :          *
     100             :          * Ensure that we always scan at least one object - memcg kmem
     101             :          * accounting uses this to fully empty the caches.
     102             :          */
     103           0 :         sc->nr_to_scan = dentries + 1;
     104           0 :         freed = prune_dcache_sb(sb, sc);
     105           0 :         sc->nr_to_scan = inodes + 1;
     106           0 :         freed += prune_icache_sb(sb, sc);
     107             : 
     108           0 :         if (fs_objects) {
     109           0 :                 sc->nr_to_scan = fs_objects + 1;
     110           0 :                 freed += sb->s_op->free_cached_objects(sb, sc);
     111             :         }
     112             : 
     113           0 :         up_read(&sb->s_umount);
     114           0 :         return freed;
     115             : }
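
As a rough worked illustration of the proportioning above (the numbers are assumed, not taken from a coverage run): with sc->nr_to_scan = 128 and caches holding 600 dentries, 300 inodes and 100 fs-private objects, total_objects is 1001, so roughly 76 dentries, 38 inodes and 12 fs objects get scanned (each plus one, per the code above). A minimal userspace sketch of the same arithmetic:

#include <stdio.h>

/* Same split as super_cache_scan(); the kernel's mult_frac() computes
 * x * num / denom while also avoiding intermediate overflow. */
static long mult_frac_sketch(long x, long num, long denom)
{
        return x * num / denom;
}

int main(void)
{
        long nr_to_scan = 128, dentries = 600, inodes = 300, fs_objects = 100;
        long total = dentries + inodes + fs_objects + 1;

        printf("dentries: %ld\n", mult_frac_sketch(nr_to_scan, dentries, total));
        printf("inodes:   %ld\n", mult_frac_sketch(nr_to_scan, inodes, total));
        printf("fs objs:  %ld\n", mult_frac_sketch(nr_to_scan, fs_objects, total));
        return 0;
}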
     116             : 
     117           0 : static unsigned long super_cache_count(struct shrinker *shrink,
     118             :                                        struct shrink_control *sc)
     119             : {
     120             :         struct super_block *sb;
     121           0 :         long    total_objects = 0;
     122             : 
     123           0 :         sb = container_of(shrink, struct super_block, s_shrink);
     124             : 
     125             :         /*
     126             :          * We don't call trylock_super() here as it is a scalability bottleneck,
     127             :          * so we're exposed to partial setup state. The shrinker rwsem does not
     128             :          * protect filesystem operations backing list_lru_shrink_count() or
     129             :          * s_op->nr_cached_objects(). Counts can change between
     130             :          * super_cache_count and super_cache_scan, so we really don't need locks
     131             :          * here.
     132             :          *
     133             :          * However, if we are currently mounting the superblock, the underlying
     134             :          * filesystem might be in a state of partial construction and hence it
     135             :          * is dangerous to access it.  trylock_super() uses a SB_BORN check to
     136             :          * avoid this situation, so do the same here. The memory barrier is
      137             :          * matched with the one in vfs_get_tree() as we don't hold locks here.
     138             :          */
     139           0 :         if (!(sb->s_flags & SB_BORN))
     140             :                 return 0;
     141           0 :         smp_rmb();
     142             : 
     143           0 :         if (sb->s_op && sb->s_op->nr_cached_objects)
     144           0 :                 total_objects = sb->s_op->nr_cached_objects(sb, sc);
     145             : 
     146           0 :         total_objects += list_lru_shrink_count(&sb->s_dentry_lru, sc);
     147           0 :         total_objects += list_lru_shrink_count(&sb->s_inode_lru, sc);
     148             : 
     149           0 :         if (!total_objects)
     150             :                 return SHRINK_EMPTY;
     151             : 
     152           0 :         total_objects = vfs_pressure_ratio(total_objects);
     153           0 :         return total_objects;
     154             : }
     155             : 
     156          17 : static void destroy_super_work(struct work_struct *work)
     157             : {
     158          17 :         struct super_block *s = container_of(work, struct super_block,
     159             :                                                         destroy_work);
     160             :         int i;
     161             : 
     162          68 :         for (i = 0; i < SB_FREEZE_LEVELS; i++)
     163          51 :                 percpu_free_rwsem(&s->s_writers.rw_sem[i]);
     164          17 :         kfree(s);
     165          17 : }
     166             : 
     167          17 : static void destroy_super_rcu(struct rcu_head *head)
     168             : {
     169          17 :         struct super_block *s = container_of(head, struct super_block, rcu);
     170          34 :         INIT_WORK(&s->destroy_work, destroy_super_work);
     171          34 :         schedule_work(&s->destroy_work);
     172          17 : }
     173             : 
     174             : /* Free a superblock that has never been seen by anyone */
     175           0 : static void destroy_unused_super(struct super_block *s)
     176             : {
     177           0 :         if (!s)
     178             :                 return;
     179           0 :         up_write(&s->s_umount);
     180           0 :         list_lru_destroy(&s->s_dentry_lru);
     181           0 :         list_lru_destroy(&s->s_inode_lru);
     182           0 :         security_sb_free(s);
     183           0 :         put_user_ns(s->s_user_ns);
     184           0 :         kfree(s->s_subtype);
     185           0 :         free_prealloced_shrinker(&s->s_shrink);
     186             :         /* no delays needed */
     187           0 :         destroy_super_work(&s->destroy_work);
     188             : }
     189             : 
     190             : /**
     191             :  *      alloc_super     -       create new superblock
     192             :  *      @type:  filesystem type superblock should belong to
     193             :  *      @flags: the mount flags
     194             :  *      @user_ns: User namespace for the super_block
     195             :  *
     196             :  *      Allocates and initializes a new &struct super_block.  alloc_super()
      197             :  *      returns a pointer to the new superblock or %NULL if allocation failed.
     198             :  */
     199          27 : static struct super_block *alloc_super(struct file_system_type *type, int flags,
     200             :                                        struct user_namespace *user_ns)
     201             : {
     202          27 :         struct super_block *s = kzalloc(sizeof(struct super_block),  GFP_USER);
     203             :         static const struct super_operations default_op;
     204             :         int i;
     205             : 
     206          27 :         if (!s)
     207             :                 return NULL;
     208             : 
     209          54 :         INIT_LIST_HEAD(&s->s_mounts);
     210          27 :         s->s_user_ns = get_user_ns(user_ns);
     211          27 :         init_rwsem(&s->s_umount);
     212             :         lockdep_set_class(&s->s_umount, &type->s_umount_key);
     213             :         /*
     214             :          * sget() can have s_umount recursion.
     215             :          *
     216             :          * When it cannot find a suitable sb, it allocates a new
     217             :          * one (this one), and tries again to find a suitable old
     218             :          * one.
     219             :          *
     220             :          * In case that succeeds, it will acquire the s_umount
      221             :          * lock of the old one. Since these are clearly distinct
     222             :          * locks, and this object isn't exposed yet, there's no
     223             :          * risk of deadlocks.
     224             :          *
     225             :          * Annotate this by putting this lock in a different
     226             :          * subclass.
     227             :          */
     228          27 :         down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
     229             : 
     230          27 :         if (security_sb_alloc(s))
     231             :                 goto fail;
     232             : 
     233         108 :         for (i = 0; i < SB_FREEZE_LEVELS; i++) {
     234         162 :                 if (__percpu_init_rwsem(&s->s_writers.rw_sem[i],
     235          81 :                                         sb_writers_name[i],
     236             :                                         &type->s_writers_key[i]))
     237             :                         goto fail;
     238             :         }
     239          27 :         init_waitqueue_head(&s->s_writers.wait_unfrozen);
     240          27 :         s->s_bdi = &noop_backing_dev_info;
     241          27 :         s->s_flags = flags;
     242          27 :         if (s->s_user_ns != &init_user_ns)
     243           0 :                 s->s_iflags |= SB_I_NODEV;
     244          54 :         INIT_HLIST_NODE(&s->s_instances);
     245          27 :         INIT_HLIST_BL_HEAD(&s->s_roots);
     246          27 :         mutex_init(&s->s_sync_lock);
     247          54 :         INIT_LIST_HEAD(&s->s_inodes);
     248          27 :         spin_lock_init(&s->s_inode_list_lock);
     249          54 :         INIT_LIST_HEAD(&s->s_inodes_wb);
     250          27 :         spin_lock_init(&s->s_inode_wblist_lock);
     251             : 
     252          27 :         s->s_count = 1;
     253          54 :         atomic_set(&s->s_active, 1);
     254          27 :         mutex_init(&s->s_vfs_rename_mutex);
     255             :         lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
     256          27 :         init_rwsem(&s->s_dquot.dqio_sem);
     257          27 :         s->s_maxbytes = MAX_NON_LFS;
     258          27 :         s->s_op = &default_op;
     259          27 :         s->s_time_gran = 1000000000;
     260          27 :         s->s_time_min = TIME64_MIN;
     261          27 :         s->s_time_max = TIME64_MAX;
     262             : 
     263          27 :         s->s_shrink.seeks = DEFAULT_SEEKS;
     264          27 :         s->s_shrink.scan_objects = super_cache_scan;
     265          27 :         s->s_shrink.count_objects = super_cache_count;
     266          27 :         s->s_shrink.batch = 1024;
     267          27 :         s->s_shrink.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE;
     268          27 :         if (prealloc_shrinker(&s->s_shrink, "sb-%s", type->name))
     269             :                 goto fail;
     270          27 :         if (list_lru_init_memcg(&s->s_dentry_lru, &s->s_shrink))
     271             :                 goto fail;
     272          27 :         if (list_lru_init_memcg(&s->s_inode_lru, &s->s_shrink))
     273             :                 goto fail;
     274             :         return s;
     275             : 
     276             : fail:
     277           0 :         destroy_unused_super(s);
     278             :         return NULL;
     279             : }
     280             : 
     281             : /* Superblock refcounting  */
     282             : 
     283             : /*
     284             :  * Drop a superblock's refcount.  The caller must hold sb_lock.
     285             :  */
     286          17 : static void __put_super(struct super_block *s)
     287             : {
     288          17 :         if (!--s->s_count) {
     289          34 :                 list_del_init(&s->s_list);
     290          17 :                 WARN_ON(s->s_dentry_lru.node);
     291          17 :                 WARN_ON(s->s_inode_lru.node);
     292          17 :                 WARN_ON(!list_empty(&s->s_mounts));
     293          17 :                 security_sb_free(s);
     294          17 :                 put_user_ns(s->s_user_ns);
     295          17 :                 kfree(s->s_subtype);
     296          17 :                 call_rcu(&s->rcu, destroy_super_rcu);
     297             :         }
     298          17 : }
     299             : 
     300             : /**
     301             :  *      put_super       -       drop a temporary reference to superblock
     302             :  *      @sb: superblock in question
     303             :  *
      304             :  *      Drops a temporary reference, frees the superblock if there are no
     305             :  *      references left.
     306             :  */
     307           0 : void put_super(struct super_block *sb)
     308             : {
     309          17 :         spin_lock(&sb_lock);
     310          17 :         __put_super(sb);
     311          17 :         spin_unlock(&sb_lock);
     312           0 : }
     313             : 
     314             : 
     315             : /**
     316             :  *      deactivate_locked_super -       drop an active reference to superblock
     317             :  *      @s: superblock to deactivate
     318             :  *
     319             :  *      Drops an active reference to superblock, converting it into a temporary
      320             :  *      one if there are no other active references left.  In that case we
     321             :  *      tell fs driver to shut it down and drop the temporary reference we
     322             :  *      had just acquired.
     323             :  *
     324             :  *      Caller holds exclusive lock on superblock; that lock is released.
     325             :  */
     326          17 : void deactivate_locked_super(struct super_block *s)
     327             : {
     328          17 :         struct file_system_type *fs = s->s_type;
     329          34 :         if (atomic_dec_and_test(&s->s_active)) {
     330          17 :                 unregister_shrinker(&s->s_shrink);
     331          17 :                 fs->kill_sb(s);
     332             : 
     333             :                 /*
     334             :                  * Since list_lru_destroy() may sleep, we cannot call it from
     335             :                  * put_super(), where we hold the sb_lock. Therefore we destroy
     336             :                  * the lru lists right now.
     337             :                  */
     338          17 :                 list_lru_destroy(&s->s_dentry_lru);
     339          17 :                 list_lru_destroy(&s->s_inode_lru);
     340             : 
     341          17 :                 put_filesystem(fs);
     342             :                 put_super(s);
     343             :         } else {
     344           0 :                 up_write(&s->s_umount);
     345             :         }
     346          17 : }
     347             : 
     348             : EXPORT_SYMBOL(deactivate_locked_super);
     349             : 
     350             : /**
     351             :  *      deactivate_super        -       drop an active reference to superblock
     352             :  *      @s: superblock to deactivate
     353             :  *
     354             :  *      Variant of deactivate_locked_super(), except that superblock is *not*
     355             :  *      locked by caller.  If we are going to drop the final active reference,
     356             :  *      lock will be acquired prior to that.
     357             :  */
     358          44 : void deactivate_super(struct super_block *s)
     359             : {
     360          88 :         if (!atomic_add_unless(&s->s_active, -1, 1)) {
     361          17 :                 down_write(&s->s_umount);
     362          17 :                 deactivate_locked_super(s);
     363             :         }
     364          44 : }
     365             : 
     366             : EXPORT_SYMBOL(deactivate_super);
     367             : 
     368             : /**
     369             :  *      grab_super - acquire an active reference
     370             :  *      @s: reference we are trying to make active
     371             :  *
     372             :  *      Tries to acquire an active reference.  grab_super() is used when we
     373             :  *      had just found a superblock in super_blocks or fs_type->fs_supers
     374             :  *      and want to turn it into a full-blown active reference.  grab_super()
     375             :  *      is called with sb_lock held and drops it.  Returns 1 in case of
      376             :  *      success, 0 if we failed (the superblock was already dead or
      377             :  *      dying when grab_super() was called).  Note that this is only
     378             :  *      called for superblocks not in rundown mode (== ones still on ->fs_supers
     379             :  *      of their type), so increment of ->s_count is OK here.
     380             :  */
     381           0 : static int grab_super(struct super_block *s) __releases(sb_lock)
     382             : {
     383           0 :         s->s_count++;
     384           0 :         spin_unlock(&sb_lock);
     385           0 :         down_write(&s->s_umount);
     386           0 :         if ((s->s_flags & SB_BORN) && atomic_inc_not_zero(&s->s_active)) {
     387           0 :                 put_super(s);
     388           0 :                 return 1;
     389             :         }
     390           0 :         up_write(&s->s_umount);
     391           0 :         put_super(s);
     392           0 :         return 0;
     393             : }
     394             : 
     395             : /*
     396             :  *      trylock_super - try to grab ->s_umount shared
     397             :  *      @sb: reference we are trying to grab
     398             :  *
     399             :  *      Try to prevent fs shutdown.  This is used in places where we
     400             :  *      cannot take an active reference but we need to ensure that the
     401             :  *      filesystem is not shut down while we are working on it. It returns
      402             :  *      false if we cannot acquire s_umount or if we lose the race and the
      403             :  *      filesystem has already gone into shutdown, and returns true with the s_umount
     404             :  *      lock held in read mode in case of success. On successful return,
     405             :  *      the caller must drop the s_umount lock when done.
     406             :  *
      407             :  *      Note that unlike get_super() et al. this one does *not* bump ->s_count.
     408             :  *      The reason why it's safe is that we are OK with doing trylock instead
     409             :  *      of down_read().  There's a couple of places that are OK with that, but
     410             :  *      it's very much not a general-purpose interface.
     411             :  */
     412           0 : bool trylock_super(struct super_block *sb)
     413             : {
     414           0 :         if (down_read_trylock(&sb->s_umount)) {
     415           0 :                 if (!hlist_unhashed(&sb->s_instances) &&
     416           0 :                     sb->s_root && (sb->s_flags & SB_BORN))
     417             :                         return true;
     418           0 :                 up_read(&sb->s_umount);
     419             :         }
     420             : 
     421             :         return false;
     422             : }
     423             : 
     424             : /**
     425             :  *      retire_super    -       prevents superblock from being reused
     426             :  *      @sb: superblock to retire
     427             :  *
      428             :  *      The function marks the superblock to be ignored in the superblock test, which
     429             :  *      prevents it from being reused for any new mounts.  If the superblock has
     430             :  *      a private bdi, it also unregisters it, but doesn't reduce the refcount
     431             :  *      of the superblock to prevent potential races.  The refcount is reduced
      432             :  *      by generic_shutdown_super().  The function cannot be called
     433             :  *      concurrently with generic_shutdown_super().  It is safe to call the
      434             :  *      function multiple times; subsequent calls have no effect.
     435             :  *
     436             :  *      The marker will affect the re-use only for block-device-based
     437             :  *      superblocks.  Other superblocks will still get marked if this function
     438             :  *      is used, but that will not affect their reusability.
     439             :  */
     440           0 : void retire_super(struct super_block *sb)
     441             : {
     442           0 :         WARN_ON(!sb->s_bdev);
     443           0 :         down_write(&sb->s_umount);
     444           0 :         if (sb->s_iflags & SB_I_PERSB_BDI) {
     445           0 :                 bdi_unregister(sb->s_bdi);
     446           0 :                 sb->s_iflags &= ~SB_I_PERSB_BDI;
     447             :         }
     448           0 :         sb->s_iflags |= SB_I_RETIRED;
     449           0 :         up_write(&sb->s_umount);
     450           0 : }
     451             : EXPORT_SYMBOL(retire_super);
     452             : 
     453             : /**
     454             :  *      generic_shutdown_super  -       common helper for ->kill_sb()
     455             :  *      @sb: superblock to kill
     456             :  *
     457             :  *      generic_shutdown_super() does all fs-independent work on superblock
     458             :  *      shutdown.  Typical ->kill_sb() should pick all fs-specific objects
     459             :  *      that need destruction out of superblock, call generic_shutdown_super()
     460             :  *      and release aforementioned objects.  Note: dentries and inodes _are_
     461             :  *      taken care of and do not need specific handling.
     462             :  *
     463             :  *      Upon calling this function, the filesystem may no longer alter or
     464             :  *      rearrange the set of dentries belonging to this super_block, nor may it
     465             :  *      change the attachments of dentries to inodes.
     466             :  */
     467          17 : void generic_shutdown_super(struct super_block *sb)
     468             : {
     469          17 :         const struct super_operations *sop = sb->s_op;
     470             : 
     471          17 :         if (sb->s_root) {
     472          17 :                 shrink_dcache_for_umount(sb);
     473          17 :                 sync_filesystem(sb);
     474          17 :                 sb->s_flags &= ~SB_ACTIVE;
     475             : 
     476             :                 cgroup_writeback_umount();
     477             : 
     478             :                 /* evict all inodes with zero refcount */
     479          17 :                 evict_inodes(sb);
     480             :                 /* only nonzero refcount inodes can have marks */
     481          17 :                 fsnotify_sb_delete(sb);
     482          17 :                 fscrypt_destroy_keyring(sb);
     483          17 :                 security_sb_delete(sb);
     484             : 
     485          17 :                 if (sb->s_dio_done_wq) {
     486           0 :                         destroy_workqueue(sb->s_dio_done_wq);
     487           0 :                         sb->s_dio_done_wq = NULL;
     488             :                 }
     489             : 
     490          17 :                 if (sop->put_super)
     491           0 :                         sop->put_super(sb);
     492             : 
     493          34 :                 if (CHECK_DATA_CORRUPTION(!list_empty(&sb->s_inodes),
     494             :                                 "VFS: Busy inodes after unmount of %s (%s)",
     495             :                                 sb->s_id, sb->s_type->name)) {
     496             :                         /*
     497             :                          * Adding a proper bailout path here would be hard, but
     498             :                          * we can at least make it more likely that a later
     499             :                          * iput_final() or such crashes cleanly.
     500             :                          */
     501             :                         struct inode *inode;
     502             : 
     503           0 :                         spin_lock(&sb->s_inode_list_lock);
     504           0 :                         list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
     505           0 :                                 inode->i_op = VFS_PTR_POISON;
     506           0 :                                 inode->i_sb = VFS_PTR_POISON;
     507           0 :                                 inode->i_mapping = VFS_PTR_POISON;
     508             :                         }
     509           0 :                         spin_unlock(&sb->s_inode_list_lock);
     510             :                 }
     511             :         }
     512          17 :         spin_lock(&sb_lock);
     513             :         /* should be initialized for __put_super_and_need_restart() */
     514          34 :         hlist_del_init(&sb->s_instances);
     515          17 :         spin_unlock(&sb_lock);
     516          17 :         up_write(&sb->s_umount);
     517          17 :         if (sb->s_bdi != &noop_backing_dev_info) {
     518           0 :                 if (sb->s_iflags & SB_I_PERSB_BDI)
     519           0 :                         bdi_unregister(sb->s_bdi);
     520           0 :                 bdi_put(sb->s_bdi);
     521           0 :                 sb->s_bdi = &noop_backing_dev_info;
     522             :         }
     523          17 : }
     524             : 
     525             : EXPORT_SYMBOL(generic_shutdown_super);
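
A minimal sketch of the ->kill_sb() pattern described above, for a hypothetical filesystem that keeps private state in sb->s_fs_info (the myfs_* names and struct myfs_sb_info are assumptions, not part of this file):

static void myfs_kill_sb(struct super_block *sb)
{
        struct myfs_sb_info *info = sb->s_fs_info;      /* fs-private state */

        generic_shutdown_super(sb);     /* tears down dentries, inodes, bdi */
        kfree(info);                    /* only then release fs-private objects */
}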
     526             : 
     527           0 : bool mount_capable(struct fs_context *fc)
     528             : {
     529           0 :         if (!(fc->fs_type->fs_flags & FS_USERNS_MOUNT))
     530           0 :                 return capable(CAP_SYS_ADMIN);
     531             :         else
     532           0 :                 return ns_capable(fc->user_ns, CAP_SYS_ADMIN);
     533             : }
     534             : 
     535             : /**
     536             :  * sget_fc - Find or create a superblock
     537             :  * @fc: Filesystem context.
     538             :  * @test: Comparison callback
     539             :  * @set: Setup callback
     540             :  *
     541             :  * Find or create a superblock using the parameters stored in the filesystem
     542             :  * context and the two callback functions.
     543             :  *
     544             :  * If an extant superblock is matched, then that will be returned with an
     545             :  * elevated reference count that the caller must transfer or discard.
     546             :  *
     547             :  * If no match is made, a new superblock will be allocated and basic
     548             :  * initialisation will be performed (s_type, s_fs_info and s_id will be set and
     549             :  * the set() callback will be invoked), the superblock will be published and it
     550             :  * will be returned in a partially constructed state with SB_BORN and SB_ACTIVE
     551             :  * as yet unset.
     552             :  */
     553          27 : struct super_block *sget_fc(struct fs_context *fc,
     554             :                             int (*test)(struct super_block *, struct fs_context *),
     555             :                             int (*set)(struct super_block *, struct fs_context *))
     556             : {
     557          27 :         struct super_block *s = NULL;
     558             :         struct super_block *old;
     559          27 :         struct user_namespace *user_ns = fc->global ? &init_user_ns : fc->user_ns;
     560             :         int err;
     561             : 
     562             : retry:
     563          54 :         spin_lock(&sb_lock);
     564          54 :         if (test) {
     565           0 :                 hlist_for_each_entry(old, &fc->fs_type->fs_supers, s_instances) {
     566           0 :                         if (test(old, fc))
     567             :                                 goto share_extant_sb;
     568             :                 }
     569             :         }
     570          54 :         if (!s) {
     571          27 :                 spin_unlock(&sb_lock);
     572          27 :                 s = alloc_super(fc->fs_type, fc->sb_flags, user_ns);
     573          27 :                 if (!s)
     574             :                         return ERR_PTR(-ENOMEM);
     575             :                 goto retry;
     576             :         }
     577             : 
     578          27 :         s->s_fs_info = fc->s_fs_info;
     579          27 :         err = set(s, fc);
     580          27 :         if (err) {
     581           0 :                 s->s_fs_info = NULL;
     582           0 :                 spin_unlock(&sb_lock);
     583           0 :                 destroy_unused_super(s);
     584           0 :                 return ERR_PTR(err);
     585             :         }
     586          27 :         fc->s_fs_info = NULL;
     587          27 :         s->s_type = fc->fs_type;
     588          27 :         s->s_iflags |= fc->s_iflags;
     589          27 :         strlcpy(s->s_id, s->s_type->name, sizeof(s->s_id));
     590          54 :         list_add_tail(&s->s_list, &super_blocks);
     591          54 :         hlist_add_head(&s->s_instances, &s->s_type->fs_supers);
     592          27 :         spin_unlock(&sb_lock);
     593          27 :         get_filesystem(s->s_type);
     594          27 :         register_shrinker_prepared(&s->s_shrink);
     595          27 :         return s;
     596             : 
     597             : share_extant_sb:
     598           0 :         if (user_ns != old->s_user_ns) {
     599           0 :                 spin_unlock(&sb_lock);
     600           0 :                 destroy_unused_super(s);
     601           0 :                 return ERR_PTR(-EBUSY);
     602             :         }
     603           0 :         if (!grab_super(old))
     604             :                 goto retry;
     605           0 :         destroy_unused_super(s);
     606           0 :         return old;
     607             : }
     608             : EXPORT_SYMBOL(sget_fc);
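
A hedged sketch of how a filesystem's ->get_tree() might drive sget_fc(). The myfs_* callbacks and myfs_fill_super() are invented for illustration; set_anon_super() is the existing helper for assigning an anonymous device number. sget_fc() returns with s_umount held for write, so the error path uses deactivate_locked_super(), and SB_BORN is left for the VFS core to set once the tree has been returned:

/* Match superblocks keyed by the same private info, as in the keyed-super case. */
static int myfs_test_super(struct super_block *sb, struct fs_context *fc)
{
        return sb->s_fs_info == fc->s_fs_info;
}

static int myfs_set_super(struct super_block *sb, struct fs_context *fc)
{
        return set_anon_super(sb, NULL);
}

static int myfs_get_tree(struct fs_context *fc)
{
        struct super_block *sb;
        int err;

        sb = sget_fc(fc, myfs_test_super, myfs_set_super);
        if (IS_ERR(sb))
                return PTR_ERR(sb);

        if (!sb->s_root) {
                /* Freshly allocated: finish construction, then mark it live. */
                err = myfs_fill_super(sb, fc);          /* hypothetical helper */
                if (err) {
                        deactivate_locked_super(sb);
                        return err;
                }
                sb->s_flags |= SB_ACTIVE;
        }

        fc->root = dget(sb->s_root);
        return 0;
}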
     609             : 
     610             : /**
     611             :  *      sget    -       find or create a superblock
     612             :  *      @type:    filesystem type superblock should belong to
     613             :  *      @test:    comparison callback
     614             :  *      @set:     setup callback
     615             :  *      @flags:   mount flags
     616             :  *      @data:    argument to each of them
     617             :  */
     618           0 : struct super_block *sget(struct file_system_type *type,
     619             :                         int (*test)(struct super_block *,void *),
     620             :                         int (*set)(struct super_block *,void *),
     621             :                         int flags,
     622             :                         void *data)
     623             : {
     624           0 :         struct user_namespace *user_ns = current_user_ns();
     625           0 :         struct super_block *s = NULL;
     626             :         struct super_block *old;
     627             :         int err;
     628             : 
     629             :         /* We don't yet pass the user namespace of the parent
     630             :          * mount through to here so always use &init_user_ns
     631             :          * until that changes.
     632             :          */
     633             :         if (flags & SB_SUBMOUNT)
     634             :                 user_ns = &init_user_ns;
     635             : 
     636             : retry:
     637           0 :         spin_lock(&sb_lock);
     638           0 :         if (test) {
     639           0 :                 hlist_for_each_entry(old, &type->fs_supers, s_instances) {
     640           0 :                         if (!test(old, data))
     641           0 :                                 continue;
     642           0 :                         if (user_ns != old->s_user_ns) {
     643           0 :                                 spin_unlock(&sb_lock);
     644           0 :                                 destroy_unused_super(s);
     645           0 :                                 return ERR_PTR(-EBUSY);
     646             :                         }
     647           0 :                         if (!grab_super(old))
     648             :                                 goto retry;
     649           0 :                         destroy_unused_super(s);
     650           0 :                         return old;
     651             :                 }
     652             :         }
     653           0 :         if (!s) {
     654           0 :                 spin_unlock(&sb_lock);
     655           0 :                 s = alloc_super(type, (flags & ~SB_SUBMOUNT), user_ns);
     656           0 :                 if (!s)
     657             :                         return ERR_PTR(-ENOMEM);
     658             :                 goto retry;
     659             :         }
     660             : 
     661           0 :         err = set(s, data);
     662           0 :         if (err) {
     663           0 :                 spin_unlock(&sb_lock);
     664           0 :                 destroy_unused_super(s);
     665           0 :                 return ERR_PTR(err);
     666             :         }
     667           0 :         s->s_type = type;
     668           0 :         strlcpy(s->s_id, type->name, sizeof(s->s_id));
     669           0 :         list_add_tail(&s->s_list, &super_blocks);
     670           0 :         hlist_add_head(&s->s_instances, &type->fs_supers);
     671           0 :         spin_unlock(&sb_lock);
     672           0 :         get_filesystem(type);
     673           0 :         register_shrinker_prepared(&s->s_shrink);
     674           0 :         return s;
     675             : }
     676             : EXPORT_SYMBOL(sget);
     677             : 
     678           0 : void drop_super(struct super_block *sb)
     679             : {
     680           0 :         up_read(&sb->s_umount);
     681           0 :         put_super(sb);
     682           0 : }
     683             : 
     684             : EXPORT_SYMBOL(drop_super);
     685             : 
     686           0 : void drop_super_exclusive(struct super_block *sb)
     687             : {
     688           0 :         up_write(&sb->s_umount);
     689           0 :         put_super(sb);
     690           0 : }
     691             : EXPORT_SYMBOL(drop_super_exclusive);
     692             : 
     693           0 : static void __iterate_supers(void (*f)(struct super_block *))
     694             : {
     695           0 :         struct super_block *sb, *p = NULL;
     696             : 
     697           0 :         spin_lock(&sb_lock);
     698           0 :         list_for_each_entry(sb, &super_blocks, s_list) {
     699           0 :                 if (hlist_unhashed(&sb->s_instances))
     700           0 :                         continue;
     701           0 :                 sb->s_count++;
     702           0 :                 spin_unlock(&sb_lock);
     703             : 
     704           0 :                 f(sb);
     705             : 
     706           0 :                 spin_lock(&sb_lock);
     707           0 :                 if (p)
     708           0 :                         __put_super(p);
     709             :                 p = sb;
     710             :         }
     711           0 :         if (p)
     712           0 :                 __put_super(p);
     713           0 :         spin_unlock(&sb_lock);
     714           0 : }
     715             : /**
     716             :  *      iterate_supers - call function for all active superblocks
     717             :  *      @f: function to call
     718             :  *      @arg: argument to pass to it
     719             :  *
     720             :  *      Scans the superblock list and calls given function, passing it
     721             :  *      locked superblock and given argument.
     722             :  */
     723           0 : void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
     724             : {
     725           0 :         struct super_block *sb, *p = NULL;
     726             : 
     727           0 :         spin_lock(&sb_lock);
     728           0 :         list_for_each_entry(sb, &super_blocks, s_list) {
     729           0 :                 if (hlist_unhashed(&sb->s_instances))
     730           0 :                         continue;
     731           0 :                 sb->s_count++;
     732           0 :                 spin_unlock(&sb_lock);
     733             : 
     734           0 :                 down_read(&sb->s_umount);
     735           0 :                 if (sb->s_root && (sb->s_flags & SB_BORN))
     736           0 :                         f(sb, arg);
     737           0 :                 up_read(&sb->s_umount);
     738             : 
     739           0 :                 spin_lock(&sb_lock);
     740           0 :                 if (p)
     741           0 :                         __put_super(p);
     742             :                 p = sb;
     743             :         }
     744           0 :         if (p)
     745           0 :                 __put_super(p);
     746           0 :         spin_unlock(&sb_lock);
     747           0 : }
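
For context, a hedged sketch in the style of the callers in fs/sync.c (the names here are assumed, not copied): iterate_supers() already takes s_umount shared and checks SB_BORN, so the callback only has to do the per-superblock work:

static void sync_one_sb(struct super_block *sb, void *arg)
{
        int wait = *(int *)arg;

        if (!sb_rdonly(sb) && sb->s_op->sync_fs)
                sb->s_op->sync_fs(sb, wait);
}

static void sync_all_supers(int wait)
{
        iterate_supers(sync_one_sb, &wait);
}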
     748             : 
     749             : /**
     750             :  *      iterate_supers_type - call function for superblocks of given type
     751             :  *      @type: fs type
     752             :  *      @f: function to call
     753             :  *      @arg: argument to pass to it
     754             :  *
     755             :  *      Scans the superblock list and calls given function, passing it
     756             :  *      locked superblock and given argument.
     757             :  */
     758           0 : void iterate_supers_type(struct file_system_type *type,
     759             :         void (*f)(struct super_block *, void *), void *arg)
     760             : {
     761           0 :         struct super_block *sb, *p = NULL;
     762             : 
     763           0 :         spin_lock(&sb_lock);
     764           0 :         hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
     765           0 :                 sb->s_count++;
     766           0 :                 spin_unlock(&sb_lock);
     767             : 
     768           0 :                 down_read(&sb->s_umount);
     769           0 :                 if (sb->s_root && (sb->s_flags & SB_BORN))
     770           0 :                         f(sb, arg);
     771           0 :                 up_read(&sb->s_umount);
     772             : 
     773           0 :                 spin_lock(&sb_lock);
     774           0 :                 if (p)
     775           0 :                         __put_super(p);
     776           0 :                 p = sb;
     777             :         }
     778           0 :         if (p)
     779           0 :                 __put_super(p);
     780           0 :         spin_unlock(&sb_lock);
     781           0 : }
     782             : 
     783             : EXPORT_SYMBOL(iterate_supers_type);
     784             : 
     785             : /**
     786             :  * get_super - get the superblock of a device
     787             :  * @bdev: device to get the superblock for
     788             :  *
     789             :  * Scans the superblock list and finds the superblock of the file system
     790             :  * mounted on the device given. %NULL is returned if no match is found.
     791             :  */
     792           0 : struct super_block *get_super(struct block_device *bdev)
     793             : {
     794             :         struct super_block *sb;
     795             : 
     796           0 :         if (!bdev)
     797             :                 return NULL;
     798             : 
     799             :         spin_lock(&sb_lock);
     800             : rescan:
     801           0 :         list_for_each_entry(sb, &super_blocks, s_list) {
     802           0 :                 if (hlist_unhashed(&sb->s_instances))
     803           0 :                         continue;
     804           0 :                 if (sb->s_bdev == bdev) {
     805           0 :                         sb->s_count++;
     806           0 :                         spin_unlock(&sb_lock);
     807           0 :                         down_read(&sb->s_umount);
     808             :                         /* still alive? */
     809           0 :                         if (sb->s_root && (sb->s_flags & SB_BORN))
     810             :                                 return sb;
     811           0 :                         up_read(&sb->s_umount);
     812             :                         /* nope, got unmounted */
     813           0 :                         spin_lock(&sb_lock);
     814           0 :                         __put_super(sb);
     815           0 :                         goto rescan;
     816             :                 }
     817             :         }
     818           0 :         spin_unlock(&sb_lock);
     819           0 :         return NULL;
     820             : }
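
A brief usage sketch, loosely modelled on the block layer's fsync_bdev() (the helper name is assumed): get_super() returns the superblock with s_umount held for read and s_count elevated, and drop_super() releases both:

static int sync_super_on_bdev(struct block_device *bdev)
{
        struct super_block *sb = get_super(bdev);
        int ret = 0;

        if (sb) {
                ret = sync_filesystem(sb);      /* sb cannot be shut down here */
                drop_super(sb);
        }
        return ret;
}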
     821             : 
     822             : /**
     823             :  * get_active_super - get an active reference to the superblock of a device
     824             :  * @bdev: device to get the superblock for
     825             :  *
     826             :  * Scans the superblock list and finds the superblock of the file system
     827             :  * mounted on the device given.  Returns the superblock with an active
     828             :  * reference or %NULL if none was found.
     829             :  */
     830           0 : struct super_block *get_active_super(struct block_device *bdev)
     831             : {
     832             :         struct super_block *sb;
     833             : 
     834           0 :         if (!bdev)
     835             :                 return NULL;
     836             : 
     837             : restart:
     838           0 :         spin_lock(&sb_lock);
     839           0 :         list_for_each_entry(sb, &super_blocks, s_list) {
     840           0 :                 if (hlist_unhashed(&sb->s_instances))
     841           0 :                         continue;
     842           0 :                 if (sb->s_bdev == bdev) {
     843           0 :                         if (!grab_super(sb))
     844             :                                 goto restart;
     845           0 :                         up_write(&sb->s_umount);
     846           0 :                         return sb;
     847             :                 }
     848             :         }
     849           0 :         spin_unlock(&sb_lock);
     850           0 :         return NULL;
     851             : }
     852             : 
     853           0 : struct super_block *user_get_super(dev_t dev, bool excl)
     854             : {
     855             :         struct super_block *sb;
     856             : 
     857             :         spin_lock(&sb_lock);
     858             : rescan:
     859           0 :         list_for_each_entry(sb, &super_blocks, s_list) {
     860           0 :                 if (hlist_unhashed(&sb->s_instances))
     861           0 :                         continue;
     862           0 :                 if (sb->s_dev ==  dev) {
     863           0 :                         sb->s_count++;
     864           0 :                         spin_unlock(&sb_lock);
     865           0 :                         if (excl)
     866           0 :                                 down_write(&sb->s_umount);
     867             :                         else
     868           0 :                                 down_read(&sb->s_umount);
     869             :                         /* still alive? */
     870           0 :                         if (sb->s_root && (sb->s_flags & SB_BORN))
     871             :                                 return sb;
     872           0 :                         if (excl)
     873           0 :                                 up_write(&sb->s_umount);
     874             :                         else
     875           0 :                                 up_read(&sb->s_umount);
     876             :                         /* nope, got unmounted */
     877           0 :                         spin_lock(&sb_lock);
     878           0 :                         __put_super(sb);
     879           0 :                         goto rescan;
     880             :                 }
     881             :         }
     882           0 :         spin_unlock(&sb_lock);
     883           0 :         return NULL;
     884             : }
     885             : 
     886             : /**
     887             :  * reconfigure_super - asks filesystem to change superblock parameters
     888             :  * @fc: The superblock and configuration
     889             :  *
     890             :  * Alters the configuration parameters of a live superblock.
     891             :  */
     892           0 : int reconfigure_super(struct fs_context *fc)
     893             : {
     894           0 :         struct super_block *sb = fc->root->d_sb;
     895             :         int retval;
     896           0 :         bool remount_ro = false;
     897           0 :         bool force = fc->sb_flags & SB_FORCE;
     898             : 
     899           0 :         if (fc->sb_flags_mask & ~MS_RMT_MASK)
     900             :                 return -EINVAL;
     901           0 :         if (sb->s_writers.frozen != SB_UNFROZEN)
     902             :                 return -EBUSY;
     903             : 
     904           0 :         retval = security_sb_remount(sb, fc->security);
     905             :         if (retval)
     906             :                 return retval;
     907             : 
     908           0 :         if (fc->sb_flags_mask & SB_RDONLY) {
     909             : #ifdef CONFIG_BLOCK
     910           0 :                 if (!(fc->sb_flags & SB_RDONLY) && sb->s_bdev &&
     911           0 :                     bdev_read_only(sb->s_bdev))
     912             :                         return -EACCES;
     913             : #endif
     914             : 
     915           0 :                 remount_ro = (fc->sb_flags & SB_RDONLY) && !sb_rdonly(sb);
     916             :         }
     917             : 
     918           0 :         if (remount_ro) {
     919           0 :                 if (!hlist_empty(&sb->s_pins)) {
     920           0 :                         up_write(&sb->s_umount);
     921           0 :                         group_pin_kill(&sb->s_pins);
     922           0 :                         down_write(&sb->s_umount);
     923           0 :                         if (!sb->s_root)
     924             :                                 return 0;
     925           0 :                         if (sb->s_writers.frozen != SB_UNFROZEN)
     926             :                                 return -EBUSY;
     927           0 :                         remount_ro = !sb_rdonly(sb);
     928             :                 }
     929             :         }
     930           0 :         shrink_dcache_sb(sb);
     931             : 
     932             :         /* If we are reconfiguring to RDONLY and current sb is read/write,
     933             :          * make sure there are no files open for writing.
     934             :          */
     935           0 :         if (remount_ro) {
     936           0 :                 if (force) {
     937           0 :                         sb->s_readonly_remount = 1;
     938           0 :                         smp_wmb();
     939             :                 } else {
     940           0 :                         retval = sb_prepare_remount_readonly(sb);
     941           0 :                         if (retval)
     942             :                                 return retval;
     943             :                 }
     944             :         }
     945             : 
     946           0 :         if (fc->ops->reconfigure) {
     947           0 :                 retval = fc->ops->reconfigure(fc);
     948           0 :                 if (retval) {
     949           0 :                         if (!force)
     950             :                                 goto cancel_readonly;
     951             :                         /* If forced remount, go ahead despite any errors */
     952           0 :                         WARN(1, "forced remount of a %s fs returned %i\n",
     953             :                              sb->s_type->name, retval);
     954             :                 }
     955             :         }
     956             : 
     957           0 :         WRITE_ONCE(sb->s_flags, ((sb->s_flags & ~fc->sb_flags_mask) |
     958             :                                  (fc->sb_flags & fc->sb_flags_mask)));
     959             :         /* Needs to be ordered wrt mnt_is_readonly() */
     960           0 :         smp_wmb();
     961           0 :         sb->s_readonly_remount = 0;
     962             : 
     963             :         /*
     964             :          * Some filesystems modify their metadata via some other path than the
     965             :          * bdev buffer cache (eg. use a private mapping, or directories in
     966             :          * pagecache, etc). Also file data modifications go via their own
      967             :          * mappings. So if we try to mount readonly then copy the filesystem
     968             :          * from bdev, we could get stale data, so invalidate it to give a best
     969             :          * effort at coherency.
     970             :          */
     971           0 :         if (remount_ro && sb->s_bdev)
     972           0 :                 invalidate_bdev(sb->s_bdev);
     973             :         return 0;
     974             : 
     975             : cancel_readonly:
     976           0 :         sb->s_readonly_remount = 0;
     977           0 :         return retval;
     978             : }
     979             : 
     980           0 : static void do_emergency_remount_callback(struct super_block *sb)
     981             : {
     982           0 :         down_write(&sb->s_umount);
     983           0 :         if (sb->s_root && sb->s_bdev && (sb->s_flags & SB_BORN) &&
     984           0 :             !sb_rdonly(sb)) {
     985             :                 struct fs_context *fc;
     986             : 
     987           0 :                 fc = fs_context_for_reconfigure(sb->s_root,
     988             :                                         SB_RDONLY | SB_FORCE, SB_RDONLY);
     989           0 :                 if (!IS_ERR(fc)) {
     990           0 :                         if (parse_monolithic_mount_data(fc, NULL) == 0)
     991           0 :                                 (void)reconfigure_super(fc);
     992           0 :                         put_fs_context(fc);
     993             :                 }
     994             :         }
     995           0 :         up_write(&sb->s_umount);
     996           0 : }
     997             : 
     998           0 : static void do_emergency_remount(struct work_struct *work)
     999             : {
    1000           0 :         __iterate_supers(do_emergency_remount_callback);
    1001           0 :         kfree(work);
    1002           0 :         printk("Emergency Remount complete\n");
    1003           0 : }
    1004             : 
    1005           0 : void emergency_remount(void)
    1006             : {
    1007             :         struct work_struct *work;
    1008             : 
    1009           0 :         work = kmalloc(sizeof(*work), GFP_ATOMIC);
    1010           0 :         if (work) {
    1011           0 :                 INIT_WORK(work, do_emergency_remount);
    1012             :                 schedule_work(work);
    1013             :         }
    1014           0 : }
    1015             : 
    1016           0 : static void do_thaw_all_callback(struct super_block *sb)
    1017             : {
    1018           0 :         down_write(&sb->s_umount);
    1019           0 :         if (sb->s_root && sb->s_flags & SB_BORN) {
    1020           0 :                 emergency_thaw_bdev(sb);
    1021           0 :                 thaw_super_locked(sb);
    1022             :         } else {
    1023           0 :                 up_write(&sb->s_umount);
    1024             :         }
    1025           0 : }
    1026             : 
    1027           0 : static void do_thaw_all(struct work_struct *work)
    1028             : {
    1029           0 :         __iterate_supers(do_thaw_all_callback);
    1030           0 :         kfree(work);
    1031           0 :         printk(KERN_WARNING "Emergency Thaw complete\n");
    1032           0 : }
    1033             : 
    1034             : /**
    1035             :  * emergency_thaw_all -- forcibly thaw every frozen filesystem
    1036             :  *
    1037             :  * Used for emergency unfreeze of all filesystems via SysRq
    1038             :  */
    1039           0 : void emergency_thaw_all(void)
    1040             : {
    1041             :         struct work_struct *work;
    1042             : 
    1043           0 :         work = kmalloc(sizeof(*work), GFP_ATOMIC);
    1044           0 :         if (work) {
    1045           0 :                 INIT_WORK(work, do_thaw_all);
    1046             :                 schedule_work(work);
    1047             :         }
    1048           0 : }
    1049             : 
    1050             : static DEFINE_IDA(unnamed_dev_ida);
    1051             : 
    1052             : /**
    1053             :  * get_anon_bdev - Allocate a block device for filesystems which don't have one.
    1054             :  * @p: Pointer to a dev_t.
    1055             :  *
    1056             :  * Filesystems which don't use real block devices can call this function
    1057             :  * to allocate a virtual block device.
    1058             :  *
    1059             :  * Context: Any context.  Frequently called while holding sb_lock.
    1060             :  * Return: 0 on success, -EMFILE if there are no anonymous bdevs left
    1061             :  * or -ENOMEM if memory allocation failed.
    1062             :  */
    1063          27 : int get_anon_bdev(dev_t *p)
    1064             : {
    1065             :         int dev;
    1066             : 
    1067             :         /*
    1068             :          * Many userspace utilities consider an FSID of 0 invalid.
    1069             :          * Always return at least 1 from get_anon_bdev.
    1070             :          */
    1071          27 :         dev = ida_alloc_range(&unnamed_dev_ida, 1, (1 << MINORBITS) - 1,
    1072             :                         GFP_ATOMIC);
    1073          27 :         if (dev == -ENOSPC)
    1074           0 :                 dev = -EMFILE;
    1075          27 :         if (dev < 0)
    1076             :                 return dev;
    1077             : 
    1078          27 :         *p = MKDEV(0, dev);
    1079          27 :         return 0;
    1080             : }
    1081             : EXPORT_SYMBOL(get_anon_bdev);
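/*
 * Editor's sketch (not part of fs/super.c): a minimal illustration of how a
 * filesystem with no backing block device might obtain and later release a
 * unique anonymous dev_t.  The "myfs_*" names are hypothetical; the sketch
 * assumes the headers already pulled in by this file.
 */
struct myfs_info {
	dev_t anon_dev;
};

static int myfs_assign_anon_dev(struct myfs_info *info)
{
	/* Allocates a MAJOR-0 device number; never hands out minor 0. */
	return get_anon_bdev(&info->anon_dev);	/* 0, -EMFILE or -ENOMEM */
}

static void myfs_release_anon_dev(struct myfs_info *info)
{
	/* Must pair with a successful get_anon_bdev(). */
	free_anon_bdev(info->anon_dev);
}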
    1082             : 
    1083           0 : void free_anon_bdev(dev_t dev)
    1084             : {
    1085          17 :         ida_free(&unnamed_dev_ida, MINOR(dev));
    1086           0 : }
    1087             : EXPORT_SYMBOL(free_anon_bdev);
    1088             : 
    1089           0 : int set_anon_super(struct super_block *s, void *data)
    1090             : {
    1091          27 :         return get_anon_bdev(&s->s_dev);
    1092             : }
    1093             : EXPORT_SYMBOL(set_anon_super);
    1094             : 
    1095          17 : void kill_anon_super(struct super_block *sb)
    1096             : {
    1097          17 :         dev_t dev = sb->s_dev;
    1098          17 :         generic_shutdown_super(sb);
    1099          17 :         free_anon_bdev(dev);
    1100          17 : }
    1101             : EXPORT_SYMBOL(kill_anon_super);
    1102             : 
    1103           0 : void kill_litter_super(struct super_block *sb)
    1104             : {
    1105           0 :         if (sb->s_root)
    1106           0 :                 d_genocide(sb->s_root);
    1107           0 :         kill_anon_super(sb);
    1108           0 : }
    1109             : EXPORT_SYMBOL(kill_litter_super);
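/*
 * Editor's sketch (not part of fs/super.c): how an in-memory filesystem
 * typically pairs its teardown path with kill_litter_super(), so that any
 * dentries it pre-populated are released before the anonymous dev_t is
 * freed.  The "myfs" names are hypothetical.
 */
static struct file_system_type myfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "myfs",
	/* .init_fs_context / .mount would be filled in elsewhere (hypothetical) */
	.kill_sb	= kill_litter_super,	/* d_genocide() + kill_anon_super() */
};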
    1110             : 
    1111          27 : int set_anon_super_fc(struct super_block *sb, struct fs_context *fc)
    1112             : {
    1113          27 :         return set_anon_super(sb, NULL);
    1114             : }
    1115             : EXPORT_SYMBOL(set_anon_super_fc);
    1116             : 
    1117           0 : static int test_keyed_super(struct super_block *sb, struct fs_context *fc)
    1118             : {
    1119           0 :         return sb->s_fs_info == fc->s_fs_info;
    1120             : }
    1121             : 
    1122           0 : static int test_single_super(struct super_block *s, struct fs_context *fc)
    1123             : {
    1124           0 :         return 1;
    1125             : }
    1126             : 
    1127          27 : static int vfs_get_super(struct fs_context *fc, bool reconf,
    1128             :                 int (*test)(struct super_block *, struct fs_context *),
    1129             :                 int (*fill_super)(struct super_block *sb,
    1130             :                                   struct fs_context *fc))
    1131             : {
    1132             :         struct super_block *sb;
    1133             :         int err;
    1134             : 
    1135          27 :         sb = sget_fc(fc, test, set_anon_super_fc);
    1136          27 :         if (IS_ERR(sb))
    1137           0 :                 return PTR_ERR(sb);
    1138             : 
    1139          27 :         if (!sb->s_root) {
    1140          27 :                 err = fill_super(sb, fc);
    1141          27 :                 if (err)
    1142             :                         goto error;
    1143             : 
    1144          27 :                 sb->s_flags |= SB_ACTIVE;
    1145          54 :                 fc->root = dget(sb->s_root);
    1146             :         } else {
    1147           0 :                 fc->root = dget(sb->s_root);
    1148           0 :                 if (reconf) {
    1149           0 :                         err = reconfigure_super(fc);
    1150           0 :                         if (err < 0) {
    1151           0 :                                 dput(fc->root);
    1152           0 :                                 fc->root = NULL;
    1153           0 :                                 goto error;
    1154             :                         }
    1155             :                 }
    1156             :         }
    1157             : 
    1158             :         return 0;
    1159             : 
    1160             : error:
    1161           0 :         deactivate_locked_super(sb);
    1162           0 :         return err;
    1163             : }
    1164             : 
    1165          27 : int get_tree_nodev(struct fs_context *fc,
    1166             :                   int (*fill_super)(struct super_block *sb,
    1167             :                                     struct fs_context *fc))
    1168             : {
    1169          27 :         return vfs_get_super(fc, false, NULL, fill_super);
    1170             : }
    1171             : EXPORT_SYMBOL(get_tree_nodev);
    1172             : 
    1173           0 : int get_tree_single(struct fs_context *fc,
    1174             :                   int (*fill_super)(struct super_block *sb,
    1175             :                                     struct fs_context *fc))
    1176             : {
    1177           0 :         return vfs_get_super(fc, false, test_single_super, fill_super);
    1178             : }
    1179             : EXPORT_SYMBOL(get_tree_single);
    1180             : 
    1181           0 : int get_tree_single_reconf(struct fs_context *fc,
    1182             :                   int (*fill_super)(struct super_block *sb,
    1183             :                                     struct fs_context *fc))
    1184             : {
    1185           0 :         return vfs_get_super(fc, true, test_single_super, fill_super);
    1186             : }
    1187             : EXPORT_SYMBOL(get_tree_single_reconf);
    1188             : 
    1189           0 : int get_tree_keyed(struct fs_context *fc,
    1190             :                   int (*fill_super)(struct super_block *sb,
    1191             :                                     struct fs_context *fc),
    1192             :                 void *key)
    1193             : {
    1194           0 :         fc->s_fs_info = key;
    1195           0 :         return vfs_get_super(fc, false, test_keyed_super, fill_super);
    1196             : }
    1197             : EXPORT_SYMBOL(get_tree_keyed);
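/*
 * Editor's sketch (not part of fs/super.c): the usual way a no-device
 * filesystem plugs into the helpers above.  ->get_tree forwards to
 * get_tree_nodev() with the filesystem's fill_super callback; keyed or
 * single-instance filesystems would call get_tree_keyed()/get_tree_single()
 * instead.  All "myfs" names (and the magic value) are hypothetical, and a
 * real fill_super would also set sb->s_op, blocksize limits, and so on.
 */
static int myfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct inode *root;

	sb->s_magic = 0x6d796673;		/* hypothetical magic number */

	root = new_inode(sb);
	if (!root)
		return -ENOMEM;
	root->i_ino = 1;
	root->i_mode = S_IFDIR | 0755;

	sb->s_root = d_make_root(root);		/* drops @root on failure */
	return sb->s_root ? 0 : -ENOMEM;
}

static int myfs_get_tree(struct fs_context *fc)
{
	return get_tree_nodev(fc, myfs_fill_super);
}

static const struct fs_context_operations myfs_context_ops = {
	.get_tree	= myfs_get_tree,
};

static int myfs_init_fs_context(struct fs_context *fc)
{
	fc->ops = &myfs_context_ops;
	return 0;
}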
    1198             : 
    1199             : #ifdef CONFIG_BLOCK
    1200             : 
    1201           0 : static int set_bdev_super(struct super_block *s, void *data)
    1202             : {
    1203           0 :         s->s_bdev = data;
    1204           0 :         s->s_dev = s->s_bdev->bd_dev;
    1205           0 :         s->s_bdi = bdi_get(s->s_bdev->bd_disk->bdi);
    1206             : 
    1207           0 :         if (bdev_stable_writes(s->s_bdev))
    1208           0 :                 s->s_iflags |= SB_I_STABLE_WRITES;
    1209           0 :         return 0;
    1210             : }
    1211             : 
    1212           0 : static int set_bdev_super_fc(struct super_block *s, struct fs_context *fc)
    1213             : {
    1214           0 :         return set_bdev_super(s, fc->sget_key);
    1215             : }
    1216             : 
    1217           0 : static int test_bdev_super_fc(struct super_block *s, struct fs_context *fc)
    1218             : {
    1219           0 :         return !(s->s_iflags & SB_I_RETIRED) && s->s_bdev == fc->sget_key;
    1220             : }
    1221             : 
    1222             : /**
    1223             :  * get_tree_bdev - Get a superblock based on a single block device
    1224             :  * @fc: The filesystem context holding the parameters
    1225             :  * @fill_super: Helper to initialise a new superblock
    1226             :  */
    1227           0 : int get_tree_bdev(struct fs_context *fc,
    1228             :                 int (*fill_super)(struct super_block *,
    1229             :                                   struct fs_context *))
    1230             : {
    1231             :         struct block_device *bdev;
    1232             :         struct super_block *s;
    1233           0 :         fmode_t mode = FMODE_READ | FMODE_EXCL;
    1234           0 :         int error = 0;
    1235             : 
    1236           0 :         if (!(fc->sb_flags & SB_RDONLY))
    1237           0 :                 mode |= FMODE_WRITE;
    1238             : 
    1239           0 :         if (!fc->source)
    1240           0 :                 return invalf(fc, "No source specified");
    1241             : 
    1242           0 :         bdev = blkdev_get_by_path(fc->source, mode, fc->fs_type);
    1243           0 :         if (IS_ERR(bdev)) {
    1244           0 :                 errorf(fc, "%s: Can't open blockdev", fc->source);
    1245           0 :                 return PTR_ERR(bdev);
    1246             :         }
    1247             : 
    1248             :         /* Once the superblock is inserted into the list by sget_fc(), s_umount
    1249             :          * will protect the lockfs code from trying to start a snapshot while
     1250             :          * we are mounting.
    1251             :          */
    1252           0 :         mutex_lock(&bdev->bd_fsfreeze_mutex);
    1253           0 :         if (bdev->bd_fsfreeze_count > 0) {
    1254           0 :                 mutex_unlock(&bdev->bd_fsfreeze_mutex);
    1255           0 :                 warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
    1256           0 :                 blkdev_put(bdev, mode);
    1257           0 :                 return -EBUSY;
    1258             :         }
    1259             : 
    1260           0 :         fc->sb_flags |= SB_NOSEC;
    1261           0 :         fc->sget_key = bdev;
    1262           0 :         s = sget_fc(fc, test_bdev_super_fc, set_bdev_super_fc);
    1263           0 :         mutex_unlock(&bdev->bd_fsfreeze_mutex);
    1264           0 :         if (IS_ERR(s)) {
    1265           0 :                 blkdev_put(bdev, mode);
    1266           0 :                 return PTR_ERR(s);
    1267             :         }
    1268             : 
    1269           0 :         if (s->s_root) {
    1270             :                 /* Don't summarily change the RO/RW state. */
    1271           0 :                 if ((fc->sb_flags ^ s->s_flags) & SB_RDONLY) {
    1272           0 :                         warnf(fc, "%pg: Can't mount, would change RO state", bdev);
    1273           0 :                         deactivate_locked_super(s);
    1274           0 :                         blkdev_put(bdev, mode);
    1275           0 :                         return -EBUSY;
    1276             :                 }
    1277             : 
    1278             :                 /*
    1279             :                  * s_umount nests inside open_mutex during
    1280             :                  * __invalidate_device().  blkdev_put() acquires
    1281             :                  * open_mutex and can't be called under s_umount.  Drop
    1282             :                  * s_umount temporarily.  This is safe as we're
    1283             :                  * holding an active reference.
    1284             :                  */
    1285           0 :                 up_write(&s->s_umount);
    1286           0 :                 blkdev_put(bdev, mode);
    1287           0 :                 down_write(&s->s_umount);
    1288             :         } else {
    1289           0 :                 s->s_mode = mode;
    1290           0 :                 snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
    1291           0 :                 shrinker_debugfs_rename(&s->s_shrink, "sb-%s:%s",
    1292             :                                         fc->fs_type->name, s->s_id);
    1293           0 :                 sb_set_blocksize(s, block_size(bdev));
    1294           0 :                 error = fill_super(s, fc);
    1295           0 :                 if (error) {
    1296           0 :                         deactivate_locked_super(s);
    1297           0 :                         return error;
    1298             :                 }
    1299             : 
    1300           0 :                 s->s_flags |= SB_ACTIVE;
    1301           0 :                 bdev->bd_super = s;
    1302             :         }
    1303             : 
    1304           0 :         BUG_ON(fc->root);
    1305           0 :         fc->root = dget(s->s_root);
    1306           0 :         return 0;
    1307             : }
    1308             : EXPORT_SYMBOL(get_tree_bdev);
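/*
 * Editor's sketch (not part of fs/super.c): a block-device-backed filesystem
 * normally wires ->get_tree straight to get_tree_bdev(), which opens
 * fc->source, refuses frozen block devices and checks RO/RW compatibility
 * against an already-mounted superblock.  The "myblkfs" names are
 * hypothetical and the fill_super body is only a placeholder.
 */
static int myblkfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	/* Read the on-disk super from sb->s_bdev, set sb->s_root, ... */
	return -EINVAL;			/* placeholder for this sketch */
}

static int myblkfs_get_tree(struct fs_context *fc)
{
	return get_tree_bdev(fc, myblkfs_fill_super);
}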
    1309             : 
    1310           0 : static int test_bdev_super(struct super_block *s, void *data)
    1311             : {
    1312           0 :         return !(s->s_iflags & SB_I_RETIRED) && (void *)s->s_bdev == data;
    1313             : }
    1314             : 
    1315           0 : struct dentry *mount_bdev(struct file_system_type *fs_type,
    1316             :         int flags, const char *dev_name, void *data,
    1317             :         int (*fill_super)(struct super_block *, void *, int))
    1318             : {
    1319             :         struct block_device *bdev;
    1320             :         struct super_block *s;
    1321           0 :         fmode_t mode = FMODE_READ | FMODE_EXCL;
    1322           0 :         int error = 0;
    1323             : 
    1324           0 :         if (!(flags & SB_RDONLY))
    1325           0 :                 mode |= FMODE_WRITE;
    1326             : 
    1327           0 :         bdev = blkdev_get_by_path(dev_name, mode, fs_type);
    1328           0 :         if (IS_ERR(bdev))
    1329             :                 return ERR_CAST(bdev);
    1330             : 
    1331             :         /*
     1332             :          * Once the super is inserted into the list by sget(), s_umount
     1333             :          * will protect the lockfs code from trying to start a snapshot
     1334             :          * while we are mounting.
    1335             :          */
    1336           0 :         mutex_lock(&bdev->bd_fsfreeze_mutex);
    1337           0 :         if (bdev->bd_fsfreeze_count > 0) {
    1338           0 :                 mutex_unlock(&bdev->bd_fsfreeze_mutex);
    1339           0 :                 error = -EBUSY;
    1340           0 :                 goto error_bdev;
    1341             :         }
    1342           0 :         s = sget(fs_type, test_bdev_super, set_bdev_super, flags | SB_NOSEC,
    1343             :                  bdev);
    1344           0 :         mutex_unlock(&bdev->bd_fsfreeze_mutex);
    1345           0 :         if (IS_ERR(s))
    1346             :                 goto error_s;
    1347             : 
    1348           0 :         if (s->s_root) {
    1349           0 :                 if ((flags ^ s->s_flags) & SB_RDONLY) {
    1350           0 :                         deactivate_locked_super(s);
    1351           0 :                         error = -EBUSY;
    1352           0 :                         goto error_bdev;
    1353             :                 }
    1354             : 
    1355             :                 /*
    1356             :                  * s_umount nests inside open_mutex during
    1357             :                  * __invalidate_device().  blkdev_put() acquires
    1358             :                  * open_mutex and can't be called under s_umount.  Drop
    1359             :                  * s_umount temporarily.  This is safe as we're
    1360             :                  * holding an active reference.
    1361             :                  */
    1362           0 :                 up_write(&s->s_umount);
    1363           0 :                 blkdev_put(bdev, mode);
    1364           0 :                 down_write(&s->s_umount);
    1365             :         } else {
    1366           0 :                 s->s_mode = mode;
    1367           0 :                 snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
    1368           0 :                 shrinker_debugfs_rename(&s->s_shrink, "sb-%s:%s",
    1369             :                                         fs_type->name, s->s_id);
    1370           0 :                 sb_set_blocksize(s, block_size(bdev));
    1371           0 :                 error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
    1372           0 :                 if (error) {
    1373           0 :                         deactivate_locked_super(s);
    1374           0 :                         goto error;
    1375             :                 }
    1376             : 
    1377           0 :                 s->s_flags |= SB_ACTIVE;
    1378           0 :                 bdev->bd_super = s;
    1379             :         }
    1380             : 
    1381           0 :         return dget(s->s_root);
    1382             : 
    1383             : error_s:
    1384           0 :         error = PTR_ERR(s);
    1385             : error_bdev:
    1386           0 :         blkdev_put(bdev, mode);
    1387             : error:
    1388           0 :         return ERR_PTR(error);
    1389             : }
    1390             : EXPORT_SYMBOL(mount_bdev);
    1391             : 
    1392           0 : void kill_block_super(struct super_block *sb)
    1393             : {
    1394           0 :         struct block_device *bdev = sb->s_bdev;
    1395           0 :         fmode_t mode = sb->s_mode;
    1396             : 
    1397           0 :         bdev->bd_super = NULL;
    1398           0 :         generic_shutdown_super(sb);
    1399           0 :         sync_blockdev(bdev);
    1400           0 :         WARN_ON_ONCE(!(mode & FMODE_EXCL));
    1401           0 :         blkdev_put(bdev, mode | FMODE_EXCL);
    1402           0 : }
    1403             : 
    1404             : EXPORT_SYMBOL(kill_block_super);
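/*
 * Editor's sketch (not part of fs/super.c): the pre-fs_context ("legacy")
 * pairing of mount_bdev() with kill_block_super().  Filesystems that still
 * implement ->mount typically look like this; the "oldfs" names are
 * hypothetical and the fill_super body is only a placeholder.
 */
static int oldfs_fill_super(struct super_block *sb, void *data, int silent)
{
	/* Parse @data options, read the super from sb->s_bdev, set sb->s_root. */
	return -EINVAL;			/* placeholder for this sketch */
}

static struct dentry *oldfs_mount(struct file_system_type *fs_type,
				  int flags, const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, oldfs_fill_super);
}

static struct file_system_type oldfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "oldfs",
	.mount		= oldfs_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};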
    1405             : #endif
    1406             : 
    1407           0 : struct dentry *mount_nodev(struct file_system_type *fs_type,
    1408             :         int flags, void *data,
    1409             :         int (*fill_super)(struct super_block *, void *, int))
    1410             : {
    1411             :         int error;
    1412           0 :         struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL);
    1413             : 
    1414           0 :         if (IS_ERR(s))
    1415             :                 return ERR_CAST(s);
    1416             : 
    1417           0 :         error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
    1418           0 :         if (error) {
    1419           0 :                 deactivate_locked_super(s);
    1420           0 :                 return ERR_PTR(error);
    1421             :         }
    1422           0 :         s->s_flags |= SB_ACTIVE;
    1423           0 :         return dget(s->s_root);
    1424             : }
    1425             : EXPORT_SYMBOL(mount_nodev);
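/*
 * Editor's sketch (not part of fs/super.c): mount_nodev() is the legacy
 * counterpart of get_tree_nodev(); a typical ->mount implementation just
 * forwards to it and pairs the file_system_type with kill_anon_super() or
 * kill_litter_super().  The "nodevfs" names are hypothetical and the
 * fill_super body is only a placeholder.
 */
static int nodevfs_fill_super(struct super_block *sb, void *data, int silent)
{
	/* Build the root inode and dentry here. */
	return -EINVAL;			/* placeholder for this sketch */
}

static struct dentry *nodevfs_mount(struct file_system_type *fs_type,
				    int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, nodevfs_fill_super);
}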
    1426             : 
    1427           0 : int reconfigure_single(struct super_block *s,
    1428             :                        int flags, void *data)
    1429             : {
    1430             :         struct fs_context *fc;
    1431             :         int ret;
    1432             : 
     1433             :         /* The caller really needs to be passing fc down into mount_single(),
    1434             :          * then a chunk of this can be removed.  [Bollocks -- AV]
    1435             :          * Better yet, reconfiguration shouldn't happen, but rather the second
    1436             :          * mount should be rejected if the parameters are not compatible.
    1437             :          */
    1438           0 :         fc = fs_context_for_reconfigure(s->s_root, flags, MS_RMT_MASK);
    1439           0 :         if (IS_ERR(fc))
    1440           0 :                 return PTR_ERR(fc);
    1441             : 
    1442           0 :         ret = parse_monolithic_mount_data(fc, data);
    1443           0 :         if (ret < 0)
    1444             :                 goto out;
    1445             : 
    1446           0 :         ret = reconfigure_super(fc);
    1447             : out:
    1448           0 :         put_fs_context(fc);
    1449           0 :         return ret;
    1450             : }
    1451             : 
    1452           0 : static int compare_single(struct super_block *s, void *p)
    1453             : {
    1454           0 :         return 1;
    1455             : }
    1456             : 
    1457           0 : struct dentry *mount_single(struct file_system_type *fs_type,
    1458             :         int flags, void *data,
    1459             :         int (*fill_super)(struct super_block *, void *, int))
    1460             : {
    1461             :         struct super_block *s;
    1462             :         int error;
    1463             : 
    1464           0 :         s = sget(fs_type, compare_single, set_anon_super, flags, NULL);
    1465           0 :         if (IS_ERR(s))
    1466             :                 return ERR_CAST(s);
    1467           0 :         if (!s->s_root) {
    1468           0 :                 error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
    1469           0 :                 if (!error)
    1470           0 :                         s->s_flags |= SB_ACTIVE;
    1471             :         } else {
    1472           0 :                 error = reconfigure_single(s, flags, data);
    1473             :         }
    1474           0 :         if (unlikely(error)) {
    1475           0 :                 deactivate_locked_super(s);
    1476           0 :                 return ERR_PTR(error);
    1477             :         }
    1478           0 :         return dget(s->s_root);
    1479             : }
    1480             : EXPORT_SYMBOL(mount_single);
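/*
 * Editor's sketch (not part of fs/super.c): mount_single() is used by
 * single-instance filesystems where every mount shares one superblock; a
 * second mount lands in reconfigure_single() instead of creating a new
 * instance.  The "singlefs" names are hypothetical and the fill_super body
 * is only a placeholder.
 */
static int singlefs_fill_super(struct super_block *sb, void *data, int silent)
{
	/* Build the shared root; runs only for the first mount. */
	return -EINVAL;			/* placeholder for this sketch */
}

static struct dentry *singlefs_mount(struct file_system_type *fs_type,
				     int flags, const char *dev_name, void *data)
{
	return mount_single(fs_type, flags, data, singlefs_fill_super);
}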
    1481             : 
    1482             : /**
    1483             :  * vfs_get_tree - Get the mountable root
    1484             :  * @fc: The superblock configuration context.
    1485             :  *
    1486             :  * The filesystem is invoked to get or create a superblock which can then later
    1487             :  * be used for mounting.  The filesystem places a pointer to the root to be
    1488             :  * used for mounting in @fc->root.
    1489             :  */
    1490          27 : int vfs_get_tree(struct fs_context *fc)
    1491             : {
    1492             :         struct super_block *sb;
    1493             :         int error;
    1494             : 
    1495          27 :         if (fc->root)
    1496             :                 return -EBUSY;
    1497             : 
    1498             :         /* Get the mountable root in fc->root, with a ref on the root and a ref
    1499             :          * on the superblock.
    1500             :          */
    1501          27 :         error = fc->ops->get_tree(fc);
    1502          27 :         if (error < 0)
    1503             :                 return error;
    1504             : 
    1505          27 :         if (!fc->root) {
    1506           0 :                 pr_err("Filesystem %s get_tree() didn't set fc->root\n",
    1507             :                        fc->fs_type->name);
    1508             :                 /* We don't know what the locking state of the superblock is -
    1509             :                  * if there is a superblock.
    1510             :                  */
    1511           0 :                 BUG();
    1512             :         }
    1513             : 
    1514          27 :         sb = fc->root->d_sb;
    1515          27 :         WARN_ON(!sb->s_bdi);
    1516             : 
    1517             :         /*
    1518             :          * Write barrier is for super_cache_count(). We place it before setting
    1519             :          * SB_BORN as the data dependency between the two functions is the
    1520             :          * superblock structure contents that we just set up, not the SB_BORN
    1521             :          * flag.
    1522             :          */
    1523          27 :         smp_wmb();
    1524          27 :         sb->s_flags |= SB_BORN;
    1525             : 
    1526          27 :         error = security_sb_set_mnt_opts(sb, fc->security, 0, NULL);
    1527             :         if (unlikely(error)) {
    1528             :                 fc_drop_locked(fc);
    1529             :                 return error;
    1530             :         }
    1531             : 
    1532             :         /*
    1533             :          * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
    1534             :          * but s_maxbytes was an unsigned long long for many releases. Throw
    1535             :          * this warning for a little while to try and catch filesystems that
    1536             :          * violate this rule.
    1537             :          */
    1538          27 :         WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
    1539             :                 "negative value (%lld)\n", fc->fs_type->name, sb->s_maxbytes);
    1540             : 
    1541             :         return 0;
    1542             : }
    1543             : EXPORT_SYMBOL(vfs_get_tree);
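/*
 * Editor's sketch (not part of fs/super.c): roughly how an in-kernel mount
 * drives vfs_get_tree().  fs_context_for_mount(), fc_mount() and
 * put_fs_context() are the real helpers used elsewhere in the VFS;
 * fc_mount() calls vfs_get_tree() and then wraps the result in a vfsmount.
 * Error handling is trimmed to the essentials.
 */
static struct vfsmount *example_kern_mount(struct file_system_type *type)
{
	struct fs_context *fc;
	struct vfsmount *mnt;

	fc = fs_context_for_mount(type, 0);
	if (IS_ERR(fc))
		return ERR_CAST(fc);

	mnt = fc_mount(fc);		/* vfs_get_tree() + mount creation */
	put_fs_context(fc);
	return mnt;
}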
    1544             : 
    1545             : /*
     1546             :  * Set up a private BDI for the given superblock. It is cleaned up automatically
    1547             :  * in generic_shutdown_super().
    1548             :  */
    1549           0 : int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
    1550             : {
    1551             :         struct backing_dev_info *bdi;
    1552             :         int err;
    1553             :         va_list args;
    1554             : 
    1555           0 :         bdi = bdi_alloc(NUMA_NO_NODE);
    1556           0 :         if (!bdi)
    1557             :                 return -ENOMEM;
    1558             : 
    1559           0 :         va_start(args, fmt);
    1560           0 :         err = bdi_register_va(bdi, fmt, args);
    1561           0 :         va_end(args);
    1562           0 :         if (err) {
    1563           0 :                 bdi_put(bdi);
    1564           0 :                 return err;
    1565             :         }
    1566           0 :         WARN_ON(sb->s_bdi != &noop_backing_dev_info);
    1567           0 :         sb->s_bdi = bdi;
    1568           0 :         sb->s_iflags |= SB_I_PERSB_BDI;
    1569             : 
    1570           0 :         return 0;
    1571             : }
    1572             : EXPORT_SYMBOL(super_setup_bdi_name);
    1573             : 
    1574             : /*
     1575             :  * Set up a private BDI for the given superblock. It is cleaned up automatically
    1576             :  * in generic_shutdown_super().
    1577             :  */
    1578           0 : int super_setup_bdi(struct super_block *sb)
    1579             : {
    1580             :         static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
    1581             : 
    1582           0 :         return super_setup_bdi_name(sb, "%.28s-%ld", sb->s_type->name,
    1583             :                                     atomic_long_inc_return(&bdi_seq));
    1584             : }
    1585             : EXPORT_SYMBOL(super_setup_bdi);
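/*
 * Editor's sketch (not part of fs/super.c): a filesystem that wants its own
 * writeback accounting calls super_setup_bdi() (or the _name variant) early
 * in its fill_super; generic_shutdown_super() later drops the BDI.  The
 * "myfs" name and the readahead tweak are illustrative only.
 */
static int myfs_setup_writeback(struct super_block *sb)
{
	int err = super_setup_bdi(sb);	/* registers "<fstype>-<seq>" */

	if (err)
		return err;

	sb->s_bdi->ra_pages = 0;	/* e.g. a network fs disabling readahead */
	return 0;
}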
    1586             : 
    1587             : /**
    1588             :  * sb_wait_write - wait until all writers to given file system finish
    1589             :  * @sb: the super for which we wait
    1590             :  * @level: type of writers we wait for (normal vs page fault)
    1591             :  *
     1592             :  * This function waits until there are no writers of the given type to the
     1593             :  * given file system.
    1594             :  */
    1595             : static void sb_wait_write(struct super_block *sb, int level)
    1596             : {
    1597           0 :         percpu_down_write(sb->s_writers.rw_sem + level-1);
    1598             : }
    1599             : 
    1600             : /*
     1601             :  * We are going to return to userspace and forget about these locks; the
    1602             :  * ownership goes to the caller of thaw_super() which does unlock().
    1603             :  */
    1604             : static void lockdep_sb_freeze_release(struct super_block *sb)
    1605             : {
    1606             :         int level;
    1607             : 
    1608           0 :         for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
    1609             :                 percpu_rwsem_release(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
    1610             : }
    1611             : 
    1612             : /*
    1613             :  * Tell lockdep we are holding these locks before we call ->unfreeze_fs(sb).
    1614             :  */
    1615             : static void lockdep_sb_freeze_acquire(struct super_block *sb)
    1616             : {
    1617             :         int level;
    1618             : 
    1619           0 :         for (level = 0; level < SB_FREEZE_LEVELS; ++level)
    1620             :                 percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
    1621             : }
    1622             : 
    1623             : static void sb_freeze_unlock(struct super_block *sb, int level)
    1624             : {
    1625           0 :         for (level--; level >= 0; level--)
    1626           0 :                 percpu_up_write(sb->s_writers.rw_sem + level);
    1627             : }
    1628             : 
    1629             : /**
    1630             :  * freeze_super - lock the filesystem and force it into a consistent state
    1631             :  * @sb: the super to lock
    1632             :  *
    1633             :  * Syncs the super to make sure the filesystem is consistent and calls the fs's
    1634             :  * freeze_fs.  Subsequent calls to this without first thawing the fs will return
    1635             :  * -EBUSY.
    1636             :  *
    1637             :  * During this function, sb->s_writers.frozen goes through these values:
    1638             :  *
    1639             :  * SB_UNFROZEN: File system is normal, all writes progress as usual.
    1640             :  *
    1641             :  * SB_FREEZE_WRITE: The file system is in the process of being frozen.  New
    1642             :  * writes should be blocked, though page faults are still allowed. We wait for
    1643             :  * all writes to complete and then proceed to the next stage.
    1644             :  *
    1645             :  * SB_FREEZE_PAGEFAULT: Freezing continues. Now also page faults are blocked
    1646             :  * but internal fs threads can still modify the filesystem (although they
    1647             :  * should not dirty new pages or inodes), writeback can run etc. After waiting
    1648             :  * for all running page faults we sync the filesystem which will clean all
    1649             :  * dirty pages and inodes (no new dirty pages or inodes can be created when
    1650             :  * sync is running).
    1651             :  *
    1652             :  * SB_FREEZE_FS: The file system is frozen. Now all internal sources of fs
    1653             :  * modification are blocked (e.g. XFS preallocation truncation on inode
    1654             :  * reclaim). This is usually implemented by blocking new transactions for
    1655             :  * filesystems that have them and need this additional guard. After all
    1656             :  * internal writers are finished we call ->freeze_fs() to finish filesystem
    1657             :  * freezing. Then we transition to SB_FREEZE_COMPLETE state. This state is
    1658             :  * mostly auxiliary for filesystems to verify they do not modify frozen fs.
    1659             :  *
    1660             :  * sb->s_writers.frozen is protected by sb->s_umount.
    1661             :  */
    1662           0 : int freeze_super(struct super_block *sb)
    1663             : {
    1664             :         int ret;
    1665             : 
    1666           0 :         atomic_inc(&sb->s_active);
    1667           0 :         down_write(&sb->s_umount);
    1668           0 :         if (sb->s_writers.frozen != SB_UNFROZEN) {
    1669           0 :                 deactivate_locked_super(sb);
    1670           0 :                 return -EBUSY;
    1671             :         }
    1672             : 
    1673           0 :         if (!(sb->s_flags & SB_BORN)) {
    1674           0 :                 up_write(&sb->s_umount);
    1675           0 :                 return 0;       /* sic - it's "nothing to do" */
    1676             :         }
    1677             : 
    1678           0 :         if (sb_rdonly(sb)) {
    1679             :                 /* Nothing to do really... */
    1680           0 :                 sb->s_writers.frozen = SB_FREEZE_COMPLETE;
    1681           0 :                 up_write(&sb->s_umount);
    1682           0 :                 return 0;
    1683             :         }
    1684             : 
    1685           0 :         sb->s_writers.frozen = SB_FREEZE_WRITE;
    1686             :         /* Release s_umount to preserve sb_start_write -> s_umount ordering */
    1687           0 :         up_write(&sb->s_umount);
    1688           0 :         sb_wait_write(sb, SB_FREEZE_WRITE);
    1689           0 :         down_write(&sb->s_umount);
    1690             : 
    1691             :         /* Now we go and block page faults... */
    1692           0 :         sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
    1693           0 :         sb_wait_write(sb, SB_FREEZE_PAGEFAULT);
    1694             : 
    1695             :         /* All writers are done so after syncing there won't be dirty data */
    1696           0 :         ret = sync_filesystem(sb);
    1697           0 :         if (ret) {
    1698           0 :                 sb->s_writers.frozen = SB_UNFROZEN;
    1699           0 :                 sb_freeze_unlock(sb, SB_FREEZE_PAGEFAULT);
    1700           0 :                 wake_up(&sb->s_writers.wait_unfrozen);
    1701           0 :                 deactivate_locked_super(sb);
    1702           0 :                 return ret;
    1703             :         }
    1704             : 
    1705             :         /* Now wait for internal filesystem counter */
    1706           0 :         sb->s_writers.frozen = SB_FREEZE_FS;
    1707           0 :         sb_wait_write(sb, SB_FREEZE_FS);
    1708             : 
    1709           0 :         if (sb->s_op->freeze_fs) {
    1710           0 :                 ret = sb->s_op->freeze_fs(sb);
    1711           0 :                 if (ret) {
    1712           0 :                         printk(KERN_ERR
    1713             :                                 "VFS:Filesystem freeze failed\n");
    1714           0 :                         sb->s_writers.frozen = SB_UNFROZEN;
    1715           0 :                         sb_freeze_unlock(sb, SB_FREEZE_FS);
    1716           0 :                         wake_up(&sb->s_writers.wait_unfrozen);
    1717           0 :                         deactivate_locked_super(sb);
    1718           0 :                         return ret;
    1719             :                 }
    1720             :         }
    1721             :         /*
    1722             :          * For debugging purposes so that fs can warn if it sees write activity
    1723             :          * when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super().
    1724             :          */
    1725           0 :         sb->s_writers.frozen = SB_FREEZE_COMPLETE;
    1726           0 :         lockdep_sb_freeze_release(sb);
    1727           0 :         up_write(&sb->s_umount);
    1728           0 :         return 0;
    1729             : }
    1730             : EXPORT_SYMBOL(freeze_super);
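/*
 * Editor's sketch (not part of fs/super.c): the hooks freeze_super() and
 * thaw_super_locked() call into.  A filesystem that needs more than the
 * generic write/page-fault blocking implements ->freeze_fs/->unfreeze_fs in
 * its super_operations; the "myfs" names are hypothetical.
 */
static int myfs_freeze_fs(struct super_block *sb)
{
	/* Runs at SB_FREEZE_FS: flush and stop internal transactions. */
	return 0;
}

static int myfs_unfreeze_fs(struct super_block *sb)
{
	/* Restart whatever ->freeze_fs stopped. */
	return 0;
}

static const struct super_operations myfs_super_ops = {
	.freeze_fs	= myfs_freeze_fs,
	.unfreeze_fs	= myfs_unfreeze_fs,
};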
    1731             : 
    1732           0 : static int thaw_super_locked(struct super_block *sb)
    1733             : {
    1734             :         int error;
    1735             : 
    1736           0 :         if (sb->s_writers.frozen != SB_FREEZE_COMPLETE) {
    1737           0 :                 up_write(&sb->s_umount);
    1738           0 :                 return -EINVAL;
    1739             :         }
    1740             : 
    1741           0 :         if (sb_rdonly(sb)) {
    1742           0 :                 sb->s_writers.frozen = SB_UNFROZEN;
    1743           0 :                 goto out;
    1744             :         }
    1745             : 
    1746           0 :         lockdep_sb_freeze_acquire(sb);
    1747             : 
    1748           0 :         if (sb->s_op->unfreeze_fs) {
    1749           0 :                 error = sb->s_op->unfreeze_fs(sb);
    1750           0 :                 if (error) {
    1751           0 :                         printk(KERN_ERR
    1752             :                                 "VFS:Filesystem thaw failed\n");
    1753           0 :                         lockdep_sb_freeze_release(sb);
    1754           0 :                         up_write(&sb->s_umount);
    1755           0 :                         return error;
    1756             :                 }
    1757             :         }
    1758             : 
    1759           0 :         sb->s_writers.frozen = SB_UNFROZEN;
    1760             :         sb_freeze_unlock(sb, SB_FREEZE_FS);
    1761             : out:
    1762           0 :         wake_up(&sb->s_writers.wait_unfrozen);
    1763           0 :         deactivate_locked_super(sb);
    1764           0 :         return 0;
    1765             : }
    1766             : 
    1767             : /**
    1768             :  * thaw_super -- unlock filesystem
    1769             :  * @sb: the super to thaw
    1770             :  *
    1771             :  * Unlocks the filesystem and marks it writeable again after freeze_super().
    1772             :  */
    1773           0 : int thaw_super(struct super_block *sb)
    1774             : {
    1775           0 :         down_write(&sb->s_umount);
    1776           0 :         return thaw_super_locked(sb);
    1777             : }
    1778             : EXPORT_SYMBOL(thaw_super);
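/*
 * Editor's sketch (not part of fs/super.c): freeze_super() and thaw_super()
 * are used as a pair (e.g. by the FIFREEZE/FITHAW ioctls) to hold a
 * filesystem quiescent across an external snapshot.  Illustrative only.
 */
static int example_snapshot(struct super_block *sb)
{
	int err = freeze_super(sb);

	if (err)
		return err;

	/* ... take the block-level snapshot while the fs is quiescent ... */

	return thaw_super(sb);
}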
    1779             : 
    1780             : /*
     1781             :  * Create a workqueue for deferred direct IO completions. We allocate the
     1782             :  * workqueue only when it is first needed. This avoids creating a workqueue
     1783             :  * for filesystems that don't need it and also allows us to create the
     1784             :  * workqueue late enough that we can include s_id in its name.
    1785             :  */
    1786           0 : int sb_init_dio_done_wq(struct super_block *sb)
    1787             : {
    1788             :         struct workqueue_struct *old;
    1789           0 :         struct workqueue_struct *wq = alloc_workqueue("dio/%s",
    1790             :                                                       WQ_MEM_RECLAIM, 0,
    1791           0 :                                                       sb->s_id);
    1792           0 :         if (!wq)
    1793             :                 return -ENOMEM;
    1794             :         /*
    1795             :          * This has to be atomic as more DIOs can race to create the workqueue
    1796             :          */
    1797           0 :         old = cmpxchg(&sb->s_dio_done_wq, NULL, wq);
    1798             :         /* Someone created workqueue before us? Free ours... */
    1799           0 :         if (old)
    1800           0 :                 destroy_workqueue(wq);
    1801             :         return 0;
    1802             : }
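/*
 * Editor's sketch (not part of fs/super.c): how a direct-IO completion path
 * typically uses the lazily-created workqueue above.  The "my_dio" helper,
 * its parameters and the work-item layout are hypothetical.
 */
static int my_dio_defer_completion(struct super_block *sb,
				   struct work_struct *work, work_func_t fn)
{
	if (!sb->s_dio_done_wq) {
		int err = sb_init_dio_done_wq(sb);

		if (err)
			return err;
	}

	INIT_WORK(work, fn);
	queue_work(sb->s_dio_done_wq, work);
	return 0;
}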

Generated by: LCOV version 1.14