LCOV - code coverage report
Current view: top level - fs - super.c (source / functions)
Test: coverage.info          Date: 2023-07-19 18:55:55
Coverage:   Lines: 155 hit / 736 total (21.1 %)    Functions: 14 hit / 63 total (22.2 %)

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  *  linux/fs/super.c
       4             :  *
       5             :  *  Copyright (C) 1991, 1992  Linus Torvalds
       6             :  *
       7             :  *  super.c contains code to handle: - mount structures
       8             :  *                                   - super-block tables
       9             :  *                                   - filesystem drivers list
      10             :  *                                   - mount system call
      11             :  *                                   - umount system call
      12             :  *                                   - ustat system call
      13             :  *
      14             :  * GK 2/5/95  -  Changed to support mounting the root fs via NFS
      15             :  *
      16             :  *  Added kerneld support: Jacques Gelinas and Bjorn Ekwall
      17             :  *  Added change_root: Werner Almesberger & Hans Lermen, Feb '96
      18             :  *  Added options to /proc/mounts:
      19             :  *    Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
      20             :  *  Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
      21             :  *  Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
      22             :  */
      23             : 
      24             : #include <linux/export.h>
      25             : #include <linux/slab.h>
      26             : #include <linux/blkdev.h>
      27             : #include <linux/mount.h>
      28             : #include <linux/security.h>
      29             : #include <linux/writeback.h>              /* for the emergency remount stuff */
      30             : #include <linux/idr.h>
      31             : #include <linux/mutex.h>
      32             : #include <linux/backing-dev.h>
      33             : #include <linux/rculist_bl.h>
      34             : #include <linux/fscrypt.h>
      35             : #include <linux/fsnotify.h>
      36             : #include <linux/lockdep.h>
      37             : #include <linux/user_namespace.h>
      38             : #include <linux/fs_context.h>
      39             : #include <uapi/linux/mount.h>
      40             : #include "internal.h"
      41             : 
      42             : static int thaw_super_locked(struct super_block *sb);
      43             : 
      44             : static LIST_HEAD(super_blocks);
      45             : static DEFINE_SPINLOCK(sb_lock);
      46             : 
      47             : static char *sb_writers_name[SB_FREEZE_LEVELS] = {
      48             :         "sb_writers",
      49             :         "sb_pagefaults",
      50             :         "sb_internal",
      51             : };
      52             : 
      53             : /*
      54             :  * One thing we have to be careful of with a per-sb shrinker is that we don't
      55             :  * drop the last active reference to the superblock from within the shrinker.
      56             :  * If that happens we could trigger unregistering the shrinker from within the
      57             :  * shrinker path and that leads to deadlock on the shrinker_mutex. Hence we
       58             :  *  take a passive reference to the superblock to prevent this from occurring.
      59             :  */
      60           0 : static unsigned long super_cache_scan(struct shrinker *shrink,
      61             :                                       struct shrink_control *sc)
      62             : {
      63             :         struct super_block *sb;
      64           0 :         long    fs_objects = 0;
      65             :         long    total_objects;
      66           0 :         long    freed = 0;
      67             :         long    dentries;
      68             :         long    inodes;
      69             : 
      70           0 :         sb = container_of(shrink, struct super_block, s_shrink);
      71             : 
      72             :         /*
      73             :          * Deadlock avoidance.  We may hold various FS locks, and we don't want
      74             :          * to recurse into the FS that called us in clear_inode() and friends..
      75             :          */
      76           0 :         if (!(sc->gfp_mask & __GFP_FS))
      77             :                 return SHRINK_STOP;
      78             : 
      79           0 :         if (!trylock_super(sb))
      80             :                 return SHRINK_STOP;
      81             : 
      82           0 :         if (sb->s_op->nr_cached_objects)
      83           0 :                 fs_objects = sb->s_op->nr_cached_objects(sb, sc);
      84             : 
      85           0 :         inodes = list_lru_shrink_count(&sb->s_inode_lru, sc);
      86           0 :         dentries = list_lru_shrink_count(&sb->s_dentry_lru, sc);
      87           0 :         total_objects = dentries + inodes + fs_objects + 1;
      88           0 :         if (!total_objects)
      89           0 :                 total_objects = 1;
      90             : 
      91             :         /* proportion the scan between the caches */
      92           0 :         dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
      93           0 :         inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);
      94           0 :         fs_objects = mult_frac(sc->nr_to_scan, fs_objects, total_objects);
      95             : 
      96             :         /*
      97             :          * prune the dcache first as the icache is pinned by it, then
      98             :          * prune the icache, followed by the filesystem specific caches
      99             :          *
     100             :          * Ensure that we always scan at least one object - memcg kmem
     101             :          * accounting uses this to fully empty the caches.
     102             :          */
     103           0 :         sc->nr_to_scan = dentries + 1;
     104           0 :         freed = prune_dcache_sb(sb, sc);
     105           0 :         sc->nr_to_scan = inodes + 1;
     106           0 :         freed += prune_icache_sb(sb, sc);
     107             : 
     108           0 :         if (fs_objects) {
     109           0 :                 sc->nr_to_scan = fs_objects + 1;
     110           0 :                 freed += sb->s_op->free_cached_objects(sb, sc);
     111             :         }
     112             : 
     113           0 :         up_read(&sb->s_umount);
     114           0 :         return freed;
     115             : }
     116             : 
     117           0 : static unsigned long super_cache_count(struct shrinker *shrink,
     118             :                                        struct shrink_control *sc)
     119             : {
     120             :         struct super_block *sb;
     121           0 :         long    total_objects = 0;
     122             : 
     123           0 :         sb = container_of(shrink, struct super_block, s_shrink);
     124             : 
     125             :         /*
     126             :          * We don't call trylock_super() here as it is a scalability bottleneck,
     127             :          * so we're exposed to partial setup state. The shrinker rwsem does not
     128             :          * protect filesystem operations backing list_lru_shrink_count() or
     129             :          * s_op->nr_cached_objects(). Counts can change between
     130             :          * super_cache_count and super_cache_scan, so we really don't need locks
     131             :          * here.
     132             :          *
     133             :          * However, if we are currently mounting the superblock, the underlying
     134             :          * filesystem might be in a state of partial construction and hence it
     135             :          * is dangerous to access it.  trylock_super() uses a SB_BORN check to
     136             :          * avoid this situation, so do the same here. The memory barrier is
     137             :          * matched with the one in mount_fs() as we don't hold locks here.
     138             :          */
     139           0 :         if (!(sb->s_flags & SB_BORN))
     140             :                 return 0;
     141           0 :         smp_rmb();
     142             : 
     143           0 :         if (sb->s_op && sb->s_op->nr_cached_objects)
     144           0 :                 total_objects = sb->s_op->nr_cached_objects(sb, sc);
     145             : 
     146           0 :         total_objects += list_lru_shrink_count(&sb->s_dentry_lru, sc);
     147           0 :         total_objects += list_lru_shrink_count(&sb->s_inode_lru, sc);
     148             : 
     149           0 :         if (!total_objects)
     150             :                 return SHRINK_EMPTY;
     151             : 
     152           0 :         total_objects = vfs_pressure_ratio(total_objects);
     153           0 :         return total_objects;
     154             : }
     155             : 
     156          22 : static void destroy_super_work(struct work_struct *work)
     157             : {
     158          22 :         struct super_block *s = container_of(work, struct super_block,
     159             :                                                         destroy_work);
     160             :         int i;
     161             : 
     162          88 :         for (i = 0; i < SB_FREEZE_LEVELS; i++)
     163          66 :                 percpu_free_rwsem(&s->s_writers.rw_sem[i]);
     164          22 :         kfree(s);
     165          22 : }
     166             : 
     167          22 : static void destroy_super_rcu(struct rcu_head *head)
     168             : {
     169          22 :         struct super_block *s = container_of(head, struct super_block, rcu);
     170          44 :         INIT_WORK(&s->destroy_work, destroy_super_work);
     171          44 :         schedule_work(&s->destroy_work);
     172          22 : }
     173             : 
     174             : /* Free a superblock that has never been seen by anyone */
     175           0 : static void destroy_unused_super(struct super_block *s)
     176             : {
     177           0 :         if (!s)
     178             :                 return;
     179           0 :         up_write(&s->s_umount);
     180           0 :         list_lru_destroy(&s->s_dentry_lru);
     181           0 :         list_lru_destroy(&s->s_inode_lru);
     182           0 :         security_sb_free(s);
     183           0 :         put_user_ns(s->s_user_ns);
     184           0 :         kfree(s->s_subtype);
     185           0 :         free_prealloced_shrinker(&s->s_shrink);
     186             :         /* no delays needed */
     187           0 :         destroy_super_work(&s->destroy_work);
     188             : }
     189             : 
     190             : /**
     191             :  *      alloc_super     -       create new superblock
     192             :  *      @type:  filesystem type superblock should belong to
     193             :  *      @flags: the mount flags
     194             :  *      @user_ns: User namespace for the super_block
     195             :  *
     196             :  *      Allocates and initializes a new &struct super_block.  alloc_super()
      197             :  *      returns a pointer to a new superblock or %NULL if allocation fails.
     198             :  */
     199          32 : static struct super_block *alloc_super(struct file_system_type *type, int flags,
     200             :                                        struct user_namespace *user_ns)
     201             : {
     202          32 :         struct super_block *s = kzalloc(sizeof(struct super_block),  GFP_USER);
     203             :         static const struct super_operations default_op;
     204             :         int i;
     205             : 
     206          32 :         if (!s)
     207             :                 return NULL;
     208             : 
     209          64 :         INIT_LIST_HEAD(&s->s_mounts);
     210          32 :         s->s_user_ns = get_user_ns(user_ns);
     211          32 :         init_rwsem(&s->s_umount);
     212             :         lockdep_set_class(&s->s_umount, &type->s_umount_key);
     213             :         /*
     214             :          * sget() can have s_umount recursion.
     215             :          *
     216             :          * When it cannot find a suitable sb, it allocates a new
     217             :          * one (this one), and tries again to find a suitable old
     218             :          * one.
     219             :          *
     220             :          * In case that succeeds, it will acquire the s_umount
      221             :          * lock of the old one. Since these are clearly distinct
     222             :          * locks, and this object isn't exposed yet, there's no
     223             :          * risk of deadlocks.
     224             :          *
     225             :          * Annotate this by putting this lock in a different
     226             :          * subclass.
     227             :          */
     228          32 :         down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
     229             : 
     230          32 :         if (security_sb_alloc(s))
     231             :                 goto fail;
     232             : 
     233         128 :         for (i = 0; i < SB_FREEZE_LEVELS; i++) {
     234         192 :                 if (__percpu_init_rwsem(&s->s_writers.rw_sem[i],
     235          96 :                                         sb_writers_name[i],
     236             :                                         &type->s_writers_key[i]))
     237             :                         goto fail;
     238             :         }
     239          32 :         init_waitqueue_head(&s->s_writers.wait_unfrozen);
     240          32 :         s->s_bdi = &noop_backing_dev_info;
     241          32 :         s->s_flags = flags;
     242          32 :         if (s->s_user_ns != &init_user_ns)
     243           0 :                 s->s_iflags |= SB_I_NODEV;
     244          64 :         INIT_HLIST_NODE(&s->s_instances);
     245          32 :         INIT_HLIST_BL_HEAD(&s->s_roots);
     246          32 :         mutex_init(&s->s_sync_lock);
     247          64 :         INIT_LIST_HEAD(&s->s_inodes);
     248          32 :         spin_lock_init(&s->s_inode_list_lock);
     249          64 :         INIT_LIST_HEAD(&s->s_inodes_wb);
     250          32 :         spin_lock_init(&s->s_inode_wblist_lock);
     251             : 
     252          32 :         s->s_count = 1;
     253          64 :         atomic_set(&s->s_active, 1);
     254          32 :         mutex_init(&s->s_vfs_rename_mutex);
     255             :         lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
     256          32 :         init_rwsem(&s->s_dquot.dqio_sem);
     257          32 :         s->s_maxbytes = MAX_NON_LFS;
     258          32 :         s->s_op = &default_op;
     259          32 :         s->s_time_gran = 1000000000;
     260          32 :         s->s_time_min = TIME64_MIN;
     261          32 :         s->s_time_max = TIME64_MAX;
     262             : 
     263          32 :         s->s_shrink.seeks = DEFAULT_SEEKS;
     264          32 :         s->s_shrink.scan_objects = super_cache_scan;
     265          32 :         s->s_shrink.count_objects = super_cache_count;
     266          32 :         s->s_shrink.batch = 1024;
     267          32 :         s->s_shrink.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE;
     268          32 :         if (prealloc_shrinker(&s->s_shrink, "sb-%s", type->name))
     269             :                 goto fail;
     270          32 :         if (list_lru_init_memcg(&s->s_dentry_lru, &s->s_shrink))
     271             :                 goto fail;
     272          32 :         if (list_lru_init_memcg(&s->s_inode_lru, &s->s_shrink))
     273             :                 goto fail;
     274             :         return s;
     275             : 
     276             : fail:
     277           0 :         destroy_unused_super(s);
     278             :         return NULL;
     279             : }
     280             : 
     281             : /* Superblock refcounting  */
     282             : 
     283             : /*
     284             :  * Drop a superblock's refcount.  The caller must hold sb_lock.
     285             :  */
     286          22 : static void __put_super(struct super_block *s)
     287             : {
     288          22 :         if (!--s->s_count) {
     289          44 :                 list_del_init(&s->s_list);
     290          22 :                 WARN_ON(s->s_dentry_lru.node);
     291          22 :                 WARN_ON(s->s_inode_lru.node);
     292          22 :                 WARN_ON(!list_empty(&s->s_mounts));
     293          22 :                 security_sb_free(s);
     294          22 :                 put_user_ns(s->s_user_ns);
     295          22 :                 kfree(s->s_subtype);
     296          22 :                 call_rcu(&s->rcu, destroy_super_rcu);
     297             :         }
     298          22 : }
     299             : 
     300             : /**
     301             :  *      put_super       -       drop a temporary reference to superblock
     302             :  *      @sb: superblock in question
     303             :  *
      304             :  *      Drops a temporary reference and frees the superblock if there
      305             :  *      are no references left.
     306             :  */
     307           0 : void put_super(struct super_block *sb)
     308             : {
     309          22 :         spin_lock(&sb_lock);
     310          22 :         __put_super(sb);
     311          22 :         spin_unlock(&sb_lock);
     312           0 : }
     313             : 
     314             : 
     315             : /**
     316             :  *      deactivate_locked_super -       drop an active reference to superblock
     317             :  *      @s: superblock to deactivate
     318             :  *
     319             :  *      Drops an active reference to superblock, converting it into a temporary
      320             :  *      one if there are no other active references left.  In that case we
      321             :  *      tell the fs driver to shut it down and drop the temporary reference we
     322             :  *      had just acquired.
     323             :  *
     324             :  *      Caller holds exclusive lock on superblock; that lock is released.
     325             :  */
     326          22 : void deactivate_locked_super(struct super_block *s)
     327             : {
     328          22 :         struct file_system_type *fs = s->s_type;
     329          44 :         if (atomic_dec_and_test(&s->s_active)) {
     330          22 :                 unregister_shrinker(&s->s_shrink);
     331          22 :                 fs->kill_sb(s);
     332             : 
     333             :                 /*
     334             :                  * Since list_lru_destroy() may sleep, we cannot call it from
     335             :                  * put_super(), where we hold the sb_lock. Therefore we destroy
     336             :                  * the lru lists right now.
     337             :                  */
     338          22 :                 list_lru_destroy(&s->s_dentry_lru);
     339          22 :                 list_lru_destroy(&s->s_inode_lru);
     340             : 
     341          22 :                 put_filesystem(fs);
     342             :                 put_super(s);
     343             :         } else {
     344           0 :                 up_write(&s->s_umount);
     345             :         }
     346          22 : }
     347             : 
     348             : EXPORT_SYMBOL(deactivate_locked_super);
     349             : 
     350             : /**
     351             :  *      deactivate_super        -       drop an active reference to superblock
     352             :  *      @s: superblock to deactivate
     353             :  *
     354             :  *      Variant of deactivate_locked_super(), except that superblock is *not*
     355             :  *      locked by caller.  If we are going to drop the final active reference,
     356             :  *      lock will be acquired prior to that.
     357             :  */
     358          54 : void deactivate_super(struct super_block *s)
     359             : {
     360         108 :         if (!atomic_add_unless(&s->s_active, -1, 1)) {
     361          22 :                 down_write(&s->s_umount);
     362          22 :                 deactivate_locked_super(s);
     363             :         }
     364          54 : }
     365             : 
     366             : EXPORT_SYMBOL(deactivate_super);
     367             : 
     368             : /**
     369             :  *      grab_super - acquire an active reference
     370             :  *      @s: reference we are trying to make active
     371             :  *
     372             :  *      Tries to acquire an active reference.  grab_super() is used when we
     373             :  *      had just found a superblock in super_blocks or fs_type->fs_supers
     374             :  *      and want to turn it into a full-blown active reference.  grab_super()
     375             :  *      is called with sb_lock held and drops it.  Returns 1 in case of
      376             :  *      success, 0 on failure (the superblock was already dead or
      377             :  *      dying when grab_super() was called).  Note that this is only
     378             :  *      called for superblocks not in rundown mode (== ones still on ->fs_supers
     379             :  *      of their type), so increment of ->s_count is OK here.
     380             :  */
     381           0 : static int grab_super(struct super_block *s) __releases(sb_lock)
     382             : {
     383           0 :         s->s_count++;
     384           0 :         spin_unlock(&sb_lock);
     385           0 :         down_write(&s->s_umount);
     386           0 :         if ((s->s_flags & SB_BORN) && atomic_inc_not_zero(&s->s_active)) {
     387           0 :                 put_super(s);
     388           0 :                 return 1;
     389             :         }
     390           0 :         up_write(&s->s_umount);
     391           0 :         put_super(s);
     392           0 :         return 0;
     393             : }
     394             : 
     395             : /*
     396             :  *      trylock_super - try to grab ->s_umount shared
     397             :  *      @sb: reference we are trying to grab
     398             :  *
     399             :  *      Try to prevent fs shutdown.  This is used in places where we
     400             :  *      cannot take an active reference but we need to ensure that the
     401             :  *      filesystem is not shut down while we are working on it. It returns
     402             :  *      false if we cannot acquire s_umount or if we lose the race and
     403             :  *      filesystem already got into shutdown, and returns true with the s_umount
     404             :  *      lock held in read mode in case of success. On successful return,
     405             :  *      the caller must drop the s_umount lock when done.
     406             :  *
      407             :  *      Note that unlike get_super() et al. this one does *not* bump ->s_count.
      408             :  *      The reason why it's safe is that we are OK with doing trylock instead
      409             :  *      of down_read().  There are a couple of places that are OK with that, but
     410             :  *      it's very much not a general-purpose interface.
     411             :  */
     412           0 : bool trylock_super(struct super_block *sb)
     413             : {
     414           0 :         if (down_read_trylock(&sb->s_umount)) {
     415           0 :                 if (!hlist_unhashed(&sb->s_instances) &&
     416           0 :                     sb->s_root && (sb->s_flags & SB_BORN))
     417             :                         return true;
     418           0 :                 up_read(&sb->s_umount);
     419             :         }
     420             : 
     421             :         return false;
     422             : }
     423             : 
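A minimal usage sketch (hypothetical caller, not part of super.c) of the contract
described above: trylock_super() either fails, or succeeds with ->s_umount held
shared, which the caller must release when done.

        /* Illustration only: pin the fs against shutdown while peeking at it. */
        static bool examplefs_peek_caches(struct super_block *sb)
        {
                if (!trylock_super(sb))
                        return false;   /* racing umount, or lock contention */

                /* sb->s_root and SB_BORN are guaranteed here */

                up_read(&sb->s_umount); /* trylock_super() took it shared */
                return true;
        }
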
     424             : /**
     425             :  *      retire_super    -       prevents superblock from being reused
     426             :  *      @sb: superblock to retire
     427             :  *
     428             :  *      The function marks superblock to be ignored in superblock test, which
     429             :  *      prevents it from being reused for any new mounts.  If the superblock has
     430             :  *      a private bdi, it also unregisters it, but doesn't reduce the refcount
     431             :  *      of the superblock to prevent potential races.  The refcount is reduced
      432             :  *      by generic_shutdown_super().  The function cannot be called
     433             :  *      concurrently with generic_shutdown_super().  It is safe to call the
     434             :  *      function multiple times, subsequent calls have no effect.
     435             :  *
     436             :  *      The marker will affect the re-use only for block-device-based
     437             :  *      superblocks.  Other superblocks will still get marked if this function
     438             :  *      is used, but that will not affect their reusability.
     439             :  */
     440           0 : void retire_super(struct super_block *sb)
     441             : {
     442           0 :         WARN_ON(!sb->s_bdev);
     443           0 :         down_write(&sb->s_umount);
     444           0 :         if (sb->s_iflags & SB_I_PERSB_BDI) {
     445           0 :                 bdi_unregister(sb->s_bdi);
     446           0 :                 sb->s_iflags &= ~SB_I_PERSB_BDI;
     447             :         }
     448           0 :         sb->s_iflags |= SB_I_RETIRED;
     449           0 :         up_write(&sb->s_umount);
     450           0 : }
     451             : EXPORT_SYMBOL(retire_super);
     452             : 
     453             : /**
     454             :  *      generic_shutdown_super  -       common helper for ->kill_sb()
     455             :  *      @sb: superblock to kill
     456             :  *
     457             :  *      generic_shutdown_super() does all fs-independent work on superblock
     458             :  *      shutdown.  Typical ->kill_sb() should pick all fs-specific objects
     459             :  *      that need destruction out of superblock, call generic_shutdown_super()
     460             :  *      and release aforementioned objects.  Note: dentries and inodes _are_
     461             :  *      taken care of and do not need specific handling.
     462             :  *
     463             :  *      Upon calling this function, the filesystem may no longer alter or
     464             :  *      rearrange the set of dentries belonging to this super_block, nor may it
     465             :  *      change the attachments of dentries to inodes.
     466             :  */
     467          22 : void generic_shutdown_super(struct super_block *sb)
     468             : {
     469          22 :         const struct super_operations *sop = sb->s_op;
     470             : 
     471          22 :         if (sb->s_root) {
     472          22 :                 shrink_dcache_for_umount(sb);
     473          22 :                 sync_filesystem(sb);
     474          22 :                 sb->s_flags &= ~SB_ACTIVE;
     475             : 
     476             :                 cgroup_writeback_umount();
     477             : 
     478             :                 /* Evict all inodes with zero refcount. */
     479          22 :                 evict_inodes(sb);
     480             : 
     481             :                 /*
     482             :                  * Clean up and evict any inodes that still have references due
     483             :                  * to fsnotify or the security policy.
     484             :                  */
     485          22 :                 fsnotify_sb_delete(sb);
     486          22 :                 security_sb_delete(sb);
     487             : 
     488             :                 /*
     489             :                  * Now that all potentially-encrypted inodes have been evicted,
     490             :                  * the fscrypt keyring can be destroyed.
     491             :                  */
     492          22 :                 fscrypt_destroy_keyring(sb);
     493             : 
     494          22 :                 if (sb->s_dio_done_wq) {
     495           0 :                         destroy_workqueue(sb->s_dio_done_wq);
     496           0 :                         sb->s_dio_done_wq = NULL;
     497             :                 }
     498             : 
     499          22 :                 if (sop->put_super)
     500           0 :                         sop->put_super(sb);
     501             : 
     502          44 :                 if (CHECK_DATA_CORRUPTION(!list_empty(&sb->s_inodes),
     503             :                                 "VFS: Busy inodes after unmount of %s (%s)",
     504             :                                 sb->s_id, sb->s_type->name)) {
     505             :                         /*
     506             :                          * Adding a proper bailout path here would be hard, but
     507             :                          * we can at least make it more likely that a later
     508             :                          * iput_final() or such crashes cleanly.
     509             :                          */
     510             :                         struct inode *inode;
     511             : 
     512           0 :                         spin_lock(&sb->s_inode_list_lock);
     513           0 :                         list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
     514           0 :                                 inode->i_op = VFS_PTR_POISON;
     515           0 :                                 inode->i_sb = VFS_PTR_POISON;
     516           0 :                                 inode->i_mapping = VFS_PTR_POISON;
     517             :                         }
     518           0 :                         spin_unlock(&sb->s_inode_list_lock);
     519             :                 }
     520             :         }
     521          22 :         spin_lock(&sb_lock);
     522             :         /* should be initialized for __put_super_and_need_restart() */
     523          44 :         hlist_del_init(&sb->s_instances);
     524          22 :         spin_unlock(&sb_lock);
     525          22 :         up_write(&sb->s_umount);
     526          22 :         if (sb->s_bdi != &noop_backing_dev_info) {
     527           0 :                 if (sb->s_iflags & SB_I_PERSB_BDI)
     528           0 :                         bdi_unregister(sb->s_bdi);
     529           0 :                 bdi_put(sb->s_bdi);
     530           0 :                 sb->s_bdi = &noop_backing_dev_info;
     531             :         }
     532          22 : }
     533             : 
     534             : EXPORT_SYMBOL(generic_shutdown_super);
     535             : 
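A minimal sketch of the ->kill_sb() pattern described above, assuming a
hypothetical examplefs that keeps private state in sb->s_fs_info: detach the
fs-specific objects, run the generic teardown, then free them.

        #include <linux/slab.h>

        struct examplefs_sb_info {              /* hypothetical private state */
                void *options;
        };

        static void examplefs_kill_sb(struct super_block *sb)
        {
                struct examplefs_sb_info *sbi = sb->s_fs_info;

                generic_shutdown_super(sb);     /* dentries/inodes are handled here */
                kfree(sbi);                     /* now safe to free private state */
        }
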
     536           0 : bool mount_capable(struct fs_context *fc)
     537             : {
     538           0 :         if (!(fc->fs_type->fs_flags & FS_USERNS_MOUNT))
     539           0 :                 return capable(CAP_SYS_ADMIN);
     540             :         else
     541           0 :                 return ns_capable(fc->user_ns, CAP_SYS_ADMIN);
     542             : }
     543             : 
     544             : /**
     545             :  * sget_fc - Find or create a superblock
     546             :  * @fc: Filesystem context.
     547             :  * @test: Comparison callback
     548             :  * @set: Setup callback
     549             :  *
     550             :  * Find or create a superblock using the parameters stored in the filesystem
     551             :  * context and the two callback functions.
     552             :  *
     553             :  * If an extant superblock is matched, then that will be returned with an
     554             :  * elevated reference count that the caller must transfer or discard.
     555             :  *
     556             :  * If no match is made, a new superblock will be allocated and basic
     557             :  * initialisation will be performed (s_type, s_fs_info and s_id will be set and
     558             :  * the set() callback will be invoked), the superblock will be published and it
     559             :  * will be returned in a partially constructed state with SB_BORN and SB_ACTIVE
     560             :  * as yet unset.
     561             :  */
     562          32 : struct super_block *sget_fc(struct fs_context *fc,
     563             :                             int (*test)(struct super_block *, struct fs_context *),
     564             :                             int (*set)(struct super_block *, struct fs_context *))
     565             : {
     566          32 :         struct super_block *s = NULL;
     567             :         struct super_block *old;
     568          32 :         struct user_namespace *user_ns = fc->global ? &init_user_ns : fc->user_ns;
     569             :         int err;
     570             : 
     571             : retry:
     572          64 :         spin_lock(&sb_lock);
     573          64 :         if (test) {
     574           0 :                 hlist_for_each_entry(old, &fc->fs_type->fs_supers, s_instances) {
     575           0 :                         if (test(old, fc))
     576             :                                 goto share_extant_sb;
     577             :                 }
     578             :         }
     579          64 :         if (!s) {
     580          32 :                 spin_unlock(&sb_lock);
     581          32 :                 s = alloc_super(fc->fs_type, fc->sb_flags, user_ns);
     582          32 :                 if (!s)
     583             :                         return ERR_PTR(-ENOMEM);
     584             :                 goto retry;
     585             :         }
     586             : 
     587          32 :         s->s_fs_info = fc->s_fs_info;
     588          32 :         err = set(s, fc);
     589          32 :         if (err) {
     590           0 :                 s->s_fs_info = NULL;
     591           0 :                 spin_unlock(&sb_lock);
     592           0 :                 destroy_unused_super(s);
     593           0 :                 return ERR_PTR(err);
     594             :         }
     595          32 :         fc->s_fs_info = NULL;
     596          32 :         s->s_type = fc->fs_type;
     597          32 :         s->s_iflags |= fc->s_iflags;
     598          32 :         strlcpy(s->s_id, s->s_type->name, sizeof(s->s_id));
     599          64 :         list_add_tail(&s->s_list, &super_blocks);
     600          64 :         hlist_add_head(&s->s_instances, &s->s_type->fs_supers);
     601          32 :         spin_unlock(&sb_lock);
     602          32 :         get_filesystem(s->s_type);
     603          32 :         register_shrinker_prepared(&s->s_shrink);
     604          32 :         return s;
     605             : 
     606             : share_extant_sb:
     607           0 :         if (user_ns != old->s_user_ns) {
     608           0 :                 spin_unlock(&sb_lock);
     609           0 :                 destroy_unused_super(s);
     610           0 :                 return ERR_PTR(-EBUSY);
     611             :         }
     612           0 :         if (!grab_super(old))
     613             :                 goto retry;
     614           0 :         destroy_unused_super(s);
     615           0 :         return old;
     616             : }
     617             : EXPORT_SYMBOL(sget_fc);
     618             : 
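A minimal sketch, using a hypothetical examplefs and fill helper, of how a
fs_context ->get_tree() implementation typically drives sget_fc(): a NULL @test
means a fresh superblock is always allocated, the @set callback claims it, and
the partially constructed superblock is either completed or torn down with
deactivate_locked_super().

        static int examplefs_set_super(struct super_block *sb, struct fs_context *fc)
        {
                return 0;                       /* nothing extra to initialise */
        }

        static int examplefs_get_tree(struct fs_context *fc)
        {
                struct super_block *sb;
                int err;

                sb = sget_fc(fc, NULL, examplefs_set_super);
                if (IS_ERR(sb))
                        return PTR_ERR(sb);

                err = examplefs_fill_super(sb, fc);     /* hypothetical helper */
                if (err) {
                        deactivate_locked_super(sb);    /* drops active ref + lock */
                        return err;
                }
                sb->s_flags |= SB_ACTIVE;
                fc->root = dget(sb->s_root);    /* hand the root back to the VFS,
                                                   which sets SB_BORN and unlocks */
                return 0;
        }
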
     619             : /**
     620             :  *      sget    -       find or create a superblock
     621             :  *      @type:    filesystem type superblock should belong to
     622             :  *      @test:    comparison callback
     623             :  *      @set:     setup callback
     624             :  *      @flags:   mount flags
     625             :  *      @data:    argument to each of them
     626             :  */
     627           0 : struct super_block *sget(struct file_system_type *type,
     628             :                         int (*test)(struct super_block *,void *),
     629             :                         int (*set)(struct super_block *,void *),
     630             :                         int flags,
     631             :                         void *data)
     632             : {
     633           0 :         struct user_namespace *user_ns = current_user_ns();
     634           0 :         struct super_block *s = NULL;
     635             :         struct super_block *old;
     636             :         int err;
     637             : 
     638             :         /* We don't yet pass the user namespace of the parent
     639             :          * mount through to here so always use &init_user_ns
     640             :          * until that changes.
     641             :          */
     642             :         if (flags & SB_SUBMOUNT)
     643             :                 user_ns = &init_user_ns;
     644             : 
     645             : retry:
     646           0 :         spin_lock(&sb_lock);
     647           0 :         if (test) {
     648           0 :                 hlist_for_each_entry(old, &type->fs_supers, s_instances) {
     649           0 :                         if (!test(old, data))
     650           0 :                                 continue;
     651           0 :                         if (user_ns != old->s_user_ns) {
     652           0 :                                 spin_unlock(&sb_lock);
     653           0 :                                 destroy_unused_super(s);
     654           0 :                                 return ERR_PTR(-EBUSY);
     655             :                         }
     656           0 :                         if (!grab_super(old))
     657             :                                 goto retry;
     658           0 :                         destroy_unused_super(s);
     659           0 :                         return old;
     660             :                 }
     661             :         }
     662           0 :         if (!s) {
     663           0 :                 spin_unlock(&sb_lock);
     664           0 :                 s = alloc_super(type, (flags & ~SB_SUBMOUNT), user_ns);
     665           0 :                 if (!s)
     666             :                         return ERR_PTR(-ENOMEM);
     667             :                 goto retry;
     668             :         }
     669             : 
     670           0 :         err = set(s, data);
     671           0 :         if (err) {
     672           0 :                 spin_unlock(&sb_lock);
     673           0 :                 destroy_unused_super(s);
     674           0 :                 return ERR_PTR(err);
     675             :         }
     676           0 :         s->s_type = type;
     677           0 :         strlcpy(s->s_id, type->name, sizeof(s->s_id));
     678           0 :         list_add_tail(&s->s_list, &super_blocks);
     679           0 :         hlist_add_head(&s->s_instances, &type->fs_supers);
     680           0 :         spin_unlock(&sb_lock);
     681           0 :         get_filesystem(type);
     682           0 :         register_shrinker_prepared(&s->s_shrink);
     683           0 :         return s;
     684             : }
     685             : EXPORT_SYMBOL(sget);
     686             : 
     687           0 : void drop_super(struct super_block *sb)
     688             : {
     689           0 :         up_read(&sb->s_umount);
     690           0 :         put_super(sb);
     691           0 : }
     692             : 
     693             : EXPORT_SYMBOL(drop_super);
     694             : 
     695           0 : void drop_super_exclusive(struct super_block *sb)
     696             : {
     697           0 :         up_write(&sb->s_umount);
     698           0 :         put_super(sb);
     699           0 : }
     700             : EXPORT_SYMBOL(drop_super_exclusive);
     701             : 
     702           0 : static void __iterate_supers(void (*f)(struct super_block *))
     703             : {
     704           0 :         struct super_block *sb, *p = NULL;
     705             : 
     706           0 :         spin_lock(&sb_lock);
     707           0 :         list_for_each_entry(sb, &super_blocks, s_list) {
     708           0 :                 if (hlist_unhashed(&sb->s_instances))
     709           0 :                         continue;
     710           0 :                 sb->s_count++;
     711           0 :                 spin_unlock(&sb_lock);
     712             : 
     713           0 :                 f(sb);
     714             : 
     715           0 :                 spin_lock(&sb_lock);
     716           0 :                 if (p)
     717           0 :                         __put_super(p);
     718             :                 p = sb;
     719             :         }
     720           0 :         if (p)
     721           0 :                 __put_super(p);
     722           0 :         spin_unlock(&sb_lock);
     723           0 : }
     724             : /**
     725             :  *      iterate_supers - call function for all active superblocks
     726             :  *      @f: function to call
     727             :  *      @arg: argument to pass to it
     728             :  *
      729             :  *      Scans the superblock list and calls the given function, passing it
      730             :  *      a locked superblock and the given argument.
     731             :  */
     732           0 : void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
     733             : {
     734           0 :         struct super_block *sb, *p = NULL;
     735             : 
     736           0 :         spin_lock(&sb_lock);
     737           0 :         list_for_each_entry(sb, &super_blocks, s_list) {
     738           0 :                 if (hlist_unhashed(&sb->s_instances))
     739           0 :                         continue;
     740           0 :                 sb->s_count++;
     741           0 :                 spin_unlock(&sb_lock);
     742             : 
     743           0 :                 down_read(&sb->s_umount);
     744           0 :                 if (sb->s_root && (sb->s_flags & SB_BORN))
     745           0 :                         f(sb, arg);
     746           0 :                 up_read(&sb->s_umount);
     747             : 
     748           0 :                 spin_lock(&sb_lock);
     749           0 :                 if (p)
     750           0 :                         __put_super(p);
     751             :                 p = sb;
     752             :         }
     753           0 :         if (p)
     754           0 :                 __put_super(p);
     755           0 :         spin_unlock(&sb_lock);
     756           0 : }
     757             : 
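A minimal sketch of an iterate_supers() callback, assuming a hypothetical
statistics counter: the callback runs with ->s_umount held shared and is only
invoked for superblocks that are born and have a root dentry.

        static void examplefs_count_bdev_sbs(struct super_block *sb, void *arg)
        {
                unsigned long *nr = arg;

                if (sb->s_bdev)         /* block-device-backed filesystems only */
                        (*nr)++;
        }

        /* usage: unsigned long nr = 0; iterate_supers(examplefs_count_bdev_sbs, &nr); */
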
     758             : /**
     759             :  *      iterate_supers_type - call function for superblocks of given type
     760             :  *      @type: fs type
     761             :  *      @f: function to call
     762             :  *      @arg: argument to pass to it
     763             :  *
      764             :  *      Scans the superblocks of the given type and calls the given function,
      765             :  *      passing it a locked superblock and the given argument.
     766             :  */
     767           0 : void iterate_supers_type(struct file_system_type *type,
     768             :         void (*f)(struct super_block *, void *), void *arg)
     769             : {
     770           0 :         struct super_block *sb, *p = NULL;
     771             : 
     772           0 :         spin_lock(&sb_lock);
     773           0 :         hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
     774           0 :                 sb->s_count++;
     775           0 :                 spin_unlock(&sb_lock);
     776             : 
     777           0 :                 down_read(&sb->s_umount);
     778           0 :                 if (sb->s_root && (sb->s_flags & SB_BORN))
     779           0 :                         f(sb, arg);
     780           0 :                 up_read(&sb->s_umount);
     781             : 
     782           0 :                 spin_lock(&sb_lock);
     783           0 :                 if (p)
     784           0 :                         __put_super(p);
     785           0 :                 p = sb;
     786             :         }
     787           0 :         if (p)
     788           0 :                 __put_super(p);
     789           0 :         spin_unlock(&sb_lock);
     790           0 : }
     791             : 
     792             : EXPORT_SYMBOL(iterate_supers_type);
     793             : 
     794             : /**
     795             :  * get_super - get the superblock of a device
     796             :  * @bdev: device to get the superblock for
     797             :  *
     798             :  * Scans the superblock list and finds the superblock of the file system
     799             :  * mounted on the device given. %NULL is returned if no match is found.
     800             :  */
     801           0 : struct super_block *get_super(struct block_device *bdev)
     802             : {
     803             :         struct super_block *sb;
     804             : 
     805           0 :         if (!bdev)
     806             :                 return NULL;
     807             : 
     808             :         spin_lock(&sb_lock);
     809             : rescan:
     810           0 :         list_for_each_entry(sb, &super_blocks, s_list) {
     811           0 :                 if (hlist_unhashed(&sb->s_instances))
     812           0 :                         continue;
     813           0 :                 if (sb->s_bdev == bdev) {
     814           0 :                         sb->s_count++;
     815           0 :                         spin_unlock(&sb_lock);
     816           0 :                         down_read(&sb->s_umount);
     817             :                         /* still alive? */
     818           0 :                         if (sb->s_root && (sb->s_flags & SB_BORN))
     819             :                                 return sb;
     820           0 :                         up_read(&sb->s_umount);
     821             :                         /* nope, got unmounted */
     822           0 :                         spin_lock(&sb_lock);
     823           0 :                         __put_super(sb);
     824           0 :                         goto rescan;
     825             :                 }
     826             :         }
     827           0 :         spin_unlock(&sb_lock);
     828           0 :         return NULL;
     829             : }
     830             : 
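A minimal sketch (hypothetical caller) of the pairing get_super() requires: on
success the superblock comes back with an elevated ->s_count and ->s_umount
held shared, so it must be released with drop_super().

        static void examplefs_inspect_bdev_sb(struct block_device *bdev)
        {
                struct super_block *sb = get_super(bdev);

                if (!sb)
                        return;         /* nothing mounted on this device */

                /* sb is pinned and cannot be shut down while we hold s_umount */

                drop_super(sb);         /* up_read(&sb->s_umount) + put_super(sb) */
        }
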
     831             : /**
     832             :  * get_active_super - get an active reference to the superblock of a device
     833             :  * @bdev: device to get the superblock for
     834             :  *
     835             :  * Scans the superblock list and finds the superblock of the file system
     836             :  * mounted on the device given.  Returns the superblock with an active
     837             :  * reference or %NULL if none was found.
     838             :  */
     839           0 : struct super_block *get_active_super(struct block_device *bdev)
     840             : {
     841             :         struct super_block *sb;
     842             : 
     843           0 :         if (!bdev)
     844             :                 return NULL;
     845             : 
     846             : restart:
     847           0 :         spin_lock(&sb_lock);
     848           0 :         list_for_each_entry(sb, &super_blocks, s_list) {
     849           0 :                 if (hlist_unhashed(&sb->s_instances))
     850           0 :                         continue;
     851           0 :                 if (sb->s_bdev == bdev) {
     852           0 :                         if (!grab_super(sb))
     853             :                                 goto restart;
     854           0 :                         up_write(&sb->s_umount);
     855           0 :                         return sb;
     856             :                 }
     857             :         }
     858           0 :         spin_unlock(&sb_lock);
     859           0 :         return NULL;
     860             : }
     861             : 
     862           0 : struct super_block *user_get_super(dev_t dev, bool excl)
     863             : {
     864             :         struct super_block *sb;
     865             : 
     866             :         spin_lock(&sb_lock);
     867             : rescan:
     868           0 :         list_for_each_entry(sb, &super_blocks, s_list) {
     869           0 :                 if (hlist_unhashed(&sb->s_instances))
     870           0 :                         continue;
     871           0 :                 if (sb->s_dev ==  dev) {
     872           0 :                         sb->s_count++;
     873           0 :                         spin_unlock(&sb_lock);
     874           0 :                         if (excl)
     875           0 :                                 down_write(&sb->s_umount);
     876             :                         else
     877           0 :                                 down_read(&sb->s_umount);
     878             :                         /* still alive? */
     879           0 :                         if (sb->s_root && (sb->s_flags & SB_BORN))
     880             :                                 return sb;
     881           0 :                         if (excl)
     882           0 :                                 up_write(&sb->s_umount);
     883             :                         else
     884           0 :                                 up_read(&sb->s_umount);
     885             :                         /* nope, got unmounted */
     886           0 :                         spin_lock(&sb_lock);
     887           0 :                         __put_super(sb);
     888           0 :                         goto rescan;
     889             :                 }
     890             :         }
     891           0 :         spin_unlock(&sb_lock);
     892           0 :         return NULL;
     893             : }
     894             : 
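A minimal sketch of the exclusive variant, again with a hypothetical caller:
user_get_super(dev, true) returns with ->s_umount held for write, so it pairs
with drop_super_exclusive().

        static int examplefs_poke_sb_by_dev(dev_t dev)
        {
                struct super_block *sb = user_get_super(dev, true);

                if (!sb)
                        return -ENODEV;

                /* exclusive: no remount or umount can run on this sb right now */

                drop_super_exclusive(sb);       /* up_write() + put_super() */
                return 0;
        }
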
     895             : /**
     896             :  * reconfigure_super - asks filesystem to change superblock parameters
     897             :  * @fc: The superblock and configuration
     898             :  *
     899             :  * Alters the configuration parameters of a live superblock.
     900             :  */
     901           0 : int reconfigure_super(struct fs_context *fc)
     902             : {
     903           0 :         struct super_block *sb = fc->root->d_sb;
     904             :         int retval;
     905           0 :         bool remount_ro = false;
     906           0 :         bool force = fc->sb_flags & SB_FORCE;
     907             : 
     908           0 :         if (fc->sb_flags_mask & ~MS_RMT_MASK)
     909             :                 return -EINVAL;
     910           0 :         if (sb->s_writers.frozen != SB_UNFROZEN)
     911             :                 return -EBUSY;
     912             : 
     913           0 :         retval = security_sb_remount(sb, fc->security);
     914             :         if (retval)
     915             :                 return retval;
     916             : 
     917           0 :         if (fc->sb_flags_mask & SB_RDONLY) {
     918             : #ifdef CONFIG_BLOCK
     919           0 :                 if (!(fc->sb_flags & SB_RDONLY) && sb->s_bdev &&
     920           0 :                     bdev_read_only(sb->s_bdev))
     921             :                         return -EACCES;
     922             : #endif
     923             : 
     924           0 :                 remount_ro = (fc->sb_flags & SB_RDONLY) && !sb_rdonly(sb);
     925             :         }
     926             : 
     927           0 :         if (remount_ro) {
     928           0 :                 if (!hlist_empty(&sb->s_pins)) {
     929           0 :                         up_write(&sb->s_umount);
     930           0 :                         group_pin_kill(&sb->s_pins);
     931           0 :                         down_write(&sb->s_umount);
     932           0 :                         if (!sb->s_root)
     933             :                                 return 0;
     934           0 :                         if (sb->s_writers.frozen != SB_UNFROZEN)
     935             :                                 return -EBUSY;
     936           0 :                         remount_ro = !sb_rdonly(sb);
     937             :                 }
     938             :         }
     939           0 :         shrink_dcache_sb(sb);
     940             : 
     941             :         /* If we are reconfiguring to RDONLY and current sb is read/write,
     942             :          * make sure there are no files open for writing.
     943             :          */
     944           0 :         if (remount_ro) {
     945           0 :                 if (force) {
     946           0 :                         sb->s_readonly_remount = 1;
     947           0 :                         smp_wmb();
     948             :                 } else {
     949           0 :                         retval = sb_prepare_remount_readonly(sb);
     950           0 :                         if (retval)
     951             :                                 return retval;
     952             :                 }
     953             :         }
     954             : 
     955           0 :         if (fc->ops->reconfigure) {
     956           0 :                 retval = fc->ops->reconfigure(fc);
     957           0 :                 if (retval) {
     958           0 :                         if (!force)
     959             :                                 goto cancel_readonly;
     960             :                         /* If forced remount, go ahead despite any errors */
     961           0 :                         WARN(1, "forced remount of a %s fs returned %i\n",
     962             :                              sb->s_type->name, retval);
     963             :                 }
     964             :         }
     965             : 
     966           0 :         WRITE_ONCE(sb->s_flags, ((sb->s_flags & ~fc->sb_flags_mask) |
     967             :                                  (fc->sb_flags & fc->sb_flags_mask)));
     968             :         /* Needs to be ordered wrt mnt_is_readonly() */
     969           0 :         smp_wmb();
     970           0 :         sb->s_readonly_remount = 0;
     971             : 
     972             :         /*
     973             :          * Some filesystems modify their metadata via some other path than the
     974             :          * bdev buffer cache (eg. use a private mapping, or directories in
     975             :          * pagecache, etc). Also file data modifications go via their own
      976             :          * mappings. So if we try to mount readonly and then copy the filesystem
     977             :          * from bdev, we could get stale data, so invalidate it to give a best
     978             :          * effort at coherency.
     979             :          */
     980           0 :         if (remount_ro && sb->s_bdev)
     981           0 :                 invalidate_bdev(sb->s_bdev);
     982             :         return 0;
     983             : 
     984             : cancel_readonly:
     985           0 :         sb->s_readonly_remount = 0;
     986           0 :         return retval;
     987             : }
     988             : 
     989           0 : static void do_emergency_remount_callback(struct super_block *sb)
     990             : {
     991           0 :         down_write(&sb->s_umount);
     992           0 :         if (sb->s_root && sb->s_bdev && (sb->s_flags & SB_BORN) &&
     993           0 :             !sb_rdonly(sb)) {
     994             :                 struct fs_context *fc;
     995             : 
     996           0 :                 fc = fs_context_for_reconfigure(sb->s_root,
     997             :                                         SB_RDONLY | SB_FORCE, SB_RDONLY);
     998           0 :                 if (!IS_ERR(fc)) {
     999           0 :                         if (parse_monolithic_mount_data(fc, NULL) == 0)
    1000           0 :                                 (void)reconfigure_super(fc);
    1001           0 :                         put_fs_context(fc);
    1002             :                 }
    1003             :         }
    1004           0 :         up_write(&sb->s_umount);
    1005           0 : }
    1006             : 
    1007           0 : static void do_emergency_remount(struct work_struct *work)
    1008             : {
    1009           0 :         __iterate_supers(do_emergency_remount_callback);
    1010           0 :         kfree(work);
    1011           0 :         printk("Emergency Remount complete\n");
    1012           0 : }
    1013             : 
    1014           0 : void emergency_remount(void)
    1015             : {
    1016             :         struct work_struct *work;
    1017             : 
    1018           0 :         work = kmalloc(sizeof(*work), GFP_ATOMIC);
    1019           0 :         if (work) {
    1020           0 :                 INIT_WORK(work, do_emergency_remount);
    1021             :                 schedule_work(work);
    1022             :         }
    1023           0 : }
    1024             : 
    1025           0 : static void do_thaw_all_callback(struct super_block *sb)
    1026             : {
    1027           0 :         down_write(&sb->s_umount);
    1028           0 :         if (sb->s_root && sb->s_flags & SB_BORN) {
    1029           0 :                 emergency_thaw_bdev(sb);
    1030           0 :                 thaw_super_locked(sb);
    1031             :         } else {
    1032           0 :                 up_write(&sb->s_umount);
    1033             :         }
    1034           0 : }
    1035             : 
    1036           0 : static void do_thaw_all(struct work_struct *work)
    1037             : {
    1038           0 :         __iterate_supers(do_thaw_all_callback);
    1039           0 :         kfree(work);
    1040           0 :         printk(KERN_WARNING "Emergency Thaw complete\n");
    1041           0 : }
    1042             : 
    1043             : /**
    1044             :  * emergency_thaw_all -- forcibly thaw every frozen filesystem
    1045             :  *
    1046             :  * Used for emergency unfreeze of all filesystems via SysRq
    1047             :  */
    1048           0 : void emergency_thaw_all(void)
    1049             : {
    1050             :         struct work_struct *work;
    1051             : 
    1052           0 :         work = kmalloc(sizeof(*work), GFP_ATOMIC);
    1053           0 :         if (work) {
    1054           0 :                 INIT_WORK(work, do_thaw_all);
    1055             :                 schedule_work(work);
    1056             :         }
    1057           0 : }
    1058             : 
    1059             : static DEFINE_IDA(unnamed_dev_ida);
    1060             : 
    1061             : /**
    1062             :  * get_anon_bdev - Allocate a block device for filesystems which don't have one.
    1063             :  * @p: Pointer to a dev_t.
    1064             :  *
    1065             :  * Filesystems which don't use real block devices can call this function
    1066             :  * to allocate a virtual block device.
    1067             :  *
    1068             :  * Context: Any context.  Frequently called while holding sb_lock.
    1069             :  * Return: 0 on success, -EMFILE if there are no anonymous bdevs left
    1070             :  * or -ENOMEM if memory allocation failed.
    1071             :  */
    1072          32 : int get_anon_bdev(dev_t *p)
    1073             : {
    1074             :         int dev;
    1075             : 
    1076             :         /*
    1077             :          * Many userspace utilities consider an FSID of 0 invalid.
    1078             :          * Always return at least 1 from get_anon_bdev.
    1079             :          */
    1080          32 :         dev = ida_alloc_range(&unnamed_dev_ida, 1, (1 << MINORBITS) - 1,
    1081             :                         GFP_ATOMIC);
    1082          32 :         if (dev == -ENOSPC)
    1083           0 :                 dev = -EMFILE;
    1084          32 :         if (dev < 0)
    1085             :                 return dev;
    1086             : 
    1087          32 :         *p = MKDEV(0, dev);
    1088          32 :         return 0;
    1089             : }
    1090             : EXPORT_SYMBOL(get_anon_bdev);
    1091             : 
    1092           0 : void free_anon_bdev(dev_t dev)
    1093             : {
    1094          22 :         ida_free(&unnamed_dev_ida, MINOR(dev));
    1095           0 : }
    1096             : EXPORT_SYMBOL(free_anon_bdev);
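/*
 * Illustrative sketch, not part of fs/super.c: a virtual filesystem with no
 * backing block device pairs get_anon_bdev() with free_anon_bdev() to manage
 * the dev_t it exposes via stat(2)/statfs(2).  In practice most filesystems
 * get this for free through set_anon_super()/kill_anon_super() below; the
 * "examplefs_" names here are hypothetical.
 */
static int examplefs_assign_dev(struct super_block *sb)
{
        /* allocates an anonymous 0:<minor> device number; never minor 0 */
        return get_anon_bdev(&sb->s_dev);
}

static void examplefs_release_dev(struct super_block *sb)
{
        /* give the minor back to the unnamed-device IDA at teardown */
        free_anon_bdev(sb->s_dev);
}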
    1097             : 
    1098           0 : int set_anon_super(struct super_block *s, void *data)
    1099             : {
    1100          32 :         return get_anon_bdev(&s->s_dev);
    1101             : }
    1102             : EXPORT_SYMBOL(set_anon_super);
    1103             : 
    1104          22 : void kill_anon_super(struct super_block *sb)
    1105             : {
    1106          22 :         dev_t dev = sb->s_dev;
    1107          22 :         generic_shutdown_super(sb);
    1108          22 :         free_anon_bdev(dev);
    1109          22 : }
    1110             : EXPORT_SYMBOL(kill_anon_super);
    1111             : 
    1112           0 : void kill_litter_super(struct super_block *sb)
    1113             : {
    1114           0 :         if (sb->s_root)
    1115           0 :                 d_genocide(sb->s_root);
    1116           0 :         kill_anon_super(sb);
    1117           0 : }
    1118             : EXPORT_SYMBOL(kill_litter_super);
    1119             : 
    1120          32 : int set_anon_super_fc(struct super_block *sb, struct fs_context *fc)
    1121             : {
    1122          32 :         return set_anon_super(sb, NULL);
    1123             : }
    1124             : EXPORT_SYMBOL(set_anon_super_fc);
    1125             : 
    1126           0 : static int test_keyed_super(struct super_block *sb, struct fs_context *fc)
    1127             : {
    1128           0 :         return sb->s_fs_info == fc->s_fs_info;
    1129             : }
    1130             : 
    1131           0 : static int test_single_super(struct super_block *s, struct fs_context *fc)
    1132             : {
    1133           0 :         return 1;
    1134             : }
    1135             : 
    1136          32 : static int vfs_get_super(struct fs_context *fc, bool reconf,
    1137             :                 int (*test)(struct super_block *, struct fs_context *),
    1138             :                 int (*fill_super)(struct super_block *sb,
    1139             :                                   struct fs_context *fc))
    1140             : {
    1141             :         struct super_block *sb;
    1142             :         int err;
    1143             : 
    1144          32 :         sb = sget_fc(fc, test, set_anon_super_fc);
    1145          32 :         if (IS_ERR(sb))
    1146           0 :                 return PTR_ERR(sb);
    1147             : 
    1148          32 :         if (!sb->s_root) {
    1149          32 :                 err = fill_super(sb, fc);
    1150          32 :                 if (err)
    1151             :                         goto error;
    1152             : 
    1153          32 :                 sb->s_flags |= SB_ACTIVE;
    1154          64 :                 fc->root = dget(sb->s_root);
    1155             :         } else {
    1156           0 :                 fc->root = dget(sb->s_root);
    1157           0 :                 if (reconf) {
    1158           0 :                         err = reconfigure_super(fc);
    1159           0 :                         if (err < 0) {
    1160           0 :                                 dput(fc->root);
    1161           0 :                                 fc->root = NULL;
    1162           0 :                                 goto error;
    1163             :                         }
    1164             :                 }
    1165             :         }
    1166             : 
    1167             :         return 0;
    1168             : 
    1169             : error:
    1170           0 :         deactivate_locked_super(sb);
    1171           0 :         return err;
    1172             : }
    1173             : 
    1174          32 : int get_tree_nodev(struct fs_context *fc,
    1175             :                   int (*fill_super)(struct super_block *sb,
    1176             :                                     struct fs_context *fc))
    1177             : {
    1178          32 :         return vfs_get_super(fc, false, NULL, fill_super);
    1179             : }
    1180             : EXPORT_SYMBOL(get_tree_nodev);
    1181             : 
    1182           0 : int get_tree_single(struct fs_context *fc,
    1183             :                   int (*fill_super)(struct super_block *sb,
    1184             :                                     struct fs_context *fc))
    1185             : {
    1186           0 :         return vfs_get_super(fc, false, test_single_super, fill_super);
    1187             : }
    1188             : EXPORT_SYMBOL(get_tree_single);
    1189             : 
    1190           0 : int get_tree_single_reconf(struct fs_context *fc,
    1191             :                   int (*fill_super)(struct super_block *sb,
    1192             :                                     struct fs_context *fc))
    1193             : {
    1194           0 :         return vfs_get_super(fc, true, test_single_super, fill_super);
    1195             : }
    1196             : EXPORT_SYMBOL(get_tree_single_reconf);
    1197             : 
    1198           0 : int get_tree_keyed(struct fs_context *fc,
    1199             :                   int (*fill_super)(struct super_block *sb,
    1200             :                                     struct fs_context *fc),
    1201             :                 void *key)
    1202             : {
    1203           0 :         fc->s_fs_info = key;
    1204           0 :         return vfs_get_super(fc, false, test_keyed_super, fill_super);
    1205             : }
    1206             : EXPORT_SYMBOL(get_tree_keyed);
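/*
 * Illustrative sketch, not part of fs/super.c: a minimal in-memory filesystem
 * wiring get_tree_nodev() into the fs_context API, so every mount gets its
 * own superblock with an anonymous dev_t.  All "examplefs" identifiers are
 * hypothetical and the superblock setup is elided.
 */
static int examplefs_fill_super(struct super_block *sb, struct fs_context *fc)
{
        /* set s_op/s_maxbytes, allocate the root inode, d_make_root(), ... */
        return 0;
}

static int examplefs_get_tree(struct fs_context *fc)
{
        return get_tree_nodev(fc, examplefs_fill_super);
}

static const struct fs_context_operations examplefs_context_ops = {
        .get_tree       = examplefs_get_tree,
};

static int examplefs_init_fs_context(struct fs_context *fc)
{
        fc->ops = &examplefs_context_ops;
        return 0;
}

static struct file_system_type examplefs_type = {
        .owner           = THIS_MODULE,
        .name            = "examplefs",
        .init_fs_context = examplefs_init_fs_context,
        .kill_sb         = kill_anon_super,
};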
    1207             : 
    1208             : #ifdef CONFIG_BLOCK
    1209             : 
    1210           0 : static int set_bdev_super(struct super_block *s, void *data)
    1211             : {
    1212           0 :         s->s_bdev = data;
    1213           0 :         s->s_dev = s->s_bdev->bd_dev;
    1214           0 :         s->s_bdi = bdi_get(s->s_bdev->bd_disk->bdi);
    1215             : 
    1216           0 :         if (bdev_stable_writes(s->s_bdev))
    1217           0 :                 s->s_iflags |= SB_I_STABLE_WRITES;
    1218           0 :         return 0;
    1219             : }
    1220             : 
    1221           0 : static int set_bdev_super_fc(struct super_block *s, struct fs_context *fc)
    1222             : {
    1223           0 :         return set_bdev_super(s, fc->sget_key);
    1224             : }
    1225             : 
    1226           0 : static int test_bdev_super_fc(struct super_block *s, struct fs_context *fc)
    1227             : {
    1228           0 :         return !(s->s_iflags & SB_I_RETIRED) && s->s_bdev == fc->sget_key;
    1229             : }
    1230             : 
    1231             : /**
    1232             :  * get_tree_bdev - Get a superblock based on a single block device
    1233             :  * @fc: The filesystem context holding the parameters
    1234             :  * @fill_super: Helper to initialise a new superblock
    1235             :  */
    1236           0 : int get_tree_bdev(struct fs_context *fc,
    1237             :                 int (*fill_super)(struct super_block *,
    1238             :                                   struct fs_context *))
    1239             : {
    1240             :         struct block_device *bdev;
    1241             :         struct super_block *s;
    1242           0 :         fmode_t mode = FMODE_READ | FMODE_EXCL;
    1243           0 :         int error = 0;
    1244             : 
    1245           0 :         if (!(fc->sb_flags & SB_RDONLY))
    1246           0 :                 mode |= FMODE_WRITE;
    1247             : 
    1248           0 :         if (!fc->source)
    1249           0 :                 return invalf(fc, "No source specified");
    1250             : 
    1251           0 :         bdev = blkdev_get_by_path(fc->source, mode, fc->fs_type);
    1252           0 :         if (IS_ERR(bdev)) {
    1253           0 :                 errorf(fc, "%s: Can't open blockdev", fc->source);
    1254           0 :                 return PTR_ERR(bdev);
    1255             :         }
    1256             : 
    1257             :         /* Once the superblock is inserted into the list by sget_fc(), s_umount
    1258             :          * will protect the lockfs code from trying to start a snapshot while
    1259             :          * we are mounting
    1260             :          */
    1261           0 :         mutex_lock(&bdev->bd_fsfreeze_mutex);
    1262           0 :         if (bdev->bd_fsfreeze_count > 0) {
    1263           0 :                 mutex_unlock(&bdev->bd_fsfreeze_mutex);
    1264           0 :                 warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
    1265           0 :                 blkdev_put(bdev, mode);
    1266           0 :                 return -EBUSY;
    1267             :         }
    1268             : 
    1269           0 :         fc->sb_flags |= SB_NOSEC;
    1270           0 :         fc->sget_key = bdev;
    1271           0 :         s = sget_fc(fc, test_bdev_super_fc, set_bdev_super_fc);
    1272           0 :         mutex_unlock(&bdev->bd_fsfreeze_mutex);
    1273           0 :         if (IS_ERR(s)) {
    1274           0 :                 blkdev_put(bdev, mode);
    1275           0 :                 return PTR_ERR(s);
    1276             :         }
    1277             : 
    1278           0 :         if (s->s_root) {
    1279             :                 /* Don't summarily change the RO/RW state. */
    1280           0 :                 if ((fc->sb_flags ^ s->s_flags) & SB_RDONLY) {
    1281           0 :                         warnf(fc, "%pg: Can't mount, would change RO state", bdev);
    1282           0 :                         deactivate_locked_super(s);
    1283           0 :                         blkdev_put(bdev, mode);
    1284           0 :                         return -EBUSY;
    1285             :                 }
    1286             : 
    1287             :                 /*
    1288             :                  * s_umount nests inside open_mutex during
    1289             :                  * __invalidate_device().  blkdev_put() acquires
    1290             :                  * open_mutex and can't be called under s_umount.  Drop
    1291             :                  * s_umount temporarily.  This is safe as we're
    1292             :                  * holding an active reference.
    1293             :                  */
    1294           0 :                 up_write(&s->s_umount);
    1295           0 :                 blkdev_put(bdev, mode);
    1296           0 :                 down_write(&s->s_umount);
    1297             :         } else {
    1298           0 :                 s->s_mode = mode;
    1299           0 :                 snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
    1300           0 :                 shrinker_debugfs_rename(&s->s_shrink, "sb-%s:%s",
    1301             :                                         fc->fs_type->name, s->s_id);
    1302           0 :                 sb_set_blocksize(s, block_size(bdev));
    1303           0 :                 error = fill_super(s, fc);
    1304           0 :                 if (error) {
    1305           0 :                         deactivate_locked_super(s);
    1306           0 :                         return error;
    1307             :                 }
    1308             : 
    1309           0 :                 s->s_flags |= SB_ACTIVE;
    1310           0 :                 bdev->bd_super = s;
    1311             :         }
    1312             : 
    1313           0 :         BUG_ON(fc->root);
    1314           0 :         fc->root = dget(s->s_root);
    1315           0 :         return 0;
    1316             : }
    1317             : EXPORT_SYMBOL(get_tree_bdev);
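/*
 * Illustrative sketch, not part of fs/super.c: a block-device-backed
 * filesystem typically forwards its ->get_tree hook to get_tree_bdev() and
 * pairs it with kill_block_super().  The "examplefs_bdev" names are
 * hypothetical; on-disk parsing is elided.
 */
static int examplefs_bdev_fill_super(struct super_block *sb,
                                     struct fs_context *fc)
{
        /* read the on-disk superblock from sb->s_bdev, set s_op, build root */
        return 0;
}

static int examplefs_bdev_get_tree(struct fs_context *fc)
{
        return get_tree_bdev(fc, examplefs_bdev_fill_super);
}

static const struct fs_context_operations examplefs_bdev_context_ops = {
        .get_tree       = examplefs_bdev_get_tree,
};

static int examplefs_bdev_init_fs_context(struct fs_context *fc)
{
        fc->ops = &examplefs_bdev_context_ops;
        return 0;
}

static struct file_system_type examplefs_bdev_type = {
        .owner           = THIS_MODULE,
        .name            = "examplefs_bdev",
        .init_fs_context = examplefs_bdev_init_fs_context,
        .kill_sb         = kill_block_super,
        .fs_flags        = FS_REQUIRES_DEV,
};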
    1318             : 
    1319           0 : static int test_bdev_super(struct super_block *s, void *data)
    1320             : {
    1321           0 :         return !(s->s_iflags & SB_I_RETIRED) && (void *)s->s_bdev == data;
    1322             : }
    1323             : 
    1324           0 : struct dentry *mount_bdev(struct file_system_type *fs_type,
    1325             :         int flags, const char *dev_name, void *data,
    1326             :         int (*fill_super)(struct super_block *, void *, int))
    1327             : {
    1328             :         struct block_device *bdev;
    1329             :         struct super_block *s;
    1330           0 :         fmode_t mode = FMODE_READ | FMODE_EXCL;
    1331           0 :         int error = 0;
    1332             : 
    1333           0 :         if (!(flags & SB_RDONLY))
    1334           0 :                 mode |= FMODE_WRITE;
    1335             : 
    1336           0 :         bdev = blkdev_get_by_path(dev_name, mode, fs_type);
    1337           0 :         if (IS_ERR(bdev))
    1338             :                 return ERR_CAST(bdev);
    1339             : 
    1340             :         /*
    1341             :          * once the super is inserted into the list by sget, s_umount
    1342             :          * will protect the lockfs code from trying to start a snapshot
    1343             :          * while we are mounting
    1344             :          */
    1345           0 :         mutex_lock(&bdev->bd_fsfreeze_mutex);
    1346           0 :         if (bdev->bd_fsfreeze_count > 0) {
    1347           0 :                 mutex_unlock(&bdev->bd_fsfreeze_mutex);
    1348           0 :                 error = -EBUSY;
    1349           0 :                 goto error_bdev;
    1350             :         }
    1351           0 :         s = sget(fs_type, test_bdev_super, set_bdev_super, flags | SB_NOSEC,
    1352             :                  bdev);
    1353           0 :         mutex_unlock(&bdev->bd_fsfreeze_mutex);
    1354           0 :         if (IS_ERR(s))
    1355             :                 goto error_s;
    1356             : 
    1357           0 :         if (s->s_root) {
    1358           0 :                 if ((flags ^ s->s_flags) & SB_RDONLY) {
    1359           0 :                         deactivate_locked_super(s);
    1360           0 :                         error = -EBUSY;
    1361           0 :                         goto error_bdev;
    1362             :                 }
    1363             : 
    1364             :                 /*
    1365             :                  * s_umount nests inside open_mutex during
    1366             :                  * __invalidate_device().  blkdev_put() acquires
    1367             :                  * open_mutex and can't be called under s_umount.  Drop
    1368             :                  * s_umount temporarily.  This is safe as we're
    1369             :                  * holding an active reference.
    1370             :                  */
    1371           0 :                 up_write(&s->s_umount);
    1372           0 :                 blkdev_put(bdev, mode);
    1373           0 :                 down_write(&s->s_umount);
    1374             :         } else {
    1375           0 :                 s->s_mode = mode;
    1376           0 :                 snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
    1377           0 :                 shrinker_debugfs_rename(&s->s_shrink, "sb-%s:%s",
    1378             :                                         fs_type->name, s->s_id);
    1379           0 :                 sb_set_blocksize(s, block_size(bdev));
    1380           0 :                 error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
    1381           0 :                 if (error) {
    1382           0 :                         deactivate_locked_super(s);
    1383           0 :                         goto error;
    1384             :                 }
    1385             : 
    1386           0 :                 s->s_flags |= SB_ACTIVE;
    1387           0 :                 bdev->bd_super = s;
    1388             :         }
    1389             : 
    1390           0 :         return dget(s->s_root);
    1391             : 
    1392             : error_s:
    1393           0 :         error = PTR_ERR(s);
    1394             : error_bdev:
    1395           0 :         blkdev_put(bdev, mode);
    1396             : error:
    1397           0 :         return ERR_PTR(error);
    1398             : }
    1399             : EXPORT_SYMBOL(mount_bdev);
    1400             : 
    1401           0 : void kill_block_super(struct super_block *sb)
    1402             : {
    1403           0 :         struct block_device *bdev = sb->s_bdev;
    1404           0 :         fmode_t mode = sb->s_mode;
    1405             : 
    1406           0 :         bdev->bd_super = NULL;
    1407           0 :         generic_shutdown_super(sb);
    1408           0 :         sync_blockdev(bdev);
    1409           0 :         WARN_ON_ONCE(!(mode & FMODE_EXCL));
    1410           0 :         blkdev_put(bdev, mode | FMODE_EXCL);
    1411           0 : }
    1412             : 
    1413             : EXPORT_SYMBOL(kill_block_super);
    1414             : #endif
    1415             : 
    1416           0 : struct dentry *mount_nodev(struct file_system_type *fs_type,
    1417             :         int flags, void *data,
    1418             :         int (*fill_super)(struct super_block *, void *, int))
    1419             : {
    1420             :         int error;
    1421           0 :         struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL);
    1422             : 
    1423           0 :         if (IS_ERR(s))
    1424             :                 return ERR_CAST(s);
    1425             : 
    1426           0 :         error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
    1427           0 :         if (error) {
    1428           0 :                 deactivate_locked_super(s);
    1429           0 :                 return ERR_PTR(error);
    1430             :         }
    1431           0 :         s->s_flags |= SB_ACTIVE;
    1432           0 :         return dget(s->s_root);
    1433             : }
    1434             : EXPORT_SYMBOL(mount_nodev);
    1435             : 
    1436           0 : int reconfigure_single(struct super_block *s,
    1437             :                        int flags, void *data)
    1438             : {
    1439             :         struct fs_context *fc;
    1440             :         int ret;
    1441             : 
     1442             :         /* The caller really needs to be passing fc down into mount_single(),
    1443             :          * then a chunk of this can be removed.  [Bollocks -- AV]
    1444             :          * Better yet, reconfiguration shouldn't happen, but rather the second
    1445             :          * mount should be rejected if the parameters are not compatible.
    1446             :          */
    1447           0 :         fc = fs_context_for_reconfigure(s->s_root, flags, MS_RMT_MASK);
    1448           0 :         if (IS_ERR(fc))
    1449           0 :                 return PTR_ERR(fc);
    1450             : 
    1451           0 :         ret = parse_monolithic_mount_data(fc, data);
    1452           0 :         if (ret < 0)
    1453             :                 goto out;
    1454             : 
    1455           0 :         ret = reconfigure_super(fc);
    1456             : out:
    1457           0 :         put_fs_context(fc);
    1458           0 :         return ret;
    1459             : }
    1460             : 
    1461           0 : static int compare_single(struct super_block *s, void *p)
    1462             : {
    1463           0 :         return 1;
    1464             : }
    1465             : 
    1466           0 : struct dentry *mount_single(struct file_system_type *fs_type,
    1467             :         int flags, void *data,
    1468             :         int (*fill_super)(struct super_block *, void *, int))
    1469             : {
    1470             :         struct super_block *s;
    1471             :         int error;
    1472             : 
    1473           0 :         s = sget(fs_type, compare_single, set_anon_super, flags, NULL);
    1474           0 :         if (IS_ERR(s))
    1475             :                 return ERR_CAST(s);
    1476           0 :         if (!s->s_root) {
    1477           0 :                 error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
    1478           0 :                 if (!error)
    1479           0 :                         s->s_flags |= SB_ACTIVE;
    1480             :         } else {
    1481           0 :                 error = reconfigure_single(s, flags, data);
    1482             :         }
    1483           0 :         if (unlikely(error)) {
    1484           0 :                 deactivate_locked_super(s);
    1485           0 :                 return ERR_PTR(error);
    1486             :         }
    1487           0 :         return dget(s->s_root);
    1488             : }
    1489             : EXPORT_SYMBOL(mount_single);
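/*
 * Illustrative sketch, not part of fs/super.c: the legacy (pre-fs_context)
 * entry point.  A filesystem that has not been converted supplies a ->mount
 * hook and forwards to mount_nodev(), mount_single() or mount_bdev().  The
 * "examplefs_legacy" names are hypothetical.
 */
static int examplefs_legacy_fill_super(struct super_block *sb, void *data,
                                       int silent)
{
        /* parse 'data' (the mount option string) and build the root dentry */
        return 0;
}

static struct dentry *examplefs_legacy_mount(struct file_system_type *fs_type,
                int flags, const char *dev_name, void *data)
{
        /* one shared superblock for every mount of this type */
        return mount_single(fs_type, flags, data, examplefs_legacy_fill_super);
}

static struct file_system_type examplefs_legacy_type = {
        .owner   = THIS_MODULE,
        .name    = "examplefs_legacy",
        .mount   = examplefs_legacy_mount,
        .kill_sb = kill_anon_super,
};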
    1490             : 
    1491             : /**
    1492             :  * vfs_get_tree - Get the mountable root
    1493             :  * @fc: The superblock configuration context.
    1494             :  *
    1495             :  * The filesystem is invoked to get or create a superblock which can then later
    1496             :  * be used for mounting.  The filesystem places a pointer to the root to be
    1497             :  * used for mounting in @fc->root.
    1498             :  */
    1499          32 : int vfs_get_tree(struct fs_context *fc)
    1500             : {
    1501             :         struct super_block *sb;
    1502             :         int error;
    1503             : 
    1504          32 :         if (fc->root)
    1505             :                 return -EBUSY;
    1506             : 
    1507             :         /* Get the mountable root in fc->root, with a ref on the root and a ref
    1508             :          * on the superblock.
    1509             :          */
    1510          32 :         error = fc->ops->get_tree(fc);
    1511          32 :         if (error < 0)
    1512             :                 return error;
    1513             : 
    1514          32 :         if (!fc->root) {
    1515           0 :                 pr_err("Filesystem %s get_tree() didn't set fc->root\n",
    1516             :                        fc->fs_type->name);
    1517             :                 /* We don't know what the locking state of the superblock is -
    1518             :                  * if there is a superblock.
    1519             :                  */
    1520           0 :                 BUG();
    1521             :         }
    1522             : 
    1523          32 :         sb = fc->root->d_sb;
    1524          32 :         WARN_ON(!sb->s_bdi);
    1525             : 
    1526             :         /*
    1527             :          * Write barrier is for super_cache_count(). We place it before setting
    1528             :          * SB_BORN as the data dependency between the two functions is the
    1529             :          * superblock structure contents that we just set up, not the SB_BORN
    1530             :          * flag.
    1531             :          */
    1532          32 :         smp_wmb();
    1533          32 :         sb->s_flags |= SB_BORN;
    1534             : 
    1535          32 :         error = security_sb_set_mnt_opts(sb, fc->security, 0, NULL);
    1536             :         if (unlikely(error)) {
    1537             :                 fc_drop_locked(fc);
    1538             :                 return error;
    1539             :         }
    1540             : 
    1541             :         /*
    1542             :          * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
    1543             :          * but s_maxbytes was an unsigned long long for many releases. Throw
    1544             :          * this warning for a little while to try and catch filesystems that
    1545             :          * violate this rule.
    1546             :          */
    1547          32 :         WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
    1548             :                 "negative value (%lld)\n", fc->fs_type->name, sb->s_maxbytes);
    1549             : 
    1550             :         return 0;
    1551             : }
    1552             : EXPORT_SYMBOL(vfs_get_tree);
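/*
 * Illustrative sketch, not part of fs/super.c: an in-kernel caller turning a
 * configured fs_context into a vfsmount.  This mirrors what fc_mount() in
 * fs/namespace.c does; example_mount_internal() itself is hypothetical.
 */
static struct vfsmount *example_mount_internal(struct file_system_type *type)
{
        struct fs_context *fc;
        struct vfsmount *mnt;
        int err;

        fc = fs_context_for_mount(type, 0);
        if (IS_ERR(fc))
                return ERR_CAST(fc);

        err = vfs_get_tree(fc);
        if (err) {
                mnt = ERR_PTR(err);
        } else {
                /* vfs_get_tree() returns with sb->s_umount still held */
                up_write(&fc->root->d_sb->s_umount);
                mnt = vfs_create_mount(fc);
        }

        put_fs_context(fc);
        return mnt;
}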
    1553             : 
    1554             : /*
    1555             :  * Setup private BDI for given superblock. It gets automatically cleaned up
    1556             :  * in generic_shutdown_super().
    1557             :  */
    1558           0 : int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
    1559             : {
    1560             :         struct backing_dev_info *bdi;
    1561             :         int err;
    1562             :         va_list args;
    1563             : 
    1564           0 :         bdi = bdi_alloc(NUMA_NO_NODE);
    1565           0 :         if (!bdi)
    1566             :                 return -ENOMEM;
    1567             : 
    1568           0 :         va_start(args, fmt);
    1569           0 :         err = bdi_register_va(bdi, fmt, args);
    1570           0 :         va_end(args);
    1571           0 :         if (err) {
    1572           0 :                 bdi_put(bdi);
    1573           0 :                 return err;
    1574             :         }
    1575           0 :         WARN_ON(sb->s_bdi != &noop_backing_dev_info);
    1576           0 :         sb->s_bdi = bdi;
    1577           0 :         sb->s_iflags |= SB_I_PERSB_BDI;
    1578             : 
    1579           0 :         return 0;
    1580             : }
    1581             : EXPORT_SYMBOL(super_setup_bdi_name);
    1582             : 
    1583             : /*
     1584             :  * Setup private BDI for given superblock. It gets automatically cleaned up
    1585             :  * in generic_shutdown_super().
    1586             :  */
    1587           0 : int super_setup_bdi(struct super_block *sb)
    1588             : {
    1589             :         static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
    1590             : 
    1591           0 :         return super_setup_bdi_name(sb, "%.28s-%ld", sb->s_type->name,
    1592             :                                     atomic_long_inc_return(&bdi_seq));
    1593             : }
    1594             : EXPORT_SYMBOL(super_setup_bdi);
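/*
 * Illustrative sketch, not part of fs/super.c: a network or virtual
 * filesystem doing writeback through its own backing_dev_info calls one of
 * the helpers above early in fill_super().  The "examplefs" name and the
 * read-ahead tweak are hypothetical.
 */
static int examplefs_fill_super_with_bdi(struct super_block *sb,
                                         struct fs_context *fc)
{
        int err;

        /* registers "examplefs-<seq>"; torn down by generic_shutdown_super() */
        err = super_setup_bdi(sb);
        if (err)
                return err;

        sb->s_bdi->ra_pages = 0;        /* e.g. disable read-ahead entirely */
        /* ... the rest of the superblock setup ... */
        return 0;
}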
    1595             : 
    1596             : /**
    1597             :  * sb_wait_write - wait until all writers to given file system finish
    1598             :  * @sb: the super for which we wait
    1599             :  * @level: type of writers we wait for (normal vs page fault)
    1600             :  *
     1601             :  * This function waits until there are no writers of the given type on the
     1602             :  * given file system.
    1603             :  */
    1604             : static void sb_wait_write(struct super_block *sb, int level)
    1605             : {
    1606           0 :         percpu_down_write(sb->s_writers.rw_sem + level-1);
    1607             : }
    1608             : 
    1609             : /*
     1610             :  * We are going to return to userspace and forget about these locks; their
     1611             :  * ownership goes to the caller of thaw_super(), which does the unlock().
    1612             :  */
    1613             : static void lockdep_sb_freeze_release(struct super_block *sb)
    1614             : {
    1615             :         int level;
    1616             : 
    1617           0 :         for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
    1618             :                 percpu_rwsem_release(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
    1619             : }
    1620             : 
    1621             : /*
    1622             :  * Tell lockdep we are holding these locks before we call ->unfreeze_fs(sb).
    1623             :  */
    1624             : static void lockdep_sb_freeze_acquire(struct super_block *sb)
    1625             : {
    1626             :         int level;
    1627             : 
    1628           0 :         for (level = 0; level < SB_FREEZE_LEVELS; ++level)
    1629             :                 percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
    1630             : }
    1631             : 
    1632             : static void sb_freeze_unlock(struct super_block *sb, int level)
    1633             : {
    1634           0 :         for (level--; level >= 0; level--)
    1635           0 :                 percpu_up_write(sb->s_writers.rw_sem + level);
    1636             : }
    1637             : 
    1638             : /**
    1639             :  * freeze_super - lock the filesystem and force it into a consistent state
    1640             :  * @sb: the super to lock
    1641             :  *
    1642             :  * Syncs the super to make sure the filesystem is consistent and calls the fs's
    1643             :  * freeze_fs.  Subsequent calls to this without first thawing the fs will return
    1644             :  * -EBUSY.
    1645             :  *
    1646             :  * During this function, sb->s_writers.frozen goes through these values:
    1647             :  *
    1648             :  * SB_UNFROZEN: File system is normal, all writes progress as usual.
    1649             :  *
    1650             :  * SB_FREEZE_WRITE: The file system is in the process of being frozen.  New
    1651             :  * writes should be blocked, though page faults are still allowed. We wait for
    1652             :  * all writes to complete and then proceed to the next stage.
    1653             :  *
    1654             :  * SB_FREEZE_PAGEFAULT: Freezing continues. Now also page faults are blocked
    1655             :  * but internal fs threads can still modify the filesystem (although they
    1656             :  * should not dirty new pages or inodes), writeback can run etc. After waiting
    1657             :  * for all running page faults we sync the filesystem which will clean all
    1658             :  * dirty pages and inodes (no new dirty pages or inodes can be created when
    1659             :  * sync is running).
    1660             :  *
    1661             :  * SB_FREEZE_FS: The file system is frozen. Now all internal sources of fs
    1662             :  * modification are blocked (e.g. XFS preallocation truncation on inode
    1663             :  * reclaim). This is usually implemented by blocking new transactions for
    1664             :  * filesystems that have them and need this additional guard. After all
    1665             :  * internal writers are finished we call ->freeze_fs() to finish filesystem
    1666             :  * freezing. Then we transition to SB_FREEZE_COMPLETE state. This state is
    1667             :  * mostly auxiliary for filesystems to verify they do not modify frozen fs.
    1668             :  *
    1669             :  * sb->s_writers.frozen is protected by sb->s_umount.
    1670             :  */
    1671           0 : int freeze_super(struct super_block *sb)
    1672             : {
    1673             :         int ret;
    1674             : 
    1675           0 :         atomic_inc(&sb->s_active);
    1676           0 :         down_write(&sb->s_umount);
    1677           0 :         if (sb->s_writers.frozen != SB_UNFROZEN) {
    1678           0 :                 deactivate_locked_super(sb);
    1679           0 :                 return -EBUSY;
    1680             :         }
    1681             : 
    1682           0 :         if (!(sb->s_flags & SB_BORN)) {
    1683           0 :                 up_write(&sb->s_umount);
    1684           0 :                 return 0;       /* sic - it's "nothing to do" */
    1685             :         }
    1686             : 
    1687           0 :         if (sb_rdonly(sb)) {
    1688             :                 /* Nothing to do really... */
    1689           0 :                 sb->s_writers.frozen = SB_FREEZE_COMPLETE;
    1690           0 :                 up_write(&sb->s_umount);
    1691           0 :                 return 0;
    1692             :         }
    1693             : 
    1694           0 :         sb->s_writers.frozen = SB_FREEZE_WRITE;
    1695             :         /* Release s_umount to preserve sb_start_write -> s_umount ordering */
    1696           0 :         up_write(&sb->s_umount);
    1697           0 :         sb_wait_write(sb, SB_FREEZE_WRITE);
    1698           0 :         down_write(&sb->s_umount);
    1699             : 
    1700             :         /* Now we go and block page faults... */
    1701           0 :         sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
    1702           0 :         sb_wait_write(sb, SB_FREEZE_PAGEFAULT);
    1703             : 
    1704             :         /* All writers are done so after syncing there won't be dirty data */
    1705           0 :         ret = sync_filesystem(sb);
    1706           0 :         if (ret) {
    1707           0 :                 sb->s_writers.frozen = SB_UNFROZEN;
    1708           0 :                 sb_freeze_unlock(sb, SB_FREEZE_PAGEFAULT);
    1709           0 :                 wake_up(&sb->s_writers.wait_unfrozen);
    1710           0 :                 deactivate_locked_super(sb);
    1711           0 :                 return ret;
    1712             :         }
    1713             : 
    1714             :         /* Now wait for internal filesystem counter */
    1715           0 :         sb->s_writers.frozen = SB_FREEZE_FS;
    1716           0 :         sb_wait_write(sb, SB_FREEZE_FS);
    1717             : 
    1718           0 :         if (sb->s_op->freeze_fs) {
    1719           0 :                 ret = sb->s_op->freeze_fs(sb);
    1720           0 :                 if (ret) {
    1721           0 :                         printk(KERN_ERR
     1722             :                                 "VFS: Filesystem freeze failed\n");
    1723           0 :                         sb->s_writers.frozen = SB_UNFROZEN;
    1724           0 :                         sb_freeze_unlock(sb, SB_FREEZE_FS);
    1725           0 :                         wake_up(&sb->s_writers.wait_unfrozen);
    1726           0 :                         deactivate_locked_super(sb);
    1727           0 :                         return ret;
    1728             :                 }
    1729             :         }
    1730             :         /*
    1731             :          * For debugging purposes so that fs can warn if it sees write activity
    1732             :          * when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super().
    1733             :          */
    1734           0 :         sb->s_writers.frozen = SB_FREEZE_COMPLETE;
    1735           0 :         lockdep_sb_freeze_release(sb);
    1736           0 :         up_write(&sb->s_umount);
    1737           0 :         return 0;
    1738             : }
    1739             : EXPORT_SYMBOL(freeze_super);
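/*
 * Illustrative sketch, not part of fs/super.c: the ->freeze_fs()/->unfreeze_fs()
 * hooks that freeze_super()/thaw_super() invoke once all writer levels are
 * blocked.  The "examplefs" names are hypothetical and the journal handling
 * is only indicated in comments.
 */
static int examplefs_freeze_fs(struct super_block *sb)
{
        /* quiesce internal sources of modification, e.g. stop the journal */
        return 0;       /* non-zero aborts the freeze and re-enables writers */
}

static int examplefs_unfreeze_fs(struct super_block *sb)
{
        /* restart internal writers; called with the freeze locks re-acquired */
        return 0;
}

static const struct super_operations examplefs_super_ops = {
        .freeze_fs      = examplefs_freeze_fs,
        .unfreeze_fs    = examplefs_unfreeze_fs,
};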
    1740             : 
    1741           0 : static int thaw_super_locked(struct super_block *sb)
    1742             : {
    1743             :         int error;
    1744             : 
    1745           0 :         if (sb->s_writers.frozen != SB_FREEZE_COMPLETE) {
    1746           0 :                 up_write(&sb->s_umount);
    1747           0 :                 return -EINVAL;
    1748             :         }
    1749             : 
    1750           0 :         if (sb_rdonly(sb)) {
    1751           0 :                 sb->s_writers.frozen = SB_UNFROZEN;
    1752           0 :                 goto out;
    1753             :         }
    1754             : 
    1755           0 :         lockdep_sb_freeze_acquire(sb);
    1756             : 
    1757           0 :         if (sb->s_op->unfreeze_fs) {
    1758           0 :                 error = sb->s_op->unfreeze_fs(sb);
    1759           0 :                 if (error) {
    1760           0 :                         printk(KERN_ERR
     1761             :                                 "VFS: Filesystem thaw failed\n");
    1762           0 :                         lockdep_sb_freeze_release(sb);
    1763           0 :                         up_write(&sb->s_umount);
    1764           0 :                         return error;
    1765             :                 }
    1766             :         }
    1767             : 
    1768           0 :         sb->s_writers.frozen = SB_UNFROZEN;
    1769             :         sb_freeze_unlock(sb, SB_FREEZE_FS);
    1770             : out:
    1771           0 :         wake_up(&sb->s_writers.wait_unfrozen);
    1772           0 :         deactivate_locked_super(sb);
    1773           0 :         return 0;
    1774             : }
    1775             : 
    1776             : /**
    1777             :  * thaw_super -- unlock filesystem
    1778             :  * @sb: the super to thaw
    1779             :  *
    1780             :  * Unlocks the filesystem and marks it writeable again after freeze_super().
    1781             :  */
    1782           0 : int thaw_super(struct super_block *sb)
    1783             : {
    1784           0 :         down_write(&sb->s_umount);
    1785           0 :         return thaw_super_locked(sb);
    1786             : }
    1787             : EXPORT_SYMBOL(thaw_super);
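/*
 * Illustrative sketch, not part of fs/super.c: an in-kernel caller bracketing
 * a consistent on-disk snapshot with freeze_super()/thaw_super().  The
 * example_snapshot() wrapper is hypothetical.
 */
static int example_snapshot(struct super_block *sb)
{
        int err;

        err = freeze_super(sb);         /* syncs and blocks all writer levels */
        if (err)
                return err;

        /*
         * The block device now holds a consistent image; take the snapshot
         * here (device-mapper, LVM, hypervisor hook, ...).
         */

        return thaw_super(sb);          /* unblock writers again */
}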
    1788             : 
    1789             : /*
    1790             :  * Create workqueue for deferred direct IO completions. We allocate the
     1791             :  * workqueue when it's first needed. This avoids creating the workqueue for
     1792             :  * filesystems that don't need it and also allows us to create the workqueue
     1793             :  * late enough so that we can include s_id in the name of the workqueue.
    1794             :  */
    1795           0 : int sb_init_dio_done_wq(struct super_block *sb)
    1796             : {
    1797             :         struct workqueue_struct *old;
    1798           0 :         struct workqueue_struct *wq = alloc_workqueue("dio/%s",
    1799             :                                                       WQ_MEM_RECLAIM, 0,
    1800           0 :                                                       sb->s_id);
    1801           0 :         if (!wq)
    1802             :                 return -ENOMEM;
    1803             :         /*
    1804             :          * This has to be atomic as more DIOs can race to create the workqueue
    1805             :          */
    1806           0 :         old = cmpxchg(&sb->s_dio_done_wq, NULL, wq);
    1807             :         /* Someone created workqueue before us? Free ours... */
    1808           0 :         if (old)
    1809           0 :                 destroy_workqueue(wq);
    1810             :         return 0;
    1811             : }
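/*
 * Illustrative sketch, not part of fs/super.c: a direct-IO submission path
 * creating the completion workqueue on first use.  sb_init_dio_done_wq() is
 * an fs/-internal helper (declared in fs/internal.h); the examplefs_submit_dio()
 * wrapper is hypothetical.
 */
static int examplefs_submit_dio(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;

        if (!sb->s_dio_done_wq) {
                /* the cmpxchg inside makes a concurrent double-create harmless */
                int err = sb_init_dio_done_wq(sb);

                if (err)
                        return err;
        }
        /* ... queue the bio; completion work runs on sb->s_dio_done_wq ... */
        return 0;
}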

Generated by: LCOV version 1.14