LCOV - code coverage report
Current view: top level - kernel - kthread.c (source / functions)
Test: coverage.info          Lines:     148 / 433   (34.2 %)
Date: 2023-07-19 18:55:55    Functions:  21 /  51   (41.2 %)

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0-only
       2             : /* Kernel thread helper functions.
       3             :  *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
       4             :  *   Copyright (C) 2009 Red Hat, Inc.
       5             :  *
       6             :  * Creation is done via kthreadd, so that we get a clean environment
       7             :  * even if we're invoked from userspace (think modprobe, hotplug cpu,
       8             :  * etc.).
       9             :  */
      10             : #include <uapi/linux/sched/types.h>
      11             : #include <linux/mm.h>
      12             : #include <linux/mmu_context.h>
      13             : #include <linux/sched.h>
      14             : #include <linux/sched/mm.h>
      15             : #include <linux/sched/task.h>
      16             : #include <linux/kthread.h>
      17             : #include <linux/completion.h>
      18             : #include <linux/err.h>
      19             : #include <linux/cgroup.h>
      20             : #include <linux/cpuset.h>
      21             : #include <linux/unistd.h>
      22             : #include <linux/file.h>
      23             : #include <linux/export.h>
      24             : #include <linux/mutex.h>
      25             : #include <linux/slab.h>
      26             : #include <linux/freezer.h>
      27             : #include <linux/ptrace.h>
      28             : #include <linux/uaccess.h>
      29             : #include <linux/numa.h>
      30             : #include <linux/sched/isolation.h>
      31             : #include <trace/events/sched.h>
      32             : 
      33             : 
      34             : static DEFINE_SPINLOCK(kthread_create_lock);
      35             : static LIST_HEAD(kthread_create_list);
      36             : struct task_struct *kthreadd_task;
      37             : 
      38             : struct kthread_create_info
      39             : {
      40             :         /* Information passed to kthread() from kthreadd. */
      41             :         char *full_name;
      42             :         int (*threadfn)(void *data);
      43             :         void *data;
      44             :         int node;
      45             : 
      46             :         /* Result passed back to kthread_create() from kthreadd. */
      47             :         struct task_struct *result;
      48             :         struct completion *done;
      49             : 
      50             :         struct list_head list;
      51             : };
      52             : 
      53             : struct kthread {
      54             :         unsigned long flags;
      55             :         unsigned int cpu;
      56             :         int result;
      57             :         int (*threadfn)(void *);
      58             :         void *data;
      59             :         struct completion parked;
      60             :         struct completion exited;
      61             : #ifdef CONFIG_BLK_CGROUP
      62             :         struct cgroup_subsys_state *blkcg_css;
      63             : #endif
      64             :         /* To store the full name if task comm is truncated. */
      65             :         char *full_name;
      66             : };
      67             : 
      68             : enum KTHREAD_BITS {
      69             :         KTHREAD_IS_PER_CPU = 0,
      70             :         KTHREAD_SHOULD_STOP,
      71             :         KTHREAD_SHOULD_PARK,
      72             : };
      73             : 
      74             : static inline struct kthread *to_kthread(struct task_struct *k)
      75             : {
      76        3483 :         WARN_ON(!(k->flags & PF_KTHREAD));
      77        3483 :         return k->worker_private;
      78             : }
      79             : 
      80             : /*
      81             :  * Variant of to_kthread() that doesn't assume @p is a kthread.
      82             :  *
      83             :  * Per construction, when:
      84             :  *
      85             :  *   (p->flags & PF_KTHREAD) && p->worker_private
      86             :  *
      87             :  * the task is a kthread and its struct kthread is persistent. However,
      88             :  * PF_KTHREAD on its own is not: kernel_thread() can exec() (see umh.c and
      89             :  * begin_new_exec()).
      90             :  */
      91             : static inline struct kthread *__to_kthread(struct task_struct *p)
      92             : {
      93           0 :         void *kthread = p->worker_private;
      94           0 :         if (kthread && !(p->flags & PF_KTHREAD))
      95           0 :                 kthread = NULL;
      96             :         return kthread;
      97             : }
      98             : 
      99           0 : void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk)
     100             : {
     101           0 :         struct kthread *kthread = to_kthread(tsk);
     102             : 
     103           0 :         if (!kthread || !kthread->full_name) {
     104           0 :                 __get_task_comm(buf, buf_size, tsk);
     105           0 :                 return;
     106             :         }
     107             : 
     108           0 :         strscpy_pad(buf, kthread->full_name, buf_size);
     109             : }
     110             : 
     111         382 : bool set_kthread_struct(struct task_struct *p)
     112             : {
     113             :         struct kthread *kthread;
     114             : 
     115         764 :         if (WARN_ON_ONCE(to_kthread(p)))
     116             :                 return false;
     117             : 
     118         382 :         kthread = kzalloc(sizeof(*kthread), GFP_KERNEL);
     119         382 :         if (!kthread)
     120             :                 return false;
     121             : 
     122         764 :         init_completion(&kthread->exited);
     123         764 :         init_completion(&kthread->parked);
     124         382 :         p->vfork_done = &kthread->exited;
     125             : 
     126         382 :         p->worker_private = kthread;
     127         382 :         return true;
     128             : }
     129             : 
     130         366 : void free_kthread_struct(struct task_struct *k)
     131             : {
     132             :         struct kthread *kthread;
     133             : 
     134             :         /*
     135             :          * Can be NULL if kzalloc() in set_kthread_struct() failed.
     136             :          */
     137         732 :         kthread = to_kthread(k);
     138         366 :         if (!kthread)
     139             :                 return;
     140             : 
     141             : #ifdef CONFIG_BLK_CGROUP
     142             :         WARN_ON_ONCE(kthread->blkcg_css);
     143             : #endif
     144         366 :         k->worker_private = NULL;
     145         366 :         kfree(kthread->full_name);
     146         366 :         kfree(kthread);
     147             : }
     148             : 
     149             : /**
     150             :  * kthread_should_stop - should this kthread return now?
     151             :  *
     152             :  * When someone calls kthread_stop() on your kthread, it will be woken
     153             :  * and this will return true.  You should then return, and your return
     154             :  * value will be passed through to kthread_stop().
     155             :  */
     156        1001 : bool kthread_should_stop(void)
     157             : {
     158        3003 :         return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
     159             : }
     160             : EXPORT_SYMBOL(kthread_should_stop);
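
/*
 * Illustrative usage sketch (not part of kthread.c, so no coverage data):
 * a minimal thread function that exits cleanly once kthread_stop() is
 * called on it. example_threadfn is a hypothetical name.
 */
static int example_threadfn(void *data)
{
        while (!kthread_should_stop()) {
                /* do one unit of work here, then sleep until woken */
                set_current_state(TASK_INTERRUPTIBLE);
                if (!kthread_should_stop())
                        schedule();
                __set_current_state(TASK_RUNNING);
        }
        return 0;       /* becomes the return value of kthread_stop() */
}
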
     161             : 
     162         800 : bool __kthread_should_park(struct task_struct *k)
     163             : {
     164        2400 :         return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
     165             : }
     166             : EXPORT_SYMBOL_GPL(__kthread_should_park);
     167             : 
     168             : /**
     169             :  * kthread_should_park - should this kthread park now?
     170             :  *
     171             :  * When someone calls kthread_park() on your kthread, it will be woken
     172             :  * and this will return true.  You should then do the necessary
     173             :  * cleanup and call kthread_parkme()
     174             :  *
     175             :  * Similar to kthread_should_stop(), but this keeps the thread alive
     176             :  * and in a park position. kthread_unpark() "restarts" the thread and
     177             :  * calls the thread function again.
     178             :  */
     179         780 : bool kthread_should_park(void)
     180             : {
     181         780 :         return __kthread_should_park(current);
     182             : }
     183             : EXPORT_SYMBOL_GPL(kthread_should_park);
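
/*
 * Illustrative sketch (not part of kthread.c): a thread function that
 * honours both stop and park requests, in the style of smpboot threads.
 * example_parking_fn is a hypothetical name.
 */
static int example_parking_fn(void *data)
{
        while (!kthread_should_stop()) {
                if (kthread_should_park()) {
                        /* quiesce here, then sleep in TASK_PARKED */
                        kthread_parkme();
                        continue;
                }
                /* normal per-iteration work */
                schedule_timeout_interruptible(HZ);
        }
        return 0;
}
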
     184             : 
     185             : /**
     186             :  * kthread_freezable_should_stop - should this freezable kthread return now?
     187             :  * @was_frozen: optional out parameter, indicates whether %current was frozen
     188             :  *
      189             :  * kthread_should_stop() for freezable kthreads, which will enter the
      190             :  * refrigerator if necessary.  This function is safe from kthread_stop() /
     191             :  * freezer deadlock and freezable kthreads should use this function instead
     192             :  * of calling try_to_freeze() directly.
     193             :  */
     194           0 : bool kthread_freezable_should_stop(bool *was_frozen)
     195             : {
     196           0 :         bool frozen = false;
     197             : 
     198             :         might_sleep();
     199             : 
     200           0 :         if (unlikely(freezing(current)))
     201           0 :                 frozen = __refrigerator(true);
     202             : 
     203           0 :         if (was_frozen)
     204           0 :                 *was_frozen = frozen;
     205             : 
     206           0 :         return kthread_should_stop();
     207             : }
     208             : EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
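
/*
 * Illustrative sketch (not part of kthread.c): a freezable kthread loop.
 * The thread marks itself freezable and polls kthread_freezable_should_stop()
 * instead of calling try_to_freeze() directly, so it cannot deadlock with
 * kthread_stop(). example_freezable_fn is a hypothetical name.
 */
static int example_freezable_fn(void *data)
{
        bool was_frozen;

        set_freezable();
        while (!kthread_freezable_should_stop(&was_frozen)) {
                if (was_frozen)
                        pr_debug("back from the refrigerator\n");
                /* periodic work here */
                schedule_timeout_interruptible(10 * HZ);
        }
        return 0;
}
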
     209             : 
     210             : /**
     211             :  * kthread_func - return the function specified on kthread creation
     212             :  * @task: kthread task in question
     213             :  *
     214             :  * Returns NULL if the task is not a kthread.
     215             :  */
     216           0 : void *kthread_func(struct task_struct *task)
     217             : {
     218           0 :         struct kthread *kthread = __to_kthread(task);
     219           0 :         if (kthread)
     220           0 :                 return kthread->threadfn;
     221             :         return NULL;
     222             : }
     223             : EXPORT_SYMBOL_GPL(kthread_func);
     224             : 
     225             : /**
     226             :  * kthread_data - return data value specified on kthread creation
     227             :  * @task: kthread task in question
     228             :  *
     229             :  * Return the data value specified when kthread @task was created.
     230             :  * The caller is responsible for ensuring the validity of @task when
     231             :  * calling this function.
     232             :  */
     233         179 : void *kthread_data(struct task_struct *task)
     234             : {
     235         358 :         return to_kthread(task)->data;
     236             : }
     237             : EXPORT_SYMBOL_GPL(kthread_data);
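
/*
 * Illustrative sketch (not part of kthread.c): the @data pointer handed to
 * kthread_create*() can later be recovered from the task with kthread_data(),
 * the way the workqueue code looks up its struct worker. struct example_ctx
 * and example_ctx_of() are hypothetical.
 */
struct example_ctx {
        int id;
};

static struct example_ctx *example_ctx_of(struct task_struct *task)
{
        /* only valid if @task is a kthread created with an example_ctx */
        return kthread_data(task);
}
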
     238             : 
     239             : /**
     240             :  * kthread_probe_data - speculative version of kthread_data()
     241             :  * @task: possible kthread task in question
     242             :  *
     243             :  * @task could be a kthread task.  Return the data value specified when it
     244             :  * was created if accessible.  If @task isn't a kthread task or its data is
     245             :  * inaccessible for any reason, %NULL is returned.  This function requires
     246             :  * that @task itself is safe to dereference.
     247             :  */
     248           0 : void *kthread_probe_data(struct task_struct *task)
     249             : {
     250           0 :         struct kthread *kthread = __to_kthread(task);
     251           0 :         void *data = NULL;
     252             : 
     253           0 :         if (kthread)
     254           0 :                 copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
     255           0 :         return data;
     256             : }
     257             : 
     258         380 : static void __kthread_parkme(struct kthread *self)
     259             : {
     260             :         for (;;) {
     261             :                 /*
     262             :                  * TASK_PARKED is a special state; we must serialize against
     263             :                  * possible pending wakeups to avoid store-store collisions on
     264             :                  * task->state.
     265             :                  *
     266             :                  * Such a collision might possibly result in the task state
     267             :                  * changing from TASK_PARKED and us failing the
     268             :                  * wait_task_inactive() in kthread_park().
     269             :                  */
     270        1906 :                 set_special_state(TASK_PARKED);
     271         762 :                 if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
     272             :                         break;
     273             : 
     274             :                 /*
     275             :                  * Thread is going to call schedule(), do not preempt it,
     276             :                  * or the caller of kthread_park() may spend more time in
     277             :                  * wait_task_inactive().
     278             :                  */
     279           1 :                 preempt_disable();
     280           1 :                 complete(&self->parked);
     281           1 :                 schedule_preempt_disabled();
     282           1 :                 preempt_enable();
     283             :         }
     284         380 :         __set_current_state(TASK_RUNNING);
     285         380 : }
     286             : 
     287           0 : void kthread_parkme(void)
     288             : {
     289           0 :         __kthread_parkme(to_kthread(current));
     290           0 : }
     291             : EXPORT_SYMBOL_GPL(kthread_parkme);
     292             : 
     293             : /**
     294             :  * kthread_exit - Cause the current kthread to return @result to kthread_stop().
     295             :  * @result: The integer value to return to kthread_stop().
     296             :  *
     297             :  * While kthread_exit() can be called directly, it exists so that
     298             :  * functions which do some additional work in non-modular code, such as
     299             :  * module_put_and_kthread_exit(), can be implemented.
     300             :  *
     301             :  * Does not return.
     302             :  */
     303         367 : void __noreturn kthread_exit(long result)
     304             : {
     305         734 :         struct kthread *kthread = to_kthread(current);
     306         367 :         kthread->result = result;
     307         367 :         do_exit(0);
     308             : }
     309             : 
     310             : /**
     311             :  * kthread_complete_and_exit - Exit the current kthread.
     312             :  * @comp: Completion to complete
     313             :  * @code: The integer value to return to kthread_stop().
     314             :  *
     315             :  * If present, complete @comp and then return @code to kthread_stop().
     316             :  *
     317             :  * A kernel thread whose module may be removed after the completion of
     318             :  * @comp can use this function to exit safely.
     319             :  *
     320             :  * Does not return.
     321             :  */
     322         367 : void __noreturn kthread_complete_and_exit(struct completion *comp, long code)
     323             : {
     324         367 :         if (comp)
     325         367 :                 complete(comp);
     326             : 
     327         367 :         kthread_exit(code);
     328             : }
     329             : EXPORT_SYMBOL(kthread_complete_and_exit);
     330             : 
     331         380 : static int kthread(void *_create)
     332             : {
     333             :         static const struct sched_param param = { .sched_priority = 0 };
     334             :         /* Copy data: it's on kthread's stack */
     335         380 :         struct kthread_create_info *create = _create;
     336         380 :         int (*threadfn)(void *data) = create->threadfn;
     337         380 :         void *data = create->data;
     338             :         struct completion *done;
     339             :         struct kthread *self;
     340             :         int ret;
     341             : 
     342         760 :         self = to_kthread(current);
     343             : 
     344             :         /* Release the structure when caller killed by a fatal signal. */
     345         760 :         done = xchg(&create->done, NULL);
     346         380 :         if (!done) {
     347           0 :                 kfree(create->full_name);
     348           0 :                 kfree(create);
     349           0 :                 kthread_exit(-EINTR);
     350             :         }
     351             : 
     352         380 :         self->full_name = create->full_name;
     353         380 :         self->threadfn = threadfn;
     354         380 :         self->data = data;
     355             : 
     356             :         /*
     357             :          * The new thread inherited kthreadd's priority and CPU mask. Reset
     358             :          * back to default in case they have been changed.
     359             :          */
     360         380 :         sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
     361        1140 :         set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_KTHREAD));
     362             : 
     363             :         /* OK, tell user we're spawned, wait for stop or wakeup */
     364         380 :         __set_current_state(TASK_UNINTERRUPTIBLE);
     365         380 :         create->result = current;
     366             :         /*
     367             :          * Thread is going to call schedule(), do not preempt it,
     368             :          * or the creator may spend more time in wait_task_inactive().
     369             :          */
     370         380 :         preempt_disable();
     371         380 :         complete(done);
     372         380 :         schedule_preempt_disabled();
     373         380 :         preempt_enable();
     374             : 
     375         380 :         ret = -EINTR;
     376         760 :         if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
     377             :                 cgroup_kthread_ready();
     378         380 :                 __kthread_parkme(self);
     379         380 :                 ret = threadfn(data);
     380             :         }
     381           0 :         kthread_exit(ret);
     382             : }
     383             : 
     384             : /* called from kernel_clone() to get node information for the task about to be created */
     385         382 : int tsk_fork_get_node(struct task_struct *tsk)
     386             : {
     387             : #ifdef CONFIG_NUMA
     388             :         if (tsk == kthreadd_task)
     389             :                 return tsk->pref_node_fork;
     390             : #endif
     391         382 :         return NUMA_NO_NODE;
     392             : }
     393             : 
     394         380 : static void create_kthread(struct kthread_create_info *create)
     395             : {
     396             :         int pid;
     397             : 
     398             : #ifdef CONFIG_NUMA
     399             :         current->pref_node_fork = create->node;
     400             : #endif
     401             :         /* We want our own signal handler (we take no signals by default). */
     402         380 :         pid = kernel_thread(kthread, create, create->full_name,
     403             :                             CLONE_FS | CLONE_FILES | SIGCHLD);
     404         380 :         if (pid < 0) {
     405             :                 /* Release the structure when caller killed by a fatal signal. */
     406           0 :                 struct completion *done = xchg(&create->done, NULL);
     407             : 
     408           0 :                 kfree(create->full_name);
     409           0 :                 if (!done) {
     410           0 :                         kfree(create);
     411           0 :                         return;
     412             :                 }
     413           0 :                 create->result = ERR_PTR(pid);
     414           0 :                 complete(done);
     415             :         }
     416             : }
     417             : 
     418             : static __printf(4, 0)
     419         380 : struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
     420             :                                                     void *data, int node,
     421             :                                                     const char namefmt[],
     422             :                                                     va_list args)
     423             : {
     424         380 :         DECLARE_COMPLETION_ONSTACK(done);
     425             :         struct task_struct *task;
     426         380 :         struct kthread_create_info *create = kmalloc(sizeof(*create),
     427             :                                                      GFP_KERNEL);
     428             : 
     429         380 :         if (!create)
     430             :                 return ERR_PTR(-ENOMEM);
     431         380 :         create->threadfn = threadfn;
     432         380 :         create->data = data;
     433         380 :         create->node = node;
     434         380 :         create->done = &done;
     435         380 :         create->full_name = kvasprintf(GFP_KERNEL, namefmt, args);
     436         380 :         if (!create->full_name) {
     437             :                 task = ERR_PTR(-ENOMEM);
     438             :                 goto free_create;
     439             :         }
     440             : 
     441         380 :         spin_lock(&kthread_create_lock);
     442         760 :         list_add_tail(&create->list, &kthread_create_list);
     443         380 :         spin_unlock(&kthread_create_lock);
     444             : 
     445         380 :         wake_up_process(kthreadd_task);
     446             :         /*
     447             :          * Wait for completion in killable state, for I might be chosen by
     448             :          * the OOM killer while kthreadd is trying to allocate memory for
     449             :          * new kernel thread.
     450             :          */
     451         380 :         if (unlikely(wait_for_completion_killable(&done))) {
     452             :                 /*
     453             :                  * If I was killed by a fatal signal before kthreadd (or new
     454             :                  * kernel thread) calls complete(), leave the cleanup of this
     455             :                  * structure to that thread.
     456             :                  */
     457           0 :                 if (xchg(&create->done, NULL))
     458             :                         return ERR_PTR(-EINTR);
     459             :                 /*
     460             :                  * kthreadd (or new kernel thread) will call complete()
     461             :                  * shortly.
     462             :                  */
     463           0 :                 wait_for_completion(&done);
     464             :         }
     465         380 :         task = create->result;
     466             : free_create:
     467         380 :         kfree(create);
     468         380 :         return task;
     469             : }
     470             : 
     471             : /**
     472             :  * kthread_create_on_node - create a kthread.
     473             :  * @threadfn: the function to run until signal_pending(current).
     474             :  * @data: data ptr for @threadfn.
     475             :  * @node: task and thread structures for the thread are allocated on this node
     476             :  * @namefmt: printf-style name for the thread.
     477             :  *
     478             :  * Description: This helper function creates and names a kernel
     479             :  * thread.  The thread will be stopped: use wake_up_process() to start
     480             :  * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
     481             :  * is affine to all CPUs.
     482             :  *
     483             :  * If the thread is going to be bound to a particular cpu, give its node
     484             :  * in @node to get NUMA affinity for the kthread stack, or else give NUMA_NO_NODE.
     485             :  * When woken, the thread will run @threadfn() with @data as its
     486             :  * argument. @threadfn() can either return directly if it is a
     487             :  * standalone thread for which no one will call kthread_stop(), or
     488             :  * return when 'kthread_should_stop()' is true (which means
     489             :  * kthread_stop() has been called).  The return value should be zero
     490             :  * or a negative error number; it will be passed to kthread_stop().
     491             :  *
     492             :  * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
     493             :  */
     494         380 : struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
     495             :                                            void *data, int node,
     496             :                                            const char namefmt[],
     497             :                                            ...)
     498             : {
     499             :         struct task_struct *task;
     500             :         va_list args;
     501             : 
     502         380 :         va_start(args, namefmt);
     503         380 :         task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
     504         380 :         va_end(args);
     505             : 
     506         380 :         return task;
     507             : }
     508             : EXPORT_SYMBOL(kthread_create_on_node);
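
/*
 * Illustrative sketch (not part of kthread.c): create a named kthread with
 * its stack allocated on a given NUMA node, then start it. The thread is
 * created stopped, so wake_up_process() is required. example_create() is a
 * hypothetical name; example_threadfn is the sketch shown earlier.
 */
static struct task_struct *example_create(void *data, int node, int id)
{
        struct task_struct *tsk;

        tsk = kthread_create_on_node(example_threadfn, data, node,
                                     "example/%d", id);
        if (IS_ERR(tsk))
                return tsk;     /* ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR) */

        wake_up_process(tsk);
        return tsk;
}
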
     509             : 
     510          11 : static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, unsigned int state)
     511             : {
     512             :         unsigned long flags;
     513             : 
     514          11 :         if (!wait_task_inactive(p, state)) {
     515             :                 WARN_ON(1);
     516             :                 return;
     517             :         }
     518             : 
     519             :         /* It's safe because the task is inactive. */
     520          11 :         raw_spin_lock_irqsave(&p->pi_lock, flags);
     521          11 :         do_set_cpus_allowed(p, mask);
     522          11 :         p->flags |= PF_NO_SETAFFINITY;
     523          22 :         raw_spin_unlock_irqrestore(&p->pi_lock, flags);
     524             : }
     525             : 
     526             : static void __kthread_bind(struct task_struct *p, unsigned int cpu, unsigned int state)
     527             : {
     528           2 :         __kthread_bind_mask(p, cpumask_of(cpu), state);
     529             : }
     530             : 
     531           9 : void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
     532             : {
     533           9 :         __kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
     534           9 : }
     535             : 
     536             : /**
     537             :  * kthread_bind - bind a just-created kthread to a cpu.
     538             :  * @p: thread created by kthread_create().
     539             :  * @cpu: cpu (might not be online, must be possible) for @p to run on.
     540             :  *
     541             :  * Description: This function is equivalent to set_cpus_allowed(),
     542             :  * except that @cpu doesn't need to be online, and the thread must be
     543             :  * stopped (i.e., just returned from kthread_create()).
     544             :  */
     545           0 : void kthread_bind(struct task_struct *p, unsigned int cpu)
     546             : {
     547           2 :         __kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
     548           0 : }
     549             : EXPORT_SYMBOL(kthread_bind);
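
/*
 * Illustrative sketch (not part of kthread.c): kthread_bind() must run while
 * the new thread is still stopped, i.e. before its first wake_up_process().
 * example_create_bound() is a hypothetical name.
 */
static struct task_struct *example_create_bound(void *data, unsigned int cpu)
{
        struct task_struct *tsk;

        tsk = kthread_create(example_threadfn, data, "example/%u", cpu);
        if (!IS_ERR(tsk)) {
                kthread_bind(tsk, cpu);
                wake_up_process(tsk);
        }
        return tsk;
}
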
     550             : 
     551             : /**
     552             :  * kthread_create_on_cpu - Create a cpu bound kthread
     553             :  * @threadfn: the function to run until signal_pending(current).
     554             :  * @data: data ptr for @threadfn.
     555             :  * @cpu: The cpu to which the thread should be bound.
     556             :  * @namefmt: printf-style name for the thread. Format is restricted
     557             :  *           to "name.*%u". Code fills in cpu number.
     558             :  *
     559             :  * Description: This helper function creates and names a kernel thread
     560             :  */
     561           1 : struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
     562             :                                           void *data, unsigned int cpu,
     563             :                                           const char *namefmt)
     564             : {
     565             :         struct task_struct *p;
     566             : 
     567           1 :         p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
     568             :                                    cpu);
     569           1 :         if (IS_ERR(p))
     570             :                 return p;
     571           1 :         kthread_bind(p, cpu);
     572             :         /* CPU hotplug needs to bind once again when unparking the thread. */
     573           2 :         to_kthread(p)->cpu = cpu;
     574           1 :         return p;
     575             : }
     576             : EXPORT_SYMBOL(kthread_create_on_cpu);
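
/*
 * Illustrative sketch (not part of kthread.c): kthread_create_on_cpu() does
 * the create + kthread_bind() pair in one call; note the name format is
 * expected to contain a single "%u" that is filled with the cpu number.
 * example_start_percpu() is a hypothetical name.
 */
static int example_start_percpu(void *data, unsigned int cpu)
{
        struct task_struct *p;

        p = kthread_create_on_cpu(example_threadfn, data, cpu, "example/%u");
        if (IS_ERR(p))
                return PTR_ERR(p);

        wake_up_process(p);
        return 0;
}
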
     577             : 
     578           5 : void kthread_set_per_cpu(struct task_struct *k, int cpu)
     579             : {
     580          10 :         struct kthread *kthread = to_kthread(k);
     581           5 :         if (!kthread)
     582             :                 return;
     583             : 
     584           5 :         WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));
     585             : 
     586           5 :         if (cpu < 0) {
     587           0 :                 clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
     588             :                 return;
     589             :         }
     590             : 
     591           5 :         kthread->cpu = cpu;
     592           5 :         set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
     593             : }
     594             : 
     595           0 : bool kthread_is_per_cpu(struct task_struct *p)
     596             : {
     597           0 :         struct kthread *kthread = __to_kthread(p);
     598           0 :         if (!kthread)
     599             :                 return false;
     600             : 
     601           0 :         return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
     602             : }
     603             : 
     604             : /**
     605             :  * kthread_unpark - unpark a thread created by kthread_create().
     606             :  * @k:          thread created by kthread_create().
     607             :  *
     608             :  * Sets kthread_should_park() for @k to return false and wakes it out
     609             :  * of TASK_PARKED. If the thread is marked percpu then it is bound to
     610             :  * the cpu again.
     611             :  */
     612           1 : void kthread_unpark(struct task_struct *k)
     613             : {
     614           2 :         struct kthread *kthread = to_kthread(k);
     615             : 
     616             :         /*
     617             :          * Newly created kthread was parked when the CPU was offline.
     618             :          * The binding was lost and we need to set it again.
     619             :          */
     620           2 :         if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
     621           1 :                 __kthread_bind(k, kthread->cpu, TASK_PARKED);
     622             : 
     623           2 :         clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
     624             :         /*
     625             :          * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
     626             :          */
     627           1 :         wake_up_state(k, TASK_PARKED);
     628           1 : }
     629             : EXPORT_SYMBOL_GPL(kthread_unpark);
     630             : 
     631             : /**
     632             :  * kthread_park - park a thread created by kthread_create().
     633             :  * @k: thread created by kthread_create().
     634             :  *
     635             :  * Sets kthread_should_park() for @k to return true, wakes it, and
     636             :  * waits for it to park. This can also be called after kthread_create()
     637             :  * instead of calling wake_up_process(): the thread will park without
     638             :  * calling threadfn().
     639             :  *
     640             :  * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
     641             :  * If called by the kthread itself just the park bit is set.
     642             :  */
     643           1 : int kthread_park(struct task_struct *k)
     644             : {
     645           2 :         struct kthread *kthread = to_kthread(k);
     646             : 
     647           1 :         if (WARN_ON(k->flags & PF_EXITING))
     648             :                 return -ENOSYS;
     649             : 
     650           2 :         if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
     651             :                 return -EBUSY;
     652             : 
     653           2 :         set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
     654           1 :         if (k != current) {
     655           1 :                 wake_up_process(k);
     656             :                 /*
     657             :                  * Wait for __kthread_parkme() to complete(), this means we
     658             :                  * _will_ have TASK_PARKED and are about to call schedule().
     659             :                  */
     660           1 :                 wait_for_completion(&kthread->parked);
     661             :                 /*
     662             :                  * Now wait for that schedule() to complete and the task to
     663             :                  * get scheduled out.
     664             :                  */
     665           1 :                 WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
     666             :         }
     667             : 
     668             :         return 0;
     669             : }
     670             : EXPORT_SYMBOL_GPL(kthread_park);
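
/*
 * Illustrative sketch (not part of kthread.c): temporarily quiesce a kthread
 * and resume it later. The thread function must cooperate by calling
 * kthread_parkme() when kthread_should_park() is true (see the parking
 * sketch earlier in this file). example_quiesce() is a hypothetical name.
 */
static int example_quiesce(struct task_struct *tsk)
{
        int ret;

        ret = kthread_park(tsk);        /* returns once @tsk sits in TASK_PARKED */
        if (ret)
                return ret;

        /* reconfigure shared state while the thread cannot run */

        kthread_unpark(tsk);            /* wakes it out of TASK_PARKED */
        return 0;
}
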
     671             : 
     672             : /**
     673             :  * kthread_stop - stop a thread created by kthread_create().
     674             :  * @k: thread created by kthread_create().
     675             :  *
     676             :  * Sets kthread_should_stop() for @k to return true, wakes it, and
     677             :  * waits for it to exit. This can also be called after kthread_create()
     678             :  * instead of calling wake_up_process(): the thread will exit without
     679             :  * calling threadfn().
     680             :  *
     681             :  * If threadfn() may call kthread_exit() itself, the caller must ensure
     682             :  * task_struct can't go away.
     683             :  *
     684             :  * Returns the result of threadfn(), or %-EINTR if wake_up_process()
     685             :  * was never called.
     686             :  */
     687           0 : int kthread_stop(struct task_struct *k)
     688             : {
     689             :         struct kthread *kthread;
     690             :         int ret;
     691             : 
     692           0 :         trace_sched_kthread_stop(k);
     693             : 
     694           0 :         get_task_struct(k);
     695           0 :         kthread = to_kthread(k);
     696           0 :         set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
     697           0 :         kthread_unpark(k);
     698           0 :         set_tsk_thread_flag(k, TIF_NOTIFY_SIGNAL);
     699           0 :         wake_up_process(k);
     700           0 :         wait_for_completion(&kthread->exited);
     701           0 :         ret = kthread->result;
     702           0 :         put_task_struct(k);
     703             : 
     704           0 :         trace_sched_kthread_stop_ret(ret);
     705           0 :         return ret;
     706             : }
     707             : EXPORT_SYMBOL(kthread_stop);
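
/*
 * Illustrative sketch (not part of kthread.c): the full lifecycle using
 * kthread_run() (kthread_create() + wake_up_process()) and kthread_stop(),
 * which returns the thread function's return value. Hypothetical names;
 * example_threadfn is the sketch shown earlier.
 */
static struct task_struct *example_tsk;

static int example_module_init(void *data)
{
        example_tsk = kthread_run(example_threadfn, data, "example");
        if (IS_ERR(example_tsk)) {
                int err = PTR_ERR(example_tsk);

                example_tsk = NULL;
                return err;
        }
        return 0;
}

static void example_module_exit(void)
{
        if (example_tsk)
                pr_debug("thread returned %d\n", kthread_stop(example_tsk));
}
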
     708             : 
     709           1 : int kthreadd(void *unused)
     710             : {
     711           1 :         struct task_struct *tsk = current;
     712             : 
     713             :         /* Setup a clean context for our children to inherit. */
     714           1 :         set_task_comm(tsk, "kthreadd");
     715           1 :         ignore_signals(tsk);
     716           2 :         set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_TYPE_KTHREAD));
     717           1 :         set_mems_allowed(node_states[N_MEMORY]);
     718             : 
     719           1 :         current->flags |= PF_NOFREEZE;
     720             :         cgroup_init_kthreadd();
     721             : 
     722             :         for (;;) {
     723         380 :                 set_current_state(TASK_INTERRUPTIBLE);
     724         380 :                 if (list_empty(&kthread_create_list))
     725         380 :                         schedule();
     726         379 :                 __set_current_state(TASK_RUNNING);
     727             : 
     728             :                 spin_lock(&kthread_create_lock);
     729         759 :                 while (!list_empty(&kthread_create_list)) {
     730             :                         struct kthread_create_info *create;
     731             : 
     732         380 :                         create = list_entry(kthread_create_list.next,
     733             :                                             struct kthread_create_info, list);
     734         760 :                         list_del_init(&create->list);
     735         380 :                         spin_unlock(&kthread_create_lock);
     736             : 
     737         380 :                         create_kthread(create);
     738             : 
     739             :                         spin_lock(&kthread_create_lock);
     740             :                 }
     741             :                 spin_unlock(&kthread_create_lock);
     742             :         }
     743             : 
     744             :         return 0;
     745             : }
     746             : 
     747           0 : void __kthread_init_worker(struct kthread_worker *worker,
     748             :                                 const char *name,
     749             :                                 struct lock_class_key *key)
     750             : {
     751           0 :         memset(worker, 0, sizeof(struct kthread_worker));
     752             :         raw_spin_lock_init(&worker->lock);
     753             :         lockdep_set_class_and_name(&worker->lock, key, name);
     754           0 :         INIT_LIST_HEAD(&worker->work_list);
     755           0 :         INIT_LIST_HEAD(&worker->delayed_work_list);
     756           0 : }
     757             : EXPORT_SYMBOL_GPL(__kthread_init_worker);
     758             : 
     759             : /**
     760             :  * kthread_worker_fn - kthread function to process kthread_worker
     761             :  * @worker_ptr: pointer to initialized kthread_worker
     762             :  *
     763             :  * This function implements the main cycle of kthread worker. It processes
     764             :  * work_list until it is stopped with kthread_stop(). It sleeps when the queue
     765             :  * is empty.
     766             :  *
     767             :  * The works must not hold any locks or leave preemption or interrupts
     768             :  * disabled when they finish. A safe point for freezing is defined after
     769             :  * one work finishes and before a new one is started.
     770             :  *
     771             :  * Also, a work must not be handled by more than one worker at the same
     772             :  * time; see also kthread_queue_work().
     773             :  */
     774           0 : int kthread_worker_fn(void *worker_ptr)
     775             : {
     776           0 :         struct kthread_worker *worker = worker_ptr;
     777             :         struct kthread_work *work;
     778             : 
     779             :         /*
     780             :          * FIXME: Update the check and remove the assignment when all kthread
     781             :          * worker users are created using kthread_create_worker*() functions.
     782             :          */
     783           0 :         WARN_ON(worker->task && worker->task != current);
     784           0 :         worker->task = current;
     785             : 
     786           0 :         if (worker->flags & KTW_FREEZABLE)
     787           0 :                 set_freezable();
     788             : 
     789             : repeat:
     790           0 :         set_current_state(TASK_INTERRUPTIBLE);  /* mb paired w/ kthread_stop */
     791             : 
     792           0 :         if (kthread_should_stop()) {
     793           0 :                 __set_current_state(TASK_RUNNING);
     794           0 :                 raw_spin_lock_irq(&worker->lock);
     795           0 :                 worker->task = NULL;
     796           0 :                 raw_spin_unlock_irq(&worker->lock);
     797           0 :                 return 0;
     798             :         }
     799             : 
     800           0 :         work = NULL;
     801           0 :         raw_spin_lock_irq(&worker->lock);
     802           0 :         if (!list_empty(&worker->work_list)) {
     803           0 :                 work = list_first_entry(&worker->work_list,
     804             :                                         struct kthread_work, node);
     805           0 :                 list_del_init(&work->node);
     806             :         }
     807           0 :         worker->current_work = work;
     808           0 :         raw_spin_unlock_irq(&worker->lock);
     809             : 
     810           0 :         if (work) {
     811           0 :                 kthread_work_func_t func = work->func;
     812           0 :                 __set_current_state(TASK_RUNNING);
     813           0 :                 trace_sched_kthread_work_execute_start(work);
     814           0 :                 work->func(work);
     815             :                 /*
     816             :                  * Avoid dereferencing work after this point.  The trace
     817             :                  * event only cares about the address.
     818             :                  */
     819           0 :                 trace_sched_kthread_work_execute_end(work, func);
     820           0 :         } else if (!freezing(current))
     821           0 :                 schedule();
     822             : 
     823           0 :         try_to_freeze();
     824           0 :         cond_resched();
     825           0 :         goto repeat;
     826             : }
     827             : EXPORT_SYMBOL_GPL(kthread_worker_fn);
     828             : 
     829             : static __printf(3, 0) struct kthread_worker *
     830           0 : __kthread_create_worker(int cpu, unsigned int flags,
     831             :                         const char namefmt[], va_list args)
     832             : {
     833             :         struct kthread_worker *worker;
     834             :         struct task_struct *task;
     835           0 :         int node = NUMA_NO_NODE;
     836             : 
     837           0 :         worker = kzalloc(sizeof(*worker), GFP_KERNEL);
     838           0 :         if (!worker)
     839             :                 return ERR_PTR(-ENOMEM);
     840             : 
     841           0 :         kthread_init_worker(worker);
     842             : 
     843           0 :         if (cpu >= 0)
     844           0 :                 node = cpu_to_node(cpu);
     845             : 
     846           0 :         task = __kthread_create_on_node(kthread_worker_fn, worker,
     847             :                                                 node, namefmt, args);
     848           0 :         if (IS_ERR(task))
     849             :                 goto fail_task;
     850             : 
     851           0 :         if (cpu >= 0)
     852           0 :                 kthread_bind(task, cpu);
     853             : 
     854           0 :         worker->flags = flags;
     855           0 :         worker->task = task;
     856           0 :         wake_up_process(task);
     857           0 :         return worker;
     858             : 
     859             : fail_task:
     860           0 :         kfree(worker);
     861           0 :         return ERR_CAST(task);
     862             : }
     863             : 
     864             : /**
     865             :  * kthread_create_worker - create a kthread worker
     866             :  * @flags: flags modifying the default behavior of the worker
     867             :  * @namefmt: printf-style name for the kthread worker (task).
     868             :  *
     869             :  * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
     870             :  * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
     871             :  * when the caller was killed by a fatal signal.
     872             :  */
     873             : struct kthread_worker *
     874           0 : kthread_create_worker(unsigned int flags, const char namefmt[], ...)
     875             : {
     876             :         struct kthread_worker *worker;
     877             :         va_list args;
     878             : 
     879           0 :         va_start(args, namefmt);
     880           0 :         worker = __kthread_create_worker(-1, flags, namefmt, args);
     881           0 :         va_end(args);
     882             : 
     883           0 :         return worker;
     884             : }
     885             : EXPORT_SYMBOL(kthread_create_worker);
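
/*
 * Illustrative sketch (not part of kthread.c): create a dedicated worker
 * thread and tear it down again with kthread_destroy_worker(), which is
 * provided by the same API and flushes remaining work before stopping the
 * underlying kthread. example_worker is a hypothetical name.
 */
static struct kthread_worker *example_worker;

static int example_worker_init(void)
{
        example_worker = kthread_create_worker(0, "example_worker");
        if (IS_ERR(example_worker))
                return PTR_ERR(example_worker);
        return 0;
}

static void example_worker_exit(void)
{
        if (!IS_ERR_OR_NULL(example_worker))
                kthread_destroy_worker(example_worker);
}
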
     886             : 
     887             : /**
     888             :  * kthread_create_worker_on_cpu - create a kthread worker and bind it
     889             :  *      to a given CPU and the associated NUMA node.
     890             :  * @cpu: CPU number
     891             :  * @flags: flags modifying the default behavior of the worker
     892             :  * @namefmt: printf-style name for the kthread worker (task).
     893             :  *
     894             :  * Use a valid CPU number if you want to bind the kthread worker
     895             :  * to the given CPU and the associated NUMA node.
     896             :  *
     897             :  * A good practice is to also include the cpu number in the worker name.
     898             :  * For example, use kthread_create_worker_on_cpu(cpu, 0, "helper/%d", cpu).
     899             :  *
     900             :  * CPU hotplug:
     901             :  * The kthread worker API is simple and generic. It just provides a way
     902             :  * to create, use, and destroy workers.
     903             :  *
     904             :  * It is up to the API user how to handle CPU hotplug. They have to decide
     905             :  * how to handle pending work items, prevent queuing new ones, and
     906             :  * restore the functionality when the CPU goes off and on. There are a
     907             :  * few catches:
     908             :  *
     909             :  *    - CPU affinity gets lost when it is scheduled on an offline CPU.
     910             :  *
     911             :  *    - The worker might not exist if the CPU was offline when the user
     912             :  *      created the workers.
     913             :  *
     914             :  * Good practice is to implement two CPU hotplug callbacks and to
     915             :  * destroy/create the worker when the CPU goes down/up.
     916             :  *
     917             :  * Return:
     918             :  * The pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
     919             :  * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
     920             :  * when the caller was killed by a fatal signal.
     921             :  */
     922             : struct kthread_worker *
     923           0 : kthread_create_worker_on_cpu(int cpu, unsigned int flags,
     924             :                              const char namefmt[], ...)
     925             : {
     926             :         struct kthread_worker *worker;
     927             :         va_list args;
     928             : 
     929           0 :         va_start(args, namefmt);
     930           0 :         worker = __kthread_create_worker(cpu, flags, namefmt, args);
     931           0 :         va_end(args);
     932             : 
     933           0 :         return worker;
     934             : }
     935             : EXPORT_SYMBOL(kthread_create_worker_on_cpu);
     936             : 
     937             : /*
     938             :  * Returns true when the work could not be queued at the moment.
     939             :  * It happens when it is already pending in a worker list
     940             :  * or when it is being cancelled.
     941             :  */
     942             : static inline bool queuing_blocked(struct kthread_worker *worker,
     943             :                                    struct kthread_work *work)
     944             : {
     945             :         lockdep_assert_held(&worker->lock);
     946             : 
     947           0 :         return !list_empty(&work->node) || work->canceling;
     948             : }
     949             : 
     950           0 : static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
     951             :                                              struct kthread_work *work)
     952             : {
     953             :         lockdep_assert_held(&worker->lock);
     954           0 :         WARN_ON_ONCE(!list_empty(&work->node));
     955             :         /* Do not use a work with >1 worker, see kthread_queue_work() */
     956           0 :         WARN_ON_ONCE(work->worker && work->worker != worker);
     957           0 : }
     958             : 
     959             : /* insert @work before @pos in @worker */
     960           0 : static void kthread_insert_work(struct kthread_worker *worker,
     961             :                                 struct kthread_work *work,
     962             :                                 struct list_head *pos)
     963             : {
     964           0 :         kthread_insert_work_sanity_check(worker, work);
     965             : 
     966           0 :         trace_sched_kthread_work_queue_work(worker, work);
     967             : 
     968           0 :         list_add_tail(&work->node, pos);
     969           0 :         work->worker = worker;
     970           0 :         if (!worker->current_work && likely(worker->task))
     971           0 :                 wake_up_process(worker->task);
     972           0 : }
     973             : 
     974             : /**
     975             :  * kthread_queue_work - queue a kthread_work
     976             :  * @worker: target kthread_worker
     977             :  * @work: kthread_work to queue
     978             :  *
     979             :  * Queue @work to the worker @worker for async execution.  @worker
     980             :  * must have been created with kthread_create_worker().  Returns %true
     981             :  * if @work was successfully queued, %false if it was already pending.
     982             :  *
     983             :  * Reinitialize the work if it needs to be used by another worker.
     984             :  * For example, when the worker was stopped and started again.
     985             :  */
     986           0 : bool kthread_queue_work(struct kthread_worker *worker,
     987             :                         struct kthread_work *work)
     988             : {
     989           0 :         bool ret = false;
     990             :         unsigned long flags;
     991             : 
     992           0 :         raw_spin_lock_irqsave(&worker->lock, flags);
     993           0 :         if (!queuing_blocked(worker, work)) {
     994           0 :                 kthread_insert_work(worker, work, &worker->work_list);
     995           0 :                 ret = true;
     996             :         }
     997           0 :         raw_spin_unlock_irqrestore(&worker->lock, flags);
     998           0 :         return ret;
     999             : }
    1000             : EXPORT_SYMBOL_GPL(kthread_queue_work);
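
/*
 * Illustrative sketch (not part of kthread.c): initialize a kthread_work and
 * a kthread_delayed_work, queue them on a worker, and wait for completion.
 * kthread_init_work(), kthread_init_delayed_work(), kthread_flush_work() and
 * kthread_cancel_delayed_work_sync() come from the same API in
 * <linux/kthread.h>. Hypothetical names.
 */
static void example_work_fn(struct kthread_work *work)
{
        /* runs in the worker's kthread context */
}

static void example_use_worker(struct kthread_worker *worker)
{
        struct kthread_work work;
        struct kthread_delayed_work dwork;

        kthread_init_work(&work, example_work_fn);
        kthread_init_delayed_work(&dwork, example_work_fn);

        kthread_queue_work(worker, &work);
        kthread_queue_delayed_work(worker, &dwork, msecs_to_jiffies(100));

        kthread_flush_work(&work);                      /* wait until it ran */
        kthread_cancel_delayed_work_sync(&dwork);       /* stack dwork must not outlive us */
}
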
    1001             : 
    1002             : /**
    1003             :  * kthread_delayed_work_timer_fn - callback that queues the associated kthread
    1004             :  *      delayed work when the timer expires.
    1005             :  * @t: pointer to the expired timer
    1006             :  *
    1007             :  * The format of the function is defined by struct timer_list.
    1008             :  * It is expected to be called from an irqsafe timer with irqs already off.
    1009             :  */
    1010           0 : void kthread_delayed_work_timer_fn(struct timer_list *t)
    1011             : {
    1012           0 :         struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
    1013           0 :         struct kthread_work *work = &dwork->work;
    1014           0 :         struct kthread_worker *worker = work->worker;
    1015             :         unsigned long flags;
    1016             : 
    1017             :         /*
    1018             :          * This might happen when a pending work is reinitialized.
    1019             :          * It means that it is being used the wrong way.
    1020             :          */
    1021           0 :         if (WARN_ON_ONCE(!worker))
    1022             :                 return;
    1023             : 
    1024           0 :         raw_spin_lock_irqsave(&worker->lock, flags);
    1025             :         /* Work must not be used with >1 worker, see kthread_queue_work(). */
    1026           0 :         WARN_ON_ONCE(work->worker != worker);
    1027             : 
    1028             :         /* Move the work from worker->delayed_work_list. */
    1029           0 :         WARN_ON_ONCE(list_empty(&work->node));
    1030           0 :         list_del_init(&work->node);
    1031           0 :         if (!work->canceling)
    1032           0 :                 kthread_insert_work(worker, work, &worker->work_list);
    1033             : 
    1034           0 :         raw_spin_unlock_irqrestore(&worker->lock, flags);
    1035             : }
    1036             : EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
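
The sanity check in __kthread_queue_delayed_work() below expects dwork->timer to point at kthread_delayed_work_timer_fn. That wiring is normally done by initializing the delayed work with kthread_init_delayed_work() (or the static KTHREAD_DELAYED_WORK_INIT / DEFINE_KTHREAD_DELAYED_WORK helpers) rather than by touching the timer directly. A hedged sketch with illustrative names:

    #include <linux/kthread.h>
    #include <linux/printk.h>

    static struct kthread_delayed_work my_dwork;

    static void my_dwork_fn(struct kthread_work *work)
    {
            pr_info("delayed kthread work fired\n");
    }

    static void my_init(void)
    {
            /* Sets up both the work item and its timer; the timer callback
             * becomes kthread_delayed_work_timer_fn, as checked below. */
            kthread_init_delayed_work(&my_dwork, my_dwork_fn);
    }
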
    1037             : 
    1038           0 : static void __kthread_queue_delayed_work(struct kthread_worker *worker,
    1039             :                                          struct kthread_delayed_work *dwork,
    1040             :                                          unsigned long delay)
    1041             : {
    1042           0 :         struct timer_list *timer = &dwork->timer;
    1043           0 :         struct kthread_work *work = &dwork->work;
    1044             : 
    1045           0 :         WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);
    1046             : 
    1047             :         /*
    1048             :          * If @delay is 0, queue @dwork->work immediately.  This is for
    1049             :          * both optimization and correctness.  The earliest @timer can
    1050             :          * expire is on the closest next tick, and delayed_work users depend
    1051             :          * on there being no such delay when @delay is 0.
    1052             :          */
    1053           0 :         if (!delay) {
    1054           0 :                 kthread_insert_work(worker, work, &worker->work_list);
    1055           0 :                 return;
    1056             :         }
    1057             : 
    1058             :         /* Be paranoid and try to detect possible races already now. */
    1059           0 :         kthread_insert_work_sanity_check(worker, work);
    1060             : 
    1061           0 :         list_add(&work->node, &worker->delayed_work_list);
    1062           0 :         work->worker = worker;
    1063           0 :         timer->expires = jiffies + delay;
    1064           0 :         add_timer(timer);
    1065             : }
    1066             : 
    1067             : /**
    1068             :  * kthread_queue_delayed_work - queue the associated kthread work
    1069             :  *      after a delay.
    1070             :  * @worker: target kthread_worker
    1071             :  * @dwork: kthread_delayed_work to queue
    1072             :  * @delay: number of jiffies to wait before queuing
    1073             :  *
    1074             :  * If the work has not been pending it starts a timer that will queue
    1075             :  * the work after the given @delay. If @delay is zero, it queues the
    1076             :  * work immediately.
    1077             :  *
    1078             :  * Return: %false if the @work was already pending. It means that
    1079             :  * either the timer was running or the work was queued. It returns %true
    1080             :  * otherwise.
    1081             :  */
    1082           0 : bool kthread_queue_delayed_work(struct kthread_worker *worker,
    1083             :                                 struct kthread_delayed_work *dwork,
    1084             :                                 unsigned long delay)
    1085             : {
    1086           0 :         struct kthread_work *work = &dwork->work;
    1087             :         unsigned long flags;
    1088           0 :         bool ret = false;
    1089             : 
    1090           0 :         raw_spin_lock_irqsave(&worker->lock, flags);
    1091             : 
    1092           0 :         if (!queuing_blocked(worker, work)) {
    1093           0 :                 __kthread_queue_delayed_work(worker, dwork, delay);
    1094           0 :                 ret = true;
    1095             :         }
    1096             : 
    1097           0 :         raw_spin_unlock_irqrestore(&worker->lock, flags);
    1098           0 :         return ret;
    1099             : }
    1100             : EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
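
Continuing the illustrative sketch above, queuing my_dwork with a delay (the 100 ms value is purely an example):

    #include <linux/jiffies.h>

    static void my_arm(struct kthread_worker *worker)
    {
            /* Run my_dwork_fn() on the worker roughly 100 ms from now;
             * returns false if the work is already pending. */
            if (!kthread_queue_delayed_work(worker, &my_dwork,
                                            msecs_to_jiffies(100)))
                    pr_debug("my_dwork already pending\n");
    }
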
    1101             : 
    1102             : struct kthread_flush_work {
    1103             :         struct kthread_work     work;
    1104             :         struct completion       done;
    1105             : };
    1106             : 
    1107           0 : static void kthread_flush_work_fn(struct kthread_work *work)
    1108             : {
    1109           0 :         struct kthread_flush_work *fwork =
    1110           0 :                 container_of(work, struct kthread_flush_work, work);
    1111           0 :         complete(&fwork->done);
    1112           0 : }
    1113             : 
    1114             : /**
    1115             :  * kthread_flush_work - flush a kthread_work
    1116             :  * @work: work to flush
    1117             :  *
    1118             :  * If @work is queued or executing, wait for it to finish execution.
    1119             :  */
    1120           0 : void kthread_flush_work(struct kthread_work *work)
    1121             : {
    1122           0 :         struct kthread_flush_work fwork = {
    1123             :                 KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
    1124           0 :                 COMPLETION_INITIALIZER_ONSTACK(fwork.done),
    1125             :         };
    1126             :         struct kthread_worker *worker;
    1127           0 :         bool noop = false;
    1128             : 
    1129           0 :         worker = work->worker;
    1130           0 :         if (!worker)
    1131           0 :                 return;
    1132             : 
    1133           0 :         raw_spin_lock_irq(&worker->lock);
    1134             :         /* Work must not be used with >1 worker, see kthread_queue_work(). */
    1135           0 :         WARN_ON_ONCE(work->worker != worker);
    1136             : 
    1137           0 :         if (!list_empty(&work->node))
    1138           0 :                 kthread_insert_work(worker, &fwork.work, work->node.next);
    1139           0 :         else if (worker->current_work == work)
    1140           0 :                 kthread_insert_work(worker, &fwork.work,
    1141             :                                     worker->work_list.next);
    1142             :         else
    1143             :                 noop = true;
    1144             : 
    1145           0 :         raw_spin_unlock_irq(&worker->lock);
    1146             : 
    1147           0 :         if (!noop)
    1148           0 :                 wait_for_completion(&fwork.done);
    1149             : }
    1150             : EXPORT_SYMBOL_GPL(kthread_flush_work);
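
A typical (illustrative) use is flushing before freeing memory the handler operates on; this sketch assumes a hypothetical context structure and that nothing re-queues the work concurrently:

    #include <linux/kthread.h>
    #include <linux/slab.h>

    struct my_ctx {
            struct kthread_work work;
            void *buf;
    };

    static void my_ctx_release(struct my_ctx *ctx)
    {
            /* Wait until the handler is no longer queued or running. */
            kthread_flush_work(&ctx->work);
            kfree(ctx->buf);
            kfree(ctx);
    }

If the work can be re-queued in parallel, kthread_cancel_work_sync() below is the safer choice.
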
    1151             : 
    1152             : /*
    1153             :  * Make sure that the timer is neither set nor running and could
    1154             :  * not manipulate the work list_head any longer.
    1155             :  *
    1156             :  * The function is called under worker->lock. The lock is temporarily
    1157             :  * released, but the timer can't be set again in the meantime.
    1158             :  */
    1159           0 : static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
    1160             :                                               unsigned long *flags)
    1161             : {
    1162           0 :         struct kthread_delayed_work *dwork =
    1163           0 :                 container_of(work, struct kthread_delayed_work, work);
    1164           0 :         struct kthread_worker *worker = work->worker;
    1165             : 
    1166             :         /*
    1167             :          * del_timer_sync() must be called to make sure that the timer
    1168             :          * callback is not running. The lock must be temporarily released
    1169             :          * to avoid a deadlock with the callback. In the meantime,
    1170             :          * any queuing is blocked by setting the canceling counter.
    1171             :          */
    1172           0 :         work->canceling++;
    1173           0 :         raw_spin_unlock_irqrestore(&worker->lock, *flags);
    1174           0 :         del_timer_sync(&dwork->timer);
    1175           0 :         raw_spin_lock_irqsave(&worker->lock, *flags);
    1176           0 :         work->canceling--;
    1177           0 : }
    1178             : 
    1179             : /*
    1180             :  * This function removes the work from the worker queue.
    1181             :  *
    1182             :  * It is called under worker->lock. The caller must make sure that
    1183             :  * the timer used by delayed work is not running, e.g. by calling
    1184             :  * kthread_cancel_delayed_work_timer().
    1185             :  *
    1186             :  * The work might still be in use when this function finishes. See the
    1187             :  * current_work processed by the worker.
    1188             :  *
    1189             :  * Return: %true if @work was pending and successfully canceled,
    1190             :  *      %false if @work was not pending
    1191             :  */
    1192             : static bool __kthread_cancel_work(struct kthread_work *work)
    1193             : {
    1194             :         /*
    1195             :          * Try to remove the work from a worker list. It might either
    1196             :          * be from worker->work_list or from worker->delayed_work_list.
    1197             :          */
    1198           0 :         if (!list_empty(&work->node)) {
    1199           0 :                 list_del_init(&work->node);
    1200             :                 return true;
    1201             :         }
    1202             : 
    1203             :         return false;
    1204             : }
    1205             : 
    1206             : /**
    1207             :  * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
    1208             :  * @worker: kthread worker to use
    1209             :  * @dwork: kthread delayed work to queue
    1210             :  * @delay: number of jiffies to wait before queuing
    1211             :  *
    1212             :  * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
    1213             :  * modify @dwork's timer so that it expires after @delay. If @delay is zero,
    1214             :  * @work is guaranteed to be queued immediately.
    1215             :  *
    1216             :  * Return: %false if @dwork was idle and queued, %true otherwise.
    1217             :  *
    1218             :  * A special case is when the work is being canceled in parallel.
    1219             :  * It might be caused either by the real kthread_cancel_delayed_work_sync()
    1220             :  * or yet another kthread_mod_delayed_work() call. We let the other command
    1221             :  * win and return %true here. The return value can be used for reference
    1222             :  * counting and the number of queued works stays the same. Anyway, the caller
    1223             :  * is supposed to synchronize these operations in a reasonable way.
    1224             :  *
    1225             :  * This function is safe to call from any context including IRQ handler.
    1226             :  * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
    1227             :  * for details.
    1228             :  */
    1229           0 : bool kthread_mod_delayed_work(struct kthread_worker *worker,
    1230             :                               struct kthread_delayed_work *dwork,
    1231             :                               unsigned long delay)
    1232             : {
    1233           0 :         struct kthread_work *work = &dwork->work;
    1234             :         unsigned long flags;
    1235             :         int ret;
    1236             : 
    1237           0 :         raw_spin_lock_irqsave(&worker->lock, flags);
    1238             : 
    1239             :         /* Do not bother with canceling when never queued. */
    1240           0 :         if (!work->worker) {
    1241             :                 ret = false;
    1242             :                 goto fast_queue;
    1243             :         }
    1244             : 
    1245             :         /* Work must not be used with >1 worker, see kthread_queue_work() */
    1246           0 :         WARN_ON_ONCE(work->worker != worker);
    1247             : 
    1248             :         /*
    1249             :          * Temporarily cancel the work, but do not fight with another command
    1250             :          * that is canceling the work as well.
    1251             :          *
    1252             :          * It is a bit tricky because of possible races with another
    1253             :          * mod_delayed_work() and cancel_delayed_work() callers.
    1254             :          *
    1255             :          * The timer must be canceled first because worker->lock is released
    1256             :          * when doing so. But the work can be removed from the queue (list)
    1257             :          * only when it can be queued again so that the return value can
    1258             :          * be used for reference counting.
    1259             :          */
    1260           0 :         kthread_cancel_delayed_work_timer(work, &flags);
    1261           0 :         if (work->canceling) {
    1262             :                 /* The number of works in the queue does not change. */
    1263             :                 ret = true;
    1264             :                 goto out;
    1265             :         }
    1266           0 :         ret = __kthread_cancel_work(work);
    1267             : 
    1268             : fast_queue:
    1269           0 :         __kthread_queue_delayed_work(worker, dwork, delay);
    1270             : out:
    1271           0 :         raw_spin_unlock_irqrestore(&worker->lock, flags);
    1272           0 :         return ret;
    1273             : }
    1274             : EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
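
A hedged sketch of the common "kick a timeout" pattern (names illustrative): every call pushes the deadline out by one second, queuing the work if it was idle, and the return value tells the caller whether a work item was already accounted for.

    #include <linux/jiffies.h>
    #include <linux/kthread.h>

    static void my_touch_timeout(struct kthread_worker *worker,
                                 struct kthread_delayed_work *dwork)
    {
            bool was_pending;

            /* Re-arm: expire roughly one second from now. */
            was_pending = kthread_mod_delayed_work(worker, dwork,
                                                   msecs_to_jiffies(1000));
            if (!was_pending)
                    pr_debug("timeout was idle, now armed\n");
    }
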
    1275             : 
    1276           0 : static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
    1277             : {
    1278           0 :         struct kthread_worker *worker = work->worker;
    1279             :         unsigned long flags;
    1280           0 :         int ret = false;
    1281             : 
    1282           0 :         if (!worker)
    1283             :                 goto out;
    1284             : 
    1285           0 :         raw_spin_lock_irqsave(&worker->lock, flags);
    1286             :         /* Work must not be used with >1 worker, see kthread_queue_work(). */
    1287           0 :         WARN_ON_ONCE(work->worker != worker);
    1288             : 
    1289           0 :         if (is_dwork)
    1290           0 :                 kthread_cancel_delayed_work_timer(work, &flags);
    1291             : 
    1292           0 :         ret = __kthread_cancel_work(work);
    1293             : 
    1294           0 :         if (worker->current_work != work)
    1295             :                 goto out_fast;
    1296             : 
    1297             :         /*
    1298             :          * The work is in progress and we need to wait with the lock released.
    1299             :          * In the meantime, block any queuing by setting the canceling counter.
    1300             :          */
    1301           0 :         work->canceling++;
    1302           0 :         raw_spin_unlock_irqrestore(&worker->lock, flags);
    1303           0 :         kthread_flush_work(work);
    1304           0 :         raw_spin_lock_irqsave(&worker->lock, flags);
    1305           0 :         work->canceling--;
    1306             : 
    1307             : out_fast:
    1308           0 :         raw_spin_unlock_irqrestore(&worker->lock, flags);
    1309             : out:
    1310           0 :         return ret;
    1311             : }
    1312             : 
    1313             : /**
    1314             :  * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
    1315             :  * @work: the kthread work to cancel
    1316             :  *
    1317             :  * Cancel @work and wait for its execution to finish.  This function
    1318             :  * can be used even if the work re-queues itself. On return from this
    1319             :  * function, @work is guaranteed to be not pending or executing on any CPU.
    1320             :  *
    1321             :  * kthread_cancel_work_sync(&delayed_work->work) must not be used for
    1322             :  * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
    1323             :  *
    1324             :  * The caller must ensure that the worker on which @work was last
    1325             :  * queued can't be destroyed before this function returns.
    1326             :  *
    1327             :  * Return: %true if @work was pending, %false otherwise.
    1328             :  */
    1329           0 : bool kthread_cancel_work_sync(struct kthread_work *work)
    1330             : {
    1331           0 :         return __kthread_cancel_work_sync(work, false);
    1332             : }
    1333             : EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
    1334             : 
    1335             : /**
    1336             :  * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
    1337             :  *      wait for it to finish.
    1338             :  * @dwork: the kthread delayed work to cancel
    1339             :  *
    1340             :  * This is kthread_cancel_work_sync() for delayed works.
    1341             :  *
    1342             :  * Return: %true if @dwork was pending, %false otherwise.
    1343             :  */
    1344           0 : bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
    1345             : {
    1346           0 :         return __kthread_cancel_work_sync(&dwork->work, true);
    1347             : }
    1348             : EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
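
A hedged teardown sketch covering both cancel variants (struct my_unit is hypothetical): once the calls return, neither handler is pending or executing, so the data can be freed.

    #include <linux/kthread.h>
    #include <linux/slab.h>

    struct my_unit {
            struct kthread_work work;
            struct kthread_delayed_work dwork;
    };

    static void my_unit_free(struct my_unit *unit)
    {
            /* Delayed works need the _delayed_ variant (see above). */
            kthread_cancel_delayed_work_sync(&unit->dwork);
            kthread_cancel_work_sync(&unit->work);
            kfree(unit);
    }
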
    1349             : 
    1350             : /**
    1351             :  * kthread_flush_worker - flush all current works on a kthread_worker
    1352             :  * @worker: worker to flush
    1353             :  *
    1354             :  * Wait until all currently executing or pending works on @worker are
    1355             :  * finished.
    1356             :  */
    1357           0 : void kthread_flush_worker(struct kthread_worker *worker)
    1358             : {
    1359           0 :         struct kthread_flush_work fwork = {
    1360             :                 KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
    1361           0 :                 COMPLETION_INITIALIZER_ONSTACK(fwork.done),
    1362             :         };
    1363             : 
    1364           0 :         kthread_queue_work(worker, &fwork.work);
    1365           0 :         wait_for_completion(&fwork.done);
    1366           0 : }
    1367             : EXPORT_SYMBOL_GPL(kthread_flush_worker);
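
An illustrative sketch of using the flush as a barrier: once it returns, every item queued before the call has finished, so state that the handlers read can be changed (struct my_cfg and the field are hypothetical).

    #include <linux/kthread.h>

    struct my_cfg {
            int interval_ms;        /* illustrative shared state */
    };

    static void my_set_interval(struct kthread_worker *worker,
                                struct my_cfg *cfg, int interval_ms)
    {
            /* All work queued so far still sees the old value. */
            kthread_flush_worker(worker);
            cfg->interval_ms = interval_ms;
    }
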
    1368             : 
    1369             : /**
    1370             :  * kthread_destroy_worker - destroy a kthread worker
    1371             :  * @worker: worker to be destroyed
    1372             :  *
    1373             :  * Flush and destroy @worker.  The simple flush is enough because the kthread
    1374             :  * worker API is used only in trivial scenarios.  There are no multi-step state
    1375             :  * machines needed.
    1376             :  *
    1377             :  * Note that this function does not handle delayed work, so the
    1378             :  * caller is responsible for queuing or canceling all delayed work items
    1379             :  * before invoking this function.
    1380             :  */
    1381           0 : void kthread_destroy_worker(struct kthread_worker *worker)
    1382             : {
    1383             :         struct task_struct *task;
    1384             : 
    1385           0 :         task = worker->task;
    1386           0 :         if (WARN_ON(!task))
    1387             :                 return;
    1388             : 
    1389           0 :         kthread_flush_worker(worker);
    1390           0 :         kthread_stop(task);
    1391           0 :         WARN_ON(!list_empty(&worker->delayed_work_list));
    1392           0 :         WARN_ON(!list_empty(&worker->work_list));
    1393           0 :         kfree(worker);
    1394             : }
    1395             : EXPORT_SYMBOL(kthread_destroy_worker);
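
Putting the lifecycle together (illustrative, mirroring the note above): delayed items are the caller's responsibility; kthread_destroy_worker() then flushes what remains, stops the kthread and frees the worker.

    #include <linux/kthread.h>

    static void my_exit(struct kthread_worker *worker,
                        struct kthread_delayed_work *dwork)
    {
            /* Delayed work must be dealt with by the caller first. */
            kthread_cancel_delayed_work_sync(dwork);

            /* Flushes pending work, stops the thread, frees the worker. */
            kthread_destroy_worker(worker);
    }
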
    1396             : 
    1397             : /**
    1398             :  * kthread_use_mm - make the calling kthread operate on an address space
    1399             :  * @mm: address space to operate on
    1400             :  */
    1401           0 : void kthread_use_mm(struct mm_struct *mm)
    1402             : {
    1403             :         struct mm_struct *active_mm;
    1404           0 :         struct task_struct *tsk = current;
    1405             : 
    1406           0 :         WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
    1407           0 :         WARN_ON_ONCE(tsk->mm);
    1408             : 
    1409             :         /*
    1410             :          * It is possible for mm to be the same as tsk->active_mm, but
    1411             :          * we must still mmgrab(mm) and mmdrop_lazy_tlb(active_mm),
    1412             :          * because these references are not equivalent.
    1413             :          */
    1414           0 :         mmgrab(mm);
    1415             : 
    1416           0 :         task_lock(tsk);
    1417             :         /* Hold off tlb flush IPIs while switching mm's */
    1418             :         local_irq_disable();
    1419           0 :         active_mm = tsk->active_mm;
    1420           0 :         tsk->active_mm = mm;
    1421           0 :         tsk->mm = mm;
    1422           0 :         membarrier_update_current_mm(mm);
    1423           0 :         switch_mm_irqs_off(active_mm, mm, tsk);
    1424             :         local_irq_enable();
    1425           0 :         task_unlock(tsk);
    1426             : #ifdef finish_arch_post_lock_switch
    1427             :         finish_arch_post_lock_switch();
    1428             : #endif
    1429             : 
    1430             :         /*
    1431             :          * When a kthread starts operating on an address space, the loop
    1432             :          * in membarrier_{private,global}_expedited() may not observe
    1433             :          * the new tsk->mm and thus not issue an IPI. Membarrier requires a
    1434             :          * memory barrier after storing to tsk->mm, before accessing
    1435             :          * user-space memory. A full memory barrier for membarrier
    1436             :          * {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by
    1437             :          * mmdrop_lazy_tlb().
    1438             :          */
    1439           0 :         mmdrop_lazy_tlb(active_mm);
    1440           0 : }
    1441             : EXPORT_SYMBOL_GPL(kthread_use_mm);
    1442             : 
    1443             : /**
    1444             :  * kthread_unuse_mm - reverse the effect of kthread_use_mm()
    1445             :  * @mm: address space to operate on
    1446             :  */
    1447           0 : void kthread_unuse_mm(struct mm_struct *mm)
    1448             : {
    1449           0 :         struct task_struct *tsk = current;
    1450             : 
    1451           0 :         WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
    1452           0 :         WARN_ON_ONCE(!tsk->mm);
    1453             : 
    1454           0 :         task_lock(tsk);
    1455             :         /*
    1456             :          * When a kthread stops operating on an address space, the loop
    1457             :          * in membarrier_{private,global}_expedited() may not observe
    1458             :          * the mm in tsk->mm and thus not issue an IPI. Membarrier requires a
    1459             :          * memory barrier after accessing user-space memory, before
    1460             :          * clearing tsk->mm.
    1461             :          */
    1462             :         smp_mb__after_spinlock();
    1463           0 :         sync_mm_rss(mm);
    1464             :         local_irq_disable();
    1465           0 :         tsk->mm = NULL;
    1466           0 :         membarrier_update_current_mm(NULL);
    1467           0 :         mmgrab_lazy_tlb(mm);
    1468             :         /* active_mm is still 'mm' */
    1469           0 :         enter_lazy_tlb(mm, tsk);
    1470             :         local_irq_enable();
    1471           0 :         task_unlock(tsk);
    1472             : 
    1473           0 :         mmdrop(mm);
    1474           0 : }
    1475             : EXPORT_SYMBOL_GPL(kthread_unuse_mm);
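
A hedged sketch of how a kthread borrows a user process's address space so that copy_from_user()/copy_to_user() work (the helper name is illustrative); the caller is assumed to already hold a reference on the mm, e.g. obtained via get_task_mm().

    #include <linux/errno.h>
    #include <linux/kthread.h>
    #include <linux/sched/mm.h>
    #include <linux/uaccess.h>

    /* Copy @len bytes from a user address belonging to @mm into @dst.
     * Must be called from a kthread; @mm must be pinned by the caller. */
    static int my_copy_from_user_mm(struct mm_struct *mm, void *dst,
                                    const void __user *src, size_t len)
    {
            int ret = 0;

            kthread_use_mm(mm);             /* adopt the address space */
            if (copy_from_user(dst, src, len))
                    ret = -EFAULT;
            kthread_unuse_mm(mm);           /* revert to the lazy-TLB mm */

            return ret;
    }
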
    1476             : 
    1477             : #ifdef CONFIG_BLK_CGROUP
    1478             : /**
    1479             :  * kthread_associate_blkcg - associate blkcg to current kthread
    1480             :  * @css: the cgroup info
    1481             :  *
    1482             :  * Current thread must be a kthread. The thread is running jobs on behalf of
    1483             :  * other threads. In some cases, we expect the jobs to attach the cgroup
    1484             :  * info of the original threads instead of that of the current thread. This
    1485             :  * function stores the original thread's cgroup info in the current kthread for later
    1486             :  * retrieval.
    1487             :  */
    1488             : void kthread_associate_blkcg(struct cgroup_subsys_state *css)
    1489             : {
    1490             :         struct kthread *kthread;
    1491             : 
    1492             :         if (!(current->flags & PF_KTHREAD))
    1493             :                 return;
    1494             :         kthread = to_kthread(current);
    1495             :         if (!kthread)
    1496             :                 return;
    1497             : 
    1498             :         if (kthread->blkcg_css) {
    1499             :                 css_put(kthread->blkcg_css);
    1500             :                 kthread->blkcg_css = NULL;
    1501             :         }
    1502             :         if (css) {
    1503             :                 css_get(css);
    1504             :                 kthread->blkcg_css = css;
    1505             :         }
    1506             : }
    1507             : EXPORT_SYMBOL(kthread_associate_blkcg);
    1508             : 
    1509             : /**
    1510             :  * kthread_blkcg - get associated blkcg css of current kthread
    1511             :  *
    1512             :  * Current thread must be a kthread.
    1513             :  */
    1514             : struct cgroup_subsys_state *kthread_blkcg(void)
    1515             : {
    1516             :         struct kthread *kthread;
    1517             : 
    1518             :         if (current->flags & PF_KTHREAD) {
    1519             :                 kthread = to_kthread(current);
    1520             :                 if (kthread)
    1521             :                         return kthread->blkcg_css;
    1522             :         }
    1523             :         return NULL;
    1524             : }
    1525             : #endif
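
A hedged sketch of the intended pattern (close to what drivers such as loop do; the names here are illustrative): the worker associates the submitter's blkcg css while it performs the I/O so that it is charged to the right cgroup, then restores the default. The css is assumed to have been captured with a reference when the job was handed over.

    #include <linux/cgroup.h>
    #include <linux/kthread.h>

    /* Run one job while charging its block I/O to @css (may be NULL). */
    static void my_run_job(struct cgroup_subsys_state *css,
                           void (*do_io)(void *), void *arg)
    {
            kthread_associate_blkcg(css);   /* charge I/O to the submitter */
            do_io(arg);
            kthread_associate_blkcg(NULL);  /* back to the kthread default */
    }
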

Generated by: LCOV version 1.14