LCOV - code coverage report
Current view: top level - kernel - kthread.c (source / functions)
Test: coverage.info
Date: 2023-03-27 20:00:47
                  Hit   Total   Coverage
Lines:            153     436     35.1 %
Functions:         21      51     41.2 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0-only
       2             : /* Kernel thread helper functions.
       3             :  *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
       4             :  *   Copyright (C) 2009 Red Hat, Inc.
       5             :  *
       6             :  * Creation is done via kthreadd, so that we get a clean environment
       7             :  * even if we're invoked from userspace (think modprobe, hotplug cpu,
       8             :  * etc.).
       9             :  */
      10             : #include <uapi/linux/sched/types.h>
      11             : #include <linux/mm.h>
      12             : #include <linux/mmu_context.h>
      13             : #include <linux/sched.h>
      14             : #include <linux/sched/mm.h>
      15             : #include <linux/sched/task.h>
      16             : #include <linux/kthread.h>
      17             : #include <linux/completion.h>
      18             : #include <linux/err.h>
      19             : #include <linux/cgroup.h>
      20             : #include <linux/cpuset.h>
      21             : #include <linux/unistd.h>
      22             : #include <linux/file.h>
      23             : #include <linux/export.h>
      24             : #include <linux/mutex.h>
      25             : #include <linux/slab.h>
      26             : #include <linux/freezer.h>
      27             : #include <linux/ptrace.h>
      28             : #include <linux/uaccess.h>
      29             : #include <linux/numa.h>
      30             : #include <linux/sched/isolation.h>
      31             : #include <trace/events/sched.h>
      32             : 
      33             : 
      34             : static DEFINE_SPINLOCK(kthread_create_lock);
      35             : static LIST_HEAD(kthread_create_list);
      36             : struct task_struct *kthreadd_task;
      37             : 
      38             : struct kthread_create_info
      39             : {
      40             :         /* Information passed to kthread() from kthreadd. */
      41             :         int (*threadfn)(void *data);
      42             :         void *data;
      43             :         int node;
      44             : 
      45             :         /* Result passed back to kthread_create() from kthreadd. */
      46             :         struct task_struct *result;
      47             :         struct completion *done;
      48             : 
      49             :         struct list_head list;
      50             : };
      51             : 
      52             : struct kthread {
      53             :         unsigned long flags;
      54             :         unsigned int cpu;
      55             :         int result;
      56             :         int (*threadfn)(void *);
      57             :         void *data;
      58             :         struct completion parked;
      59             :         struct completion exited;
      60             : #ifdef CONFIG_BLK_CGROUP
      61             :         struct cgroup_subsys_state *blkcg_css;
      62             : #endif
      63             :         /* To store the full name if task comm is truncated. */
      64             :         char *full_name;
      65             : };
      66             : 
      67             : enum KTHREAD_BITS {
      68             :         KTHREAD_IS_PER_CPU = 0,
      69             :         KTHREAD_SHOULD_STOP,
      70             :         KTHREAD_SHOULD_PARK,
      71             : };
      72             : 
      73             : static inline struct kthread *to_kthread(struct task_struct *k)
      74             : {
      75        3359 :         WARN_ON(!(k->flags & PF_KTHREAD));
      76        3359 :         return k->worker_private;
      77             : }
      78             : 
      79             : /*
      80             :  * Variant of to_kthread() that doesn't assume @p is a kthread.
      81             :  *
       82             :  * Per construction, when:
      83             :  *
      84             :  *   (p->flags & PF_KTHREAD) && p->worker_private
      85             :  *
       86             :  * the task is a kthread and its struct kthread is persistent. However,
       87             :  * PF_KTHREAD on its own is not: kernel_thread() can exec() (see umh.c and
      88             :  * begin_new_exec()).
      89             :  */
      90             : static inline struct kthread *__to_kthread(struct task_struct *p)
      91             : {
      92           0 :         void *kthread = p->worker_private;
      93           0 :         if (kthread && !(p->flags & PF_KTHREAD))
      94           0 :                 kthread = NULL;
      95             :         return kthread;
      96             : }
      97             : 
      98           0 : void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk)
      99             : {
     100           0 :         struct kthread *kthread = to_kthread(tsk);
     101             : 
     102           0 :         if (!kthread || !kthread->full_name) {
     103           0 :                 __get_task_comm(buf, buf_size, tsk);
     104           0 :                 return;
     105             :         }
     106             : 
     107           0 :         strscpy_pad(buf, kthread->full_name, buf_size);
     108             : }
     109             : 
     110         340 : bool set_kthread_struct(struct task_struct *p)
     111             : {
     112             :         struct kthread *kthread;
     113             : 
     114         680 :         if (WARN_ON_ONCE(to_kthread(p)))
     115             :                 return false;
     116             : 
     117         340 :         kthread = kzalloc(sizeof(*kthread), GFP_KERNEL);
     118         340 :         if (!kthread)
     119             :                 return false;
     120             : 
     121         680 :         init_completion(&kthread->exited);
     122         680 :         init_completion(&kthread->parked);
     123         340 :         p->vfork_done = &kthread->exited;
     124             : 
     125         340 :         p->worker_private = kthread;
     126         340 :         return true;
     127             : }
     128             : 
     129         324 : void free_kthread_struct(struct task_struct *k)
     130             : {
     131             :         struct kthread *kthread;
     132             : 
     133             :         /*
      134             :          * Can be NULL if the kzalloc() in set_kthread_struct() failed.
     135             :          */
     136         648 :         kthread = to_kthread(k);
     137         324 :         if (!kthread)
     138             :                 return;
     139             : 
     140             : #ifdef CONFIG_BLK_CGROUP
     141             :         WARN_ON_ONCE(kthread->blkcg_css);
     142             : #endif
     143         324 :         k->worker_private = NULL;
     144         324 :         kfree(kthread->full_name);
     145         324 :         kfree(kthread);
     146             : }
     147             : 
     148             : /**
     149             :  * kthread_should_stop - should this kthread return now?
     150             :  *
     151             :  * When someone calls kthread_stop() on your kthread, it will be woken
     152             :  * and this will return true.  You should then return, and your return
     153             :  * value will be passed through to kthread_stop().
     154             :  */
     155         899 : bool kthread_should_stop(void)
     156             : {
     157        2697 :         return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
     158             : }
     159             : EXPORT_SYMBOL(kthread_should_stop);
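
/*
 * Editorial sketch (not part of kthread.c): the canonical consumer of
 * kthread_should_stop() implied by the contract above. my_thread_fn,
 * @data's meaning and the msleep() period are hypothetical.
 */
#include <linux/kthread.h>
#include <linux/delay.h>

static int my_thread_fn(void *data)
{
        while (!kthread_should_stop()) {
                /* do one unit of work on @data */
                msleep(100);
        }
        return 0;       /* this value is handed back to kthread_stop() */
}
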
     160             : 
     161         687 : bool __kthread_should_park(struct task_struct *k)
     162             : {
     163        2061 :         return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
     164             : }
     165             : EXPORT_SYMBOL_GPL(__kthread_should_park);
     166             : 
     167             : /**
     168             :  * kthread_should_park - should this kthread park now?
     169             :  *
     170             :  * When someone calls kthread_park() on your kthread, it will be woken
     171             :  * and this will return true.  You should then do the necessary
     172             :  * cleanup and call kthread_parkme()
     173             :  *
     174             :  * Similar to kthread_should_stop(), but this keeps the thread alive
     175             :  * and in a park position. kthread_unpark() "restarts" the thread and
     176             :  * calls the thread function again.
     177             :  */
     178         686 : bool kthread_should_park(void)
     179             : {
     180         686 :         return __kthread_should_park(current);
     181             : }
     182             : EXPORT_SYMBOL_GPL(kthread_should_park);
     183             : 
     184             : /**
     185             :  * kthread_freezable_should_stop - should this freezable kthread return now?
     186             :  * @was_frozen: optional out parameter, indicates whether %current was frozen
     187             :  *
     188             :  * kthread_should_stop() for freezable kthreads, which will enter
     189             :  * refrigerator if necessary.  This function is safe from kthread_stop() /
     190             :  * freezer deadlock and freezable kthreads should use this function instead
     191             :  * of calling try_to_freeze() directly.
     192             :  */
     193           0 : bool kthread_freezable_should_stop(bool *was_frozen)
     194             : {
     195           0 :         bool frozen = false;
     196             : 
     197             :         might_sleep();
     198             : 
     199           0 :         if (unlikely(freezing(current)))
     200           0 :                 frozen = __refrigerator(true);
     201             : 
     202           0 :         if (was_frozen)
     203           0 :                 *was_frozen = frozen;
     204             : 
     205           0 :         return kthread_should_stop();
     206             : }
     207             : EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
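
/*
 * Editorial sketch (not part of kthread.c): a freezable kthread built on
 * kthread_freezable_should_stop(). my_freezable_fn is a hypothetical name.
 */
#include <linux/freezer.h>
#include <linux/kthread.h>

static int my_freezable_fn(void *data)
{
        bool was_frozen;

        set_freezable();        /* opt in to the freezer */
        while (!kthread_freezable_should_stop(&was_frozen)) {
                if (was_frozen)
                        continue;       /* revalidate state after a freeze */
                /* do one unit of work */
        }
        return 0;
}
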
     208             : 
     209             : /**
     210             :  * kthread_func - return the function specified on kthread creation
     211             :  * @task: kthread task in question
     212             :  *
     213             :  * Returns NULL if the task is not a kthread.
     214             :  */
     215           0 : void *kthread_func(struct task_struct *task)
     216             : {
     217           0 :         struct kthread *kthread = __to_kthread(task);
     218           0 :         if (kthread)
     219           0 :                 return kthread->threadfn;
     220             :         return NULL;
     221             : }
     222             : EXPORT_SYMBOL_GPL(kthread_func);
     223             : 
     224             : /**
     225             :  * kthread_data - return data value specified on kthread creation
     226             :  * @task: kthread task in question
     227             :  *
     228             :  * Return the data value specified when kthread @task was created.
     229             :  * The caller is responsible for ensuring the validity of @task when
     230             :  * calling this function.
     231             :  */
     232         113 : void *kthread_data(struct task_struct *task)
     233             : {
     234         226 :         return to_kthread(task)->data;
     235             : }
     236             : EXPORT_SYMBOL_GPL(kthread_data);
     237             : 
     238             : /**
     239             :  * kthread_probe_data - speculative version of kthread_data()
     240             :  * @task: possible kthread task in question
     241             :  *
     242             :  * @task could be a kthread task.  Return the data value specified when it
     243             :  * was created if accessible.  If @task isn't a kthread task or its data is
     244             :  * inaccessible for any reason, %NULL is returned.  This function requires
     245             :  * that @task itself is safe to dereference.
     246             :  */
     247           0 : void *kthread_probe_data(struct task_struct *task)
     248             : {
     249           0 :         struct kthread *kthread = __to_kthread(task);
     250           0 :         void *data = NULL;
     251             : 
     252           0 :         if (kthread)
     253           0 :                 copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
     254           0 :         return data;
     255             : }
     256             : 
     257         338 : static void __kthread_parkme(struct kthread *self)
     258             : {
     259             :         for (;;) {
     260             :                 /*
     261             :                  * TASK_PARKED is a special state; we must serialize against
     262             :                  * possible pending wakeups to avoid store-store collisions on
     263             :                  * task->state.
     264             :                  *
      265             :  * Such a collision could result in the task state
      266             :  * changing away from TASK_PARKED and us failing the
     267             :                  * wait_task_inactive() in kthread_park().
     268             :                  */
     269        1696 :                 set_special_state(TASK_PARKED);
     270         678 :                 if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
     271             :                         break;
     272             : 
     273             :                 /*
     274             :                  * Thread is going to call schedule(), do not preempt it,
     275             :                  * or the caller of kthread_park() may spend more time in
     276             :                  * wait_task_inactive().
     277             :                  */
     278           1 :                 preempt_disable();
     279           1 :                 complete(&self->parked);
     280           1 :                 schedule_preempt_disabled();
     281           1 :                 preempt_enable();
     282             :         }
     283         338 :         __set_current_state(TASK_RUNNING);
     284         338 : }
     285             : 
     286           0 : void kthread_parkme(void)
     287             : {
     288           0 :         __kthread_parkme(to_kthread(current));
     289           0 : }
     290             : EXPORT_SYMBOL_GPL(kthread_parkme);
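
/*
 * Editorial sketch (not part of kthread.c): a loop that honours park
 * requests, in the style of per-cpu (smpboot) threads. my_parkable_fn is
 * a hypothetical name.
 */
#include <linux/kthread.h>

static int my_parkable_fn(void *data)
{
        while (!kthread_should_stop()) {
                if (kthread_should_park())
                        kthread_parkme();       /* blocks in TASK_PARKED until kthread_unpark() */
                /* do one unit of work */
        }
        return 0;
}
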
     291             : 
     292             : /**
      293             :  * kthread_exit - Cause the current kthread to return @result to kthread_stop().
     294             :  * @result: The integer value to return to kthread_stop().
     295             :  *
      296             :  * While kthread_exit() can be called directly, it exists so that
      297             :  * functions which do some additional work, such as
      298             :  * module_put_and_kthread_exit(), can be implemented in non-modular code.
     299             :  *
     300             :  * Does not return.
     301             :  */
     302         325 : void __noreturn kthread_exit(long result)
     303             : {
     304         650 :         struct kthread *kthread = to_kthread(current);
     305         325 :         kthread->result = result;
     306         325 :         do_exit(0);
     307             : }
     308             : 
     309             : /**
     310             :  * kthread_complete_and_exit - Exit the current kthread.
     311             :  * @comp: Completion to complete
     312             :  * @code: The integer value to return to kthread_stop().
     313             :  *
      314             :  * If present, complete @comp and then return @code to kthread_stop().
     315             :  *
     316             :  * A kernel thread whose module may be removed after the completion of
      317             :  * @comp can use this function to exit safely.
     318             :  *
     319             :  * Does not return.
     320             :  */
     321         325 : void __noreturn kthread_complete_and_exit(struct completion *comp, long code)
     322             : {
     323         325 :         if (comp)
     324         325 :                 complete(comp);
     325             : 
     326         325 :         kthread_exit(code);
     327             : }
     328             : EXPORT_SYMBOL(kthread_complete_and_exit);
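
/*
 * Editorial sketch (not part of kthread.c): a thread whose module may be
 * unloaded once @my_thread_done completes. kthread_complete_and_exit()
 * never returns, so no module text runs after the completion fires.
 * my_exiting_fn and my_thread_done are hypothetical names.
 */
#include <linux/completion.h>
#include <linux/kthread.h>

static DECLARE_COMPLETION(my_thread_done);

static int my_exiting_fn(void *data)
{
        /* do the work */
        kthread_complete_and_exit(&my_thread_done, 0);
}
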
     329             : 
     330         338 : static int kthread(void *_create)
     331             : {
     332             :         static const struct sched_param param = { .sched_priority = 0 };
      333             :         /* Copy data onto this kthread's stack: @create may be freed. */
     334         338 :         struct kthread_create_info *create = _create;
     335         338 :         int (*threadfn)(void *data) = create->threadfn;
     336         338 :         void *data = create->data;
     337             :         struct completion *done;
     338             :         struct kthread *self;
     339             :         int ret;
     340             : 
     341         676 :         self = to_kthread(current);
     342             : 
      343             :         /* Release the structure when the caller was killed by a fatal signal. */
     344         676 :         done = xchg(&create->done, NULL);
     345         338 :         if (!done) {
     346           0 :                 kfree(create);
     347           0 :                 kthread_exit(-EINTR);
     348             :         }
     349             : 
     350         338 :         self->threadfn = threadfn;
     351         338 :         self->data = data;
     352             : 
     353             :         /*
     354             :          * The new thread inherited kthreadd's priority and CPU mask. Reset
     355             :          * back to default in case they have been changed.
     356             :          */
     357         338 :         sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
     358        1014 :         set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_KTHREAD));
     359             : 
     360             :         /* OK, tell user we're spawned, wait for stop or wakeup */
     361         338 :         __set_current_state(TASK_UNINTERRUPTIBLE);
     362         338 :         create->result = current;
     363             :         /*
     364             :          * Thread is going to call schedule(), do not preempt it,
     365             :          * or the creator may spend more time in wait_task_inactive().
     366             :          */
     367         338 :         preempt_disable();
     368         338 :         complete(done);
     369         338 :         schedule_preempt_disabled();
     370         338 :         preempt_enable();
     371             : 
     372         338 :         ret = -EINTR;
     373         676 :         if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
     374             :                 cgroup_kthread_ready();
     375         338 :                 __kthread_parkme(self);
     376         338 :                 ret = threadfn(data);
     377             :         }
     378           0 :         kthread_exit(ret);
     379             : }
     380             : 
      381             : /* Called from kernel_clone() to get node information for the task about to be created. */
     382         340 : int tsk_fork_get_node(struct task_struct *tsk)
     383             : {
     384             : #ifdef CONFIG_NUMA
     385             :         if (tsk == kthreadd_task)
     386             :                 return tsk->pref_node_fork;
     387             : #endif
     388         340 :         return NUMA_NO_NODE;
     389             : }
     390             : 
     391         338 : static void create_kthread(struct kthread_create_info *create)
     392             : {
     393             :         int pid;
     394             : 
     395             : #ifdef CONFIG_NUMA
     396             :         current->pref_node_fork = create->node;
     397             : #endif
     398             :         /* We want our own signal handler (we take no signals by default). */
     399         338 :         pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
     400         338 :         if (pid < 0) {
     401             :                 /* Release the structure when caller killed by a fatal signal. */
     402           0 :                 struct completion *done = xchg(&create->done, NULL);
     403             : 
     404           0 :                 if (!done) {
     405           0 :                         kfree(create);
     406           0 :                         return;
     407             :                 }
     408           0 :                 create->result = ERR_PTR(pid);
     409           0 :                 complete(done);
     410             :         }
     411             : }
     412             : 
     413             : static __printf(4, 0)
     414         338 : struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
     415             :                                                     void *data, int node,
     416             :                                                     const char namefmt[],
     417             :                                                     va_list args)
     418             : {
     419         338 :         DECLARE_COMPLETION_ONSTACK(done);
     420             :         struct task_struct *task;
     421         338 :         struct kthread_create_info *create = kmalloc(sizeof(*create),
     422             :                                                      GFP_KERNEL);
     423             : 
     424         338 :         if (!create)
     425             :                 return ERR_PTR(-ENOMEM);
     426         338 :         create->threadfn = threadfn;
     427         338 :         create->data = data;
     428         338 :         create->node = node;
     429         338 :         create->done = &done;
     430             : 
     431         338 :         spin_lock(&kthread_create_lock);
     432         676 :         list_add_tail(&create->list, &kthread_create_list);
     433         338 :         spin_unlock(&kthread_create_lock);
     434             : 
     435         338 :         wake_up_process(kthreadd_task);
     436             :         /*
     437             :          * Wait for completion in killable state, for I might be chosen by
     438             :          * the OOM killer while kthreadd is trying to allocate memory for
     439             :          * new kernel thread.
     440             :          */
     441         338 :         if (unlikely(wait_for_completion_killable(&done))) {
     442             :                 /*
     443             :                  * If I was killed by a fatal signal before kthreadd (or new
     444             :                  * kernel thread) calls complete(), leave the cleanup of this
     445             :                  * structure to that thread.
     446             :                  */
     447           0 :                 if (xchg(&create->done, NULL))
     448             :                         return ERR_PTR(-EINTR);
     449             :                 /*
     450             :                  * kthreadd (or new kernel thread) will call complete()
     451             :                  * shortly.
     452             :                  */
     453           0 :                 wait_for_completion(&done);
     454             :         }
     455         338 :         task = create->result;
     456         338 :         if (!IS_ERR(task)) {
     457             :                 char name[TASK_COMM_LEN];
     458             :                 va_list aq;
     459             :                 int len;
     460             : 
     461             :                 /*
     462             :                  * task is already visible to other tasks, so updating
     463             :                  * COMM must be protected.
     464             :                  */
     465         338 :                 va_copy(aq, args);
     466         338 :                 len = vsnprintf(name, sizeof(name), namefmt, aq);
     467         338 :                 va_end(aq);
     468         338 :                 if (len >= TASK_COMM_LEN) {
     469         650 :                         struct kthread *kthread = to_kthread(task);
     470             : 
     471             :                         /* leave it truncated when out of memory. */
     472         325 :                         kthread->full_name = kvasprintf(GFP_KERNEL, namefmt, args);
     473             :                 }
     474         338 :                 set_task_comm(task, name);
     475             :         }
     476         338 :         kfree(create);
     477         338 :         return task;
     478             : }
     479             : 
     480             : /**
     481             :  * kthread_create_on_node - create a kthread.
     482             :  * @threadfn: the function to run until signal_pending(current).
     483             :  * @data: data ptr for @threadfn.
     484             :  * @node: task and thread structures for the thread are allocated on this node
     485             :  * @namefmt: printf-style name for the thread.
     486             :  *
     487             :  * Description: This helper function creates and names a kernel
     488             :  * thread.  The thread will be stopped: use wake_up_process() to start
     489             :  * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
     490             :  * is affine to all CPUs.
     491             :  *
      492             :  * If the thread is going to be bound to a particular cpu, give its node
     493             :  * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
     494             :  * When woken, the thread will run @threadfn() with @data as its
     495             :  * argument. @threadfn() can either return directly if it is a
     496             :  * standalone thread for which no one will call kthread_stop(), or
     497             :  * return when 'kthread_should_stop()' is true (which means
     498             :  * kthread_stop() has been called).  The return value should be zero
     499             :  * or a negative error number; it will be passed to kthread_stop().
     500             :  *
     501             :  * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
     502             :  */
     503         338 : struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
     504             :                                            void *data, int node,
     505             :                                            const char namefmt[],
     506             :                                            ...)
     507             : {
     508             :         struct task_struct *task;
     509             :         va_list args;
     510             : 
     511         338 :         va_start(args, namefmt);
     512         338 :         task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
     513         338 :         va_end(args);
     514             : 
     515         338 :         return task;
     516             : }
     517             : EXPORT_SYMBOL(kthread_create_on_node);
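
/*
 * Editorial sketch (not part of kthread.c): the usual create-then-wake
 * pairing (the kthread_run() macro wraps exactly this). start_my_thread()
 * and my_thread_fn are hypothetical names.
 */
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/numa.h>

static int start_my_thread(void *my_data)
{
        struct task_struct *task;

        task = kthread_create_on_node(my_thread_fn, my_data, NUMA_NO_NODE,
                                      "my_worker/%d", 0);
        if (IS_ERR(task))
                return PTR_ERR(task);
        wake_up_process(task);  /* the new thread starts out stopped */
        return 0;
}
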
     518             : 
     519          11 : static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, unsigned int state)
     520             : {
     521             :         unsigned long flags;
     522             : 
     523          11 :         if (!wait_task_inactive(p, state)) {
     524             :                 WARN_ON(1);
     525             :                 return;
     526             :         }
     527             : 
     528             :         /* It's safe because the task is inactive. */
     529          11 :         raw_spin_lock_irqsave(&p->pi_lock, flags);
     530          11 :         do_set_cpus_allowed(p, mask);
     531          11 :         p->flags |= PF_NO_SETAFFINITY;
     532          22 :         raw_spin_unlock_irqrestore(&p->pi_lock, flags);
     533             : }
     534             : 
     535             : static void __kthread_bind(struct task_struct *p, unsigned int cpu, unsigned int state)
     536             : {
     537           2 :         __kthread_bind_mask(p, cpumask_of(cpu), state);
     538             : }
     539             : 
     540           9 : void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
     541             : {
     542           9 :         __kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
     543           9 : }
     544             : 
     545             : /**
     546             :  * kthread_bind - bind a just-created kthread to a cpu.
     547             :  * @p: thread created by kthread_create().
      548             :  * @cpu: cpu (might not be online, must be possible) for @p to run on.
     549             :  *
     550             :  * Description: This function is equivalent to set_cpus_allowed(),
     551             :  * except that @cpu doesn't need to be online, and the thread must be
     552             :  * stopped (i.e., just returned from kthread_create()).
     553             :  */
     554           0 : void kthread_bind(struct task_struct *p, unsigned int cpu)
     555             : {
     556           2 :         __kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
     557           0 : }
     558             : EXPORT_SYMBOL(kthread_bind);
     559             : 
     560             : /**
     561             :  * kthread_create_on_cpu - Create a cpu bound kthread
     562             :  * @threadfn: the function to run until signal_pending(current).
     563             :  * @data: data ptr for @threadfn.
      564             :  * @cpu: The cpu on which the thread should be bound.
     565             :  * @namefmt: printf-style name for the thread. Format is restricted
     566             :  *           to "name.*%u". Code fills in cpu number.
     567             :  *
      568             :  * Description: This helper function creates and names a kernel thread bound to @cpu.
     569             :  */
     570           1 : struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
     571             :                                           void *data, unsigned int cpu,
     572             :                                           const char *namefmt)
     573             : {
     574             :         struct task_struct *p;
     575             : 
     576           1 :         p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
     577             :                                    cpu);
     578           1 :         if (IS_ERR(p))
     579             :                 return p;
     580           1 :         kthread_bind(p, cpu);
     581             :         /* CPU hotplug need to bind once again when unparking the thread. */
     582           2 :         to_kthread(p)->cpu = cpu;
     583           1 :         return p;
     584             : }
     585             : EXPORT_SYMBOL(kthread_create_on_cpu);
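
/*
 * Editorial sketch (not part of kthread.c): one bound thread per online
 * CPU; note the namefmt carries the "%u" the API requires. The names are
 * hypothetical, and real code would hold cpus_read_lock() around the walk.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/kthread.h>

static int start_per_cpu_helpers(void)
{
        unsigned int cpu;

        for_each_online_cpu(cpu) {
                struct task_struct *p;

                p = kthread_create_on_cpu(my_thread_fn, NULL, cpu,
                                          "my_helper/%u");
                if (IS_ERR(p))
                        return PTR_ERR(p);
                wake_up_process(p);
        }
        return 0;
}
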
     586             : 
     587           5 : void kthread_set_per_cpu(struct task_struct *k, int cpu)
     588             : {
     589          10 :         struct kthread *kthread = to_kthread(k);
     590           5 :         if (!kthread)
     591             :                 return;
     592             : 
     593           5 :         WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));
     594             : 
     595           5 :         if (cpu < 0) {
     596           0 :                 clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
     597             :                 return;
     598             :         }
     599             : 
     600           5 :         kthread->cpu = cpu;
     601           5 :         set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
     602             : }
     603             : 
     604           0 : bool kthread_is_per_cpu(struct task_struct *p)
     605             : {
     606           0 :         struct kthread *kthread = __to_kthread(p);
     607           0 :         if (!kthread)
     608             :                 return false;
     609             : 
     610           0 :         return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
     611             : }
     612             : 
     613             : /**
     614             :  * kthread_unpark - unpark a thread created by kthread_create().
     615             :  * @k:          thread created by kthread_create().
     616             :  *
      617             :  * Sets kthread_should_park() for @k to return false and wakes it. If
      618             :  * the thread is marked percpu then it is bound to the cpu again before
      619             :  * being woken.
     620             :  */
     621           1 : void kthread_unpark(struct task_struct *k)
     622             : {
     623           2 :         struct kthread *kthread = to_kthread(k);
     624             : 
     625             :         /*
     626             :          * Newly created kthread was parked when the CPU was offline.
     627             :          * The binding was lost and we need to set it again.
     628             :          */
     629           2 :         if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
     630           1 :                 __kthread_bind(k, kthread->cpu, TASK_PARKED);
     631             : 
     632           2 :         clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
     633             :         /*
     634             :          * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
     635             :          */
     636           1 :         wake_up_state(k, TASK_PARKED);
     637           1 : }
     638             : EXPORT_SYMBOL_GPL(kthread_unpark);
     639             : 
     640             : /**
     641             :  * kthread_park - park a thread created by kthread_create().
     642             :  * @k: thread created by kthread_create().
     643             :  *
     644             :  * Sets kthread_should_park() for @k to return true, wakes it, and
      645             :  * waits for it to park. This can also be called after kthread_create()
     646             :  * instead of calling wake_up_process(): the thread will park without
     647             :  * calling threadfn().
     648             :  *
      649             :  * Returns 0 if the thread is parked, -ENOSYS if the thread exited, and
      650             :  * -EBUSY if it was already parked. If called by the kthread itself, only the park bit is set.
     651             :  */
     652           1 : int kthread_park(struct task_struct *k)
     653             : {
     654           2 :         struct kthread *kthread = to_kthread(k);
     655             : 
     656           1 :         if (WARN_ON(k->flags & PF_EXITING))
     657             :                 return -ENOSYS;
     658             : 
     659           2 :         if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
     660             :                 return -EBUSY;
     661             : 
     662           2 :         set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
     663           1 :         if (k != current) {
     664           1 :                 wake_up_process(k);
     665             :                 /*
      666             :                  * Wait for __kthread_parkme() to complete(); this means we
     667             :                  * _will_ have TASK_PARKED and are about to call schedule().
     668             :                  */
     669           1 :                 wait_for_completion(&kthread->parked);
     670             :                 /*
     671             :                  * Now wait for that schedule() to complete and the task to
     672             :                  * get scheduled out.
     673             :                  */
     674           1 :                 WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
     675             :         }
     676             : 
     677             :         return 0;
     678             : }
     679             : EXPORT_SYMBOL_GPL(kthread_park);
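
/*
 * Editorial sketch (not part of kthread.c): quiescing a bound thread
 * around a hotplug-style event. quiesce_my_helper() is hypothetical.
 */
#include <linux/kthread.h>

static int quiesce_my_helper(struct task_struct *helper)
{
        int ret = kthread_park(helper); /* returns once helper is TASK_PARKED */

        if (ret)
                return ret;
        /* ... the CPU may go away and come back here ... */
        kthread_unpark(helper);         /* re-binds a per-cpu thread, then wakes it */
        return 0;
}
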
     680             : 
     681             : /**
     682             :  * kthread_stop - stop a thread created by kthread_create().
     683             :  * @k: thread created by kthread_create().
     684             :  *
     685             :  * Sets kthread_should_stop() for @k to return true, wakes it, and
     686             :  * waits for it to exit. This can also be called after kthread_create()
     687             :  * instead of calling wake_up_process(): the thread will exit without
     688             :  * calling threadfn().
     689             :  *
     690             :  * If threadfn() may call kthread_exit() itself, the caller must ensure
     691             :  * task_struct can't go away.
     692             :  *
     693             :  * Returns the result of threadfn(), or %-EINTR if wake_up_process()
     694             :  * was never called.
     695             :  */
     696           0 : int kthread_stop(struct task_struct *k)
     697             : {
     698             :         struct kthread *kthread;
     699             :         int ret;
     700             : 
     701           0 :         trace_sched_kthread_stop(k);
     702             : 
     703           0 :         get_task_struct(k);
     704           0 :         kthread = to_kthread(k);
     705           0 :         set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
     706           0 :         kthread_unpark(k);
     707           0 :         set_tsk_thread_flag(k, TIF_NOTIFY_SIGNAL);
     708           0 :         wake_up_process(k);
     709           0 :         wait_for_completion(&kthread->exited);
     710           0 :         ret = kthread->result;
     711           0 :         put_task_struct(k);
     712             : 
     713           0 :         trace_sched_kthread_stop_ret(ret);
     714           0 :         return ret;
     715             : }
     716             : EXPORT_SYMBOL(kthread_stop);
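
/*
 * Editorial sketch (not part of kthread.c): tearing a thread down and
 * collecting its exit code. stop_my_thread() is a hypothetical name.
 */
#include <linux/kthread.h>
#include <linux/printk.h>

static void stop_my_thread(struct task_struct *task)
{
        int ret = kthread_stop(task);   /* wakes the thread, waits for it to exit */

        pr_info("my thread exited with %d\n", ret);     /* -EINTR if never woken */
}
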
     717             : 
     718           1 : int kthreadd(void *unused)
     719             : {
     720           1 :         struct task_struct *tsk = current;
     721             : 
     722             :         /* Setup a clean context for our children to inherit. */
     723           1 :         set_task_comm(tsk, "kthreadd");
     724           1 :         ignore_signals(tsk);
     725           2 :         set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_TYPE_KTHREAD));
     726           1 :         set_mems_allowed(node_states[N_MEMORY]);
     727             : 
     728           1 :         current->flags |= PF_NOFREEZE;
     729             :         cgroup_init_kthreadd();
     730             : 
     731             :         for (;;) {
     732         338 :                 set_current_state(TASK_INTERRUPTIBLE);
     733         338 :                 if (list_empty(&kthread_create_list))
     734         338 :                         schedule();
     735         337 :                 __set_current_state(TASK_RUNNING);
     736             : 
     737             :                 spin_lock(&kthread_create_lock);
     738         675 :                 while (!list_empty(&kthread_create_list)) {
     739             :                         struct kthread_create_info *create;
     740             : 
     741         338 :                         create = list_entry(kthread_create_list.next,
     742             :                                             struct kthread_create_info, list);
     743         676 :                         list_del_init(&create->list);
     744         338 :                         spin_unlock(&kthread_create_lock);
     745             : 
     746         338 :                         create_kthread(create);
     747             : 
     748             :                         spin_lock(&kthread_create_lock);
     749             :                 }
     750             :                 spin_unlock(&kthread_create_lock);
     751             :         }
     752             : 
     753             :         return 0;
     754             : }
     755             : 
     756           0 : void __kthread_init_worker(struct kthread_worker *worker,
     757             :                                 const char *name,
     758             :                                 struct lock_class_key *key)
     759             : {
     760           0 :         memset(worker, 0, sizeof(struct kthread_worker));
     761             :         raw_spin_lock_init(&worker->lock);
     762             :         lockdep_set_class_and_name(&worker->lock, key, name);
     763           0 :         INIT_LIST_HEAD(&worker->work_list);
     764           0 :         INIT_LIST_HEAD(&worker->delayed_work_list);
     765           0 : }
     766             : EXPORT_SYMBOL_GPL(__kthread_init_worker);
     767             : 
     768             : /**
     769             :  * kthread_worker_fn - kthread function to process kthread_worker
     770             :  * @worker_ptr: pointer to initialized kthread_worker
     771             :  *
      772             :  * This function implements the main loop of a kthread worker. It processes
     773             :  * work_list until it is stopped with kthread_stop(). It sleeps when the queue
     774             :  * is empty.
     775             :  *
      776             :  * Work items must not hold any locks or leave preemption or interrupts
      777             :  * disabled when they finish. There is a defined safe point for freezing
      778             :  * after one work item finishes and before the next one is started.
     779             :  *
      780             :  * Also, a work item must not be handled by more than one worker at the
      781             :  * same time; see kthread_queue_work().
     782             :  */
     783           0 : int kthread_worker_fn(void *worker_ptr)
     784             : {
     785           0 :         struct kthread_worker *worker = worker_ptr;
     786             :         struct kthread_work *work;
     787             : 
     788             :         /*
     789             :          * FIXME: Update the check and remove the assignment when all kthread
     790             :          * worker users are created using kthread_create_worker*() functions.
     791             :          */
     792           0 :         WARN_ON(worker->task && worker->task != current);
     793           0 :         worker->task = current;
     794             : 
     795           0 :         if (worker->flags & KTW_FREEZABLE)
     796           0 :                 set_freezable();
     797             : 
     798             : repeat:
     799           0 :         set_current_state(TASK_INTERRUPTIBLE);  /* mb paired w/ kthread_stop */
     800             : 
     801           0 :         if (kthread_should_stop()) {
     802           0 :                 __set_current_state(TASK_RUNNING);
     803           0 :                 raw_spin_lock_irq(&worker->lock);
     804           0 :                 worker->task = NULL;
     805           0 :                 raw_spin_unlock_irq(&worker->lock);
     806           0 :                 return 0;
     807             :         }
     808             : 
     809           0 :         work = NULL;
     810           0 :         raw_spin_lock_irq(&worker->lock);
     811           0 :         if (!list_empty(&worker->work_list)) {
     812           0 :                 work = list_first_entry(&worker->work_list,
     813             :                                         struct kthread_work, node);
     814           0 :                 list_del_init(&work->node);
     815             :         }
     816           0 :         worker->current_work = work;
     817           0 :         raw_spin_unlock_irq(&worker->lock);
     818             : 
     819           0 :         if (work) {
     820           0 :                 kthread_work_func_t func = work->func;
     821           0 :                 __set_current_state(TASK_RUNNING);
     822           0 :                 trace_sched_kthread_work_execute_start(work);
     823           0 :                 work->func(work);
     824             :                 /*
     825             :                  * Avoid dereferencing work after this point.  The trace
     826             :                  * event only cares about the address.
     827             :                  */
     828           0 :                 trace_sched_kthread_work_execute_end(work, func);
     829           0 :         } else if (!freezing(current))
     830           0 :                 schedule();
     831             : 
     832           0 :         try_to_freeze();
     833           0 :         cond_resched();
     834           0 :         goto repeat;
     835             : }
     836             : EXPORT_SYMBOL_GPL(kthread_worker_fn);
     837             : 
     838             : static __printf(3, 0) struct kthread_worker *
     839           0 : __kthread_create_worker(int cpu, unsigned int flags,
     840             :                         const char namefmt[], va_list args)
     841             : {
     842             :         struct kthread_worker *worker;
     843             :         struct task_struct *task;
     844           0 :         int node = NUMA_NO_NODE;
     845             : 
     846           0 :         worker = kzalloc(sizeof(*worker), GFP_KERNEL);
     847           0 :         if (!worker)
     848             :                 return ERR_PTR(-ENOMEM);
     849             : 
     850           0 :         kthread_init_worker(worker);
     851             : 
     852           0 :         if (cpu >= 0)
     853           0 :                 node = cpu_to_node(cpu);
     854             : 
     855           0 :         task = __kthread_create_on_node(kthread_worker_fn, worker,
     856             :                                                 node, namefmt, args);
     857           0 :         if (IS_ERR(task))
     858             :                 goto fail_task;
     859             : 
     860           0 :         if (cpu >= 0)
     861           0 :                 kthread_bind(task, cpu);
     862             : 
     863           0 :         worker->flags = flags;
     864           0 :         worker->task = task;
     865           0 :         wake_up_process(task);
     866           0 :         return worker;
     867             : 
     868             : fail_task:
     869           0 :         kfree(worker);
     870           0 :         return ERR_CAST(task);
     871             : }
     872             : 
     873             : /**
     874             :  * kthread_create_worker - create a kthread worker
     875             :  * @flags: flags modifying the default behavior of the worker
     876             :  * @namefmt: printf-style name for the kthread worker (task).
     877             :  *
     878             :  * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
     879             :  * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
     880             :  * when the caller was killed by a fatal signal.
     881             :  */
     882             : struct kthread_worker *
     883           0 : kthread_create_worker(unsigned int flags, const char namefmt[], ...)
     884             : {
     885             :         struct kthread_worker *worker;
     886             :         va_list args;
     887             : 
     888           0 :         va_start(args, namefmt);
     889           0 :         worker = __kthread_create_worker(-1, flags, namefmt, args);
     890           0 :         va_end(args);
     891             : 
     892           0 :         return worker;
     893             : }
     894             : EXPORT_SYMBOL(kthread_create_worker);
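
/*
 * Editorial sketch (not part of kthread.c): setting up one dedicated
 * worker thread. my_worker and start_my_worker() are hypothetical names.
 */
#include <linux/err.h>
#include <linux/kthread.h>

static struct kthread_worker *my_worker;

static int start_my_worker(void)
{
        my_worker = kthread_create_worker(0, "my_worker");
        if (IS_ERR(my_worker))
                return PTR_ERR(my_worker);
        return 0;
}
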
     895             : 
     896             : /**
     897             :  * kthread_create_worker_on_cpu - create a kthread worker and bind it
     898             :  *      to a given CPU and the associated NUMA node.
     899             :  * @cpu: CPU number
     900             :  * @flags: flags modifying the default behavior of the worker
     901             :  * @namefmt: printf-style name for the kthread worker (task).
     902             :  *
     903             :  * Use a valid CPU number if you want to bind the kthread worker
     904             :  * to the given CPU and the associated NUMA node.
     905             :  *
      906             :  * A good practice is to include the cpu number in the worker name as well.
     907             :  * For example, use kthread_create_worker_on_cpu(cpu, "helper/%d", cpu).
     908             :  *
     909             :  * CPU hotplug:
     910             :  * The kthread worker API is simple and generic. It just provides a way
     911             :  * to create, use, and destroy workers.
     912             :  *
      913             :  * It is up to the API user how to handle CPU hotplug. They have to decide
      914             :  * how to handle pending work items, prevent queuing new ones, and
      915             :  * restore functionality when the CPU goes offline and comes back online.
      916             :  * There are a few catches:
      917             :  *
      918             :  *    - CPU affinity is lost when the worker is scheduled on an offline CPU.
      919             :  *
      920             :  *    - The worker might not exist if the CPU was offline when the user
      921             :  *      created the workers.
     922             :  *
     923             :  * Good practice is to implement two CPU hotplug callbacks and to
     924             :  * destroy/create the worker when the CPU goes down/up.
     925             :  *
     926             :  * Return:
     927             :  * The pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
     928             :  * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
     929             :  * when the caller was killed by a fatal signal.
     930             :  */
     931             : struct kthread_worker *
     932           0 : kthread_create_worker_on_cpu(int cpu, unsigned int flags,
     933             :                              const char namefmt[], ...)
     934             : {
     935             :         struct kthread_worker *worker;
     936             :         va_list args;
     937             : 
     938           0 :         va_start(args, namefmt);
     939           0 :         worker = __kthread_create_worker(cpu, flags, namefmt, args);
     940           0 :         va_end(args);
     941             : 
     942           0 :         return worker;
     943             : }
     944             : EXPORT_SYMBOL(kthread_create_worker_on_cpu);
     945             : 
     946             : /*
      947             :  * Returns true when the work cannot be queued at the moment,
      948             :  * i.e. when it is already pending on a worker list
      949             :  * or is being cancelled.
     950             :  */
     951             : static inline bool queuing_blocked(struct kthread_worker *worker,
     952             :                                    struct kthread_work *work)
     953             : {
     954             :         lockdep_assert_held(&worker->lock);
     955             : 
     956           0 :         return !list_empty(&work->node) || work->canceling;
     957             : }
     958             : 
     959           0 : static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
     960             :                                              struct kthread_work *work)
     961             : {
     962             :         lockdep_assert_held(&worker->lock);
     963           0 :         WARN_ON_ONCE(!list_empty(&work->node));
     964             :         /* Do not use a work with >1 worker, see kthread_queue_work() */
     965           0 :         WARN_ON_ONCE(work->worker && work->worker != worker);
     966           0 : }
     967             : 
     968             : /* insert @work before @pos in @worker */
     969           0 : static void kthread_insert_work(struct kthread_worker *worker,
     970             :                                 struct kthread_work *work,
     971             :                                 struct list_head *pos)
     972             : {
     973           0 :         kthread_insert_work_sanity_check(worker, work);
     974             : 
     975           0 :         trace_sched_kthread_work_queue_work(worker, work);
     976             : 
     977           0 :         list_add_tail(&work->node, pos);
     978           0 :         work->worker = worker;
     979           0 :         if (!worker->current_work && likely(worker->task))
     980           0 :                 wake_up_process(worker->task);
     981           0 : }
     982             : 
     983             : /**
     984             :  * kthread_queue_work - queue a kthread_work
     985             :  * @worker: target kthread_worker
     986             :  * @work: kthread_work to queue
     987             :  *
      988             :  * Queue @work to @worker for async execution.  @worker
      989             :  * must have been created with kthread_create_worker().  Returns %true
     990             :  * if @work was successfully queued, %false if it was already pending.
     991             :  *
     992             :  * Reinitialize the work if it needs to be used by another worker.
     993             :  * For example, when the worker was stopped and started again.
     994             :  */
     995           0 : bool kthread_queue_work(struct kthread_worker *worker,
     996             :                         struct kthread_work *work)
     997             : {
     998           0 :         bool ret = false;
     999             :         unsigned long flags;
    1000             : 
    1001           0 :         raw_spin_lock_irqsave(&worker->lock, flags);
    1002           0 :         if (!queuing_blocked(worker, work)) {
    1003           0 :                 kthread_insert_work(worker, work, &worker->work_list);
    1004           0 :                 ret = true;
    1005             :         }
    1006           0 :         raw_spin_unlock_irqrestore(&worker->lock, flags);
    1007           0 :         return ret;
    1008             : }
    1009             : EXPORT_SYMBOL_GPL(kthread_queue_work);
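
/*
 * Editorial sketch (not part of kthread.c): defining a work item and
 * queueing it on the worker created in the earlier sketch. my_work_fn,
 * my_work and my_worker are hypothetical names.
 */
#include <linux/kthread.h>

static void my_work_fn(struct kthread_work *work)
{
        /* runs in my_worker's thread context, one item at a time */
}

static DEFINE_KTHREAD_WORK(my_work, my_work_fn);

static bool kick_my_work(void)
{
        /* false means my_work was already pending on a worker */
        return kthread_queue_work(my_worker, &my_work);
}
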
    1010             : 
    1011             : /**
    1012             :  * kthread_delayed_work_timer_fn - callback that queues the associated kthread
    1013             :  *      delayed work when the timer expires.
    1014             :  * @t: pointer to the expired timer
    1015             :  *
     1016             :  * The signature of the function is defined by struct timer_list.
     1017             :  * It is called from an irq-safe timer with interrupts already disabled.
    1018             :  */
    1019           0 : void kthread_delayed_work_timer_fn(struct timer_list *t)
    1020             : {
    1021           0 :         struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
    1022           0 :         struct kthread_work *work = &dwork->work;
    1023           0 :         struct kthread_worker *worker = work->worker;
    1024             :         unsigned long flags;
    1025             : 
    1026             :         /*
     1027             :          * This might happen when a pending work is reinitialized;
     1028             :          * it means that the work is being used the wrong way.
    1029             :          */
    1030           0 :         if (WARN_ON_ONCE(!worker))
    1031             :                 return;
    1032             : 
    1033           0 :         raw_spin_lock_irqsave(&worker->lock, flags);
    1034             :         /* Work must not be used with >1 worker, see kthread_queue_work(). */
    1035           0 :         WARN_ON_ONCE(work->worker != worker);
    1036             : 
    1037             :         /* Move the work from worker->delayed_work_list. */
    1038           0 :         WARN_ON_ONCE(list_empty(&work->node));
    1039           0 :         list_del_init(&work->node);
    1040           0 :         if (!work->canceling)
    1041           0 :                 kthread_insert_work(worker, work, &worker->work_list);
    1042             : 
    1043           0 :         raw_spin_unlock_irqrestore(&worker->lock, flags);
    1044             : }
    1045             : EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
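
The timer is bound to this callback by the delayed-work initializers in <linux/kthread.h>. A hedged sketch of the setup, reusing the hypothetical my_work_fn from the earlier example:

    static struct kthread_delayed_work my_dwork;

    /* The initializer points my_dwork.timer.function at
     * kthread_delayed_work_timer_fn and marks the timer TIMER_IRQSAFE,
     * which is why the callback runs with irqs already off, as noted
     * in the comment above. */
    kthread_init_delayed_work(&my_dwork, my_work_fn);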
    1046             : 
    1047           0 : static void __kthread_queue_delayed_work(struct kthread_worker *worker,
    1048             :                                          struct kthread_delayed_work *dwork,
    1049             :                                          unsigned long delay)
    1050             : {
    1051           0 :         struct timer_list *timer = &dwork->timer;
    1052           0 :         struct kthread_work *work = &dwork->work;
    1053             : 
    1054           0 :         WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);
    1055             : 
    1056             :         /*
    1057             :          * If @delay is 0, queue @dwork->work immediately.  This is for
    1058             :          * both optimization and correctness.  The earliest @timer can
    1059             :          * expire is on the closest next tick and delayed_work users depend
    1060             :          * on that there's no such delay when @delay is 0.
    1061             :          */
    1062           0 :         if (!delay) {
    1063           0 :                 kthread_insert_work(worker, work, &worker->work_list);
    1064           0 :                 return;
    1065             :         }
    1066             : 
    1067             :         /* Be paranoid and try to detect possible races already now. */
    1068           0 :         kthread_insert_work_sanity_check(worker, work);
    1069             : 
    1070           0 :         list_add(&work->node, &worker->delayed_work_list);
    1071           0 :         work->worker = worker;
    1072           0 :         timer->expires = jiffies + delay;
    1073           0 :         add_timer(timer);
    1074             : }
    1075             : 
    1076             : /**
    1077             :  * kthread_queue_delayed_work - queue the associated kthread work
    1078             :  *      after a delay.
    1079             :  * @worker: target kthread_worker
    1080             :  * @dwork: kthread_delayed_work to queue
    1081             :  * @delay: number of jiffies to wait before queuing
    1082             :  *
     1083             :  * If the work is not already pending, this starts a timer that will
     1084             :  * queue the work after the given @delay. If @delay is zero, it queues
     1085             :  * the work immediately.
     1086             :  *
     1087             :  * Return: %false if @work was already pending, meaning that either
     1088             :  * the timer was running or the work was already queued. Returns
     1089             :  * %true otherwise.
    1090             :  */
    1091           0 : bool kthread_queue_delayed_work(struct kthread_worker *worker,
    1092             :                                 struct kthread_delayed_work *dwork,
    1093             :                                 unsigned long delay)
    1094             : {
    1095           0 :         struct kthread_work *work = &dwork->work;
    1096             :         unsigned long flags;
    1097           0 :         bool ret = false;
    1098             : 
    1099           0 :         raw_spin_lock_irqsave(&worker->lock, flags);
    1100             : 
    1101           0 :         if (!queuing_blocked(worker, work)) {
    1102           0 :                 __kthread_queue_delayed_work(worker, dwork, delay);
    1103           0 :                 ret = true;
    1104             :         }
    1105             : 
    1106           0 :         raw_spin_unlock_irqrestore(&worker->lock, flags);
    1107           0 :         return ret;
    1108             : }
    1109             : EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
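
Continuing the hypothetical sketch from above, queuing my_dwork so that it runs roughly 100 ms later:

    /* A zero delay would queue the work immediately instead. */
    if (!kthread_queue_delayed_work(my_worker, &my_dwork,
                                    msecs_to_jiffies(100)))
            pr_info("my_dwork was already pending\n");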
    1110             : 
    1111             : struct kthread_flush_work {
    1112             :         struct kthread_work     work;
    1113             :         struct completion       done;
    1114             : };
    1115             : 
    1116           0 : static void kthread_flush_work_fn(struct kthread_work *work)
    1117             : {
    1118           0 :         struct kthread_flush_work *fwork =
    1119           0 :                 container_of(work, struct kthread_flush_work, work);
    1120           0 :         complete(&fwork->done);
    1121           0 : }
    1122             : 
    1123             : /**
    1124             :  * kthread_flush_work - flush a kthread_work
    1125             :  * @work: work to flush
    1126             :  *
    1127             :  * If @work is queued or executing, wait for it to finish execution.
    1128             :  */
    1129           0 : void kthread_flush_work(struct kthread_work *work)
    1130             : {
    1131           0 :         struct kthread_flush_work fwork = {
    1132             :                 KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
    1133           0 :                 COMPLETION_INITIALIZER_ONSTACK(fwork.done),
    1134             :         };
    1135             :         struct kthread_worker *worker;
    1136           0 :         bool noop = false;
    1137             : 
    1138           0 :         worker = work->worker;
    1139           0 :         if (!worker)
    1140           0 :                 return;
    1141             : 
    1142           0 :         raw_spin_lock_irq(&worker->lock);
    1143             :         /* Work must not be used with >1 worker, see kthread_queue_work(). */
    1144           0 :         WARN_ON_ONCE(work->worker != worker);
    1145             : 
    1146           0 :         if (!list_empty(&work->node))
    1147           0 :                 kthread_insert_work(worker, &fwork.work, work->node.next);
    1148           0 :         else if (worker->current_work == work)
    1149           0 :                 kthread_insert_work(worker, &fwork.work,
    1150             :                                     worker->work_list.next);
    1151             :         else
    1152             :                 noop = true;
    1153             : 
    1154           0 :         raw_spin_unlock_irq(&worker->lock);
    1155             : 
    1156           0 :         if (!noop)
    1157           0 :                 wait_for_completion(&fwork.done);
    1158             : }
    1159             : EXPORT_SYMBOL_GPL(kthread_flush_work);
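
kthread_flush_work() is typically used as a barrier before tearing down state the work touches. A hypothetical sketch, where my_data is some buffer that my_work_fn dereferences:

    /* Wait until any queued or running instance of my_work is done.
     * This does not prevent my_work from being queued again later. */
    kthread_flush_work(&my_work);
    kfree(my_data);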
    1160             : 
    1161             : /*
     1162             :  * Make sure that the timer is neither set nor running and can no
     1163             :  * longer manipulate the work list_head.
     1164             :  *
     1165             :  * The function is called under worker->lock. The lock is temporarily
     1166             :  * released but the timer cannot be set again in the meantime.
    1167             :  */
    1168           0 : static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
    1169             :                                               unsigned long *flags)
    1170             : {
    1171           0 :         struct kthread_delayed_work *dwork =
    1172           0 :                 container_of(work, struct kthread_delayed_work, work);
    1173           0 :         struct kthread_worker *worker = work->worker;
    1174             : 
    1175             :         /*
    1176             :          * del_timer_sync() must be called to make sure that the timer
     1177             :          * callback is not running. The lock must be temporarily released
    1178             :          * to avoid a deadlock with the callback. In the meantime,
    1179             :          * any queuing is blocked by setting the canceling counter.
    1180             :          */
    1181           0 :         work->canceling++;
    1182           0 :         raw_spin_unlock_irqrestore(&worker->lock, *flags);
    1183           0 :         del_timer_sync(&dwork->timer);
    1184           0 :         raw_spin_lock_irqsave(&worker->lock, *flags);
    1185           0 :         work->canceling--;
    1186           0 : }
    1187             : 
    1188             : /*
    1189             :  * This function removes the work from the worker queue.
    1190             :  *
    1191             :  * It is called under worker->lock. The caller must make sure that
    1192             :  * the timer used by delayed work is not running, e.g. by calling
    1193             :  * kthread_cancel_delayed_work_timer().
    1194             :  *
     1195             :  * The work might still be in use when this function finishes: it may
     1196             :  * still be running as the worker's current_work.
    1197             :  *
    1198             :  * Return: %true if @work was pending and successfully canceled,
    1199             :  *      %false if @work was not pending
    1200             :  */
    1201             : static bool __kthread_cancel_work(struct kthread_work *work)
    1202             : {
    1203             :         /*
    1204             :          * Try to remove the work from a worker list. It might either
    1205             :          * be from worker->work_list or from worker->delayed_work_list.
    1206             :          */
    1207           0 :         if (!list_empty(&work->node)) {
    1208           0 :                 list_del_init(&work->node);
    1209             :                 return true;
    1210             :         }
    1211             : 
    1212             :         return false;
    1213             : }
    1214             : 
    1215             : /**
    1216             :  * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
    1217             :  * @worker: kthread worker to use
    1218             :  * @dwork: kthread delayed work to queue
    1219             :  * @delay: number of jiffies to wait before queuing
    1220             :  *
    1221             :  * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
    1222             :  * modify @dwork's timer so that it expires after @delay. If @delay is zero,
    1223             :  * @work is guaranteed to be queued immediately.
    1224             :  *
    1225             :  * Return: %false if @dwork was idle and queued, %true otherwise.
    1226             :  *
    1227             :  * A special case is when the work is being canceled in parallel.
     1228             :  * It might be caused either by a real kthread_cancel_delayed_work_sync()
     1229             :  * or by yet another kthread_mod_delayed_work() call. We let the other command
     1230             :  * win and return %true here. The return value can be used for reference
     1231             :  * counting and the number of queued works stays the same. In any case,
     1232             :  * the caller is supposed to synchronize these operations in a reasonable way.
    1233             :  *
    1234             :  * This function is safe to call from any context including IRQ handler.
    1235             :  * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
    1236             :  * for details.
    1237             :  */
    1238           0 : bool kthread_mod_delayed_work(struct kthread_worker *worker,
    1239             :                               struct kthread_delayed_work *dwork,
    1240             :                               unsigned long delay)
    1241             : {
    1242           0 :         struct kthread_work *work = &dwork->work;
    1243             :         unsigned long flags;
    1244             :         int ret;
    1245             : 
    1246           0 :         raw_spin_lock_irqsave(&worker->lock, flags);
    1247             : 
    1248             :         /* Do not bother with canceling when never queued. */
    1249           0 :         if (!work->worker) {
    1250             :                 ret = false;
    1251             :                 goto fast_queue;
    1252             :         }
    1253             : 
    1254             :         /* Work must not be used with >1 worker, see kthread_queue_work() */
    1255           0 :         WARN_ON_ONCE(work->worker != worker);
    1256             : 
    1257             :         /*
     1258             :          * Temporarily cancel the work but do not fight with another command
    1259             :          * that is canceling the work as well.
    1260             :          *
    1261             :          * It is a bit tricky because of possible races with another
    1262             :          * mod_delayed_work() and cancel_delayed_work() callers.
    1263             :          *
    1264             :          * The timer must be canceled first because worker->lock is released
    1265             :          * when doing so. But the work can be removed from the queue (list)
    1266             :          * only when it can be queued again so that the return value can
    1267             :          * be used for reference counting.
    1268             :          */
    1269           0 :         kthread_cancel_delayed_work_timer(work, &flags);
    1270           0 :         if (work->canceling) {
    1271             :                 /* The number of works in the queue does not change. */
    1272             :                 ret = true;
    1273             :                 goto out;
    1274             :         }
    1275           0 :         ret = __kthread_cancel_work(work);
    1276             : 
    1277             : fast_queue:
    1278           0 :         __kthread_queue_delayed_work(worker, dwork, delay);
    1279             : out:
    1280           0 :         raw_spin_unlock_irqrestore(&worker->lock, flags);
    1281           0 :         return ret;
    1282             : }
    1283             : EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
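
A common pattern for kthread_mod_delayed_work() is re-arming an inactivity timeout whenever activity is seen; a hypothetical sketch:

    /* If my_dwork is idle it gets queued; if its timer is already
     * running, the expiry is simply pushed back by one second.
     * Safe from IRQ context, per the comment above. */
    kthread_mod_delayed_work(my_worker, &my_dwork,
                             msecs_to_jiffies(1000));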
    1284             : 
    1285           0 : static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
    1286             : {
    1287           0 :         struct kthread_worker *worker = work->worker;
    1288             :         unsigned long flags;
    1289           0 :         int ret = false;
    1290             : 
    1291           0 :         if (!worker)
    1292             :                 goto out;
    1293             : 
    1294           0 :         raw_spin_lock_irqsave(&worker->lock, flags);
    1295             :         /* Work must not be used with >1 worker, see kthread_queue_work(). */
    1296           0 :         WARN_ON_ONCE(work->worker != worker);
    1297             : 
    1298           0 :         if (is_dwork)
    1299           0 :                 kthread_cancel_delayed_work_timer(work, &flags);
    1300             : 
    1301           0 :         ret = __kthread_cancel_work(work);
    1302             : 
    1303           0 :         if (worker->current_work != work)
    1304             :                 goto out_fast;
    1305             : 
    1306             :         /*
    1307             :          * The work is in progress and we need to wait with the lock released.
    1308             :          * In the meantime, block any queuing by setting the canceling counter.
    1309             :          */
    1310           0 :         work->canceling++;
    1311           0 :         raw_spin_unlock_irqrestore(&worker->lock, flags);
    1312           0 :         kthread_flush_work(work);
    1313           0 :         raw_spin_lock_irqsave(&worker->lock, flags);
    1314           0 :         work->canceling--;
    1315             : 
    1316             : out_fast:
    1317           0 :         raw_spin_unlock_irqrestore(&worker->lock, flags);
    1318             : out:
    1319           0 :         return ret;
    1320             : }
    1321             : 
    1322             : /**
    1323             :  * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
    1324             :  * @work: the kthread work to cancel
    1325             :  *
    1326             :  * Cancel @work and wait for its execution to finish.  This function
    1327             :  * can be used even if the work re-queues itself. On return from this
    1328             :  * function, @work is guaranteed to be not pending or executing on any CPU.
    1329             :  *
    1330             :  * kthread_cancel_work_sync(&delayed_work->work) must not be used for
    1331             :  * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
    1332             :  *
    1333             :  * The caller must ensure that the worker on which @work was last
    1334             :  * queued can't be destroyed before this function returns.
    1335             :  *
    1336             :  * Return: %true if @work was pending, %false otherwise.
    1337             :  */
    1338           0 : bool kthread_cancel_work_sync(struct kthread_work *work)
    1339             : {
    1340           0 :         return __kthread_cancel_work_sync(work, false);
    1341             : }
    1342             : EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
    1343             : 
    1344             : /**
    1345             :  * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
    1346             :  *      wait for it to finish.
    1347             :  * @dwork: the kthread delayed work to cancel
    1348             :  *
    1349             :  * This is kthread_cancel_work_sync() for delayed works.
    1350             :  *
    1351             :  * Return: %true if @dwork was pending, %false otherwise.
    1352             :  */
    1353           0 : bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
    1354             : {
    1355           0 :         return __kthread_cancel_work_sync(&dwork->work, true);
    1356             : }
    1357             : EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
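
On a shutdown path the two synchronous cancels are typically used together; a hypothetical sketch (the worker itself must outlive these calls, as noted above):

    /* After these return, neither work is pending or running anywhere,
     * even if the work functions re-queue themselves. */
    kthread_cancel_delayed_work_sync(&my_dwork);
    kthread_cancel_work_sync(&my_work);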
    1358             : 
    1359             : /**
    1360             :  * kthread_flush_worker - flush all current works on a kthread_worker
    1361             :  * @worker: worker to flush
    1362             :  *
    1363             :  * Wait until all currently executing or pending works on @worker are
    1364             :  * finished.
    1365             :  */
    1366           0 : void kthread_flush_worker(struct kthread_worker *worker)
    1367             : {
    1368           0 :         struct kthread_flush_work fwork = {
    1369             :                 KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
    1370           0 :                 COMPLETION_INITIALIZER_ONSTACK(fwork.done),
    1371             :         };
    1372             : 
    1373           0 :         kthread_queue_work(worker, &fwork.work);
    1374           0 :         wait_for_completion(&fwork.done);
    1375           0 : }
    1376             : EXPORT_SYMBOL_GPL(kthread_flush_worker);
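
kthread_flush_worker() acts as a barrier for everything queued so far without stopping the worker; a sketch:

    /* Returns once all works queued on my_worker up to this point
     * have finished; new works may still be queued afterwards. */
    kthread_flush_worker(my_worker);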
    1377             : 
    1378             : /**
    1379             :  * kthread_destroy_worker - destroy a kthread worker
    1380             :  * @worker: worker to be destroyed
    1381             :  *
    1382             :  * Flush and destroy @worker.  The simple flush is enough because the kthread
    1383             :  * worker API is used only in trivial scenarios.  There are no multi-step state
    1384             :  * machines needed.
    1385             :  *
     1386             :  * Note that this function does not handle delayed work, so the
     1387             :  * caller is responsible for queuing or canceling all delayed work items
     1388             :  * before invoking this function.
    1389             :  */
    1390           0 : void kthread_destroy_worker(struct kthread_worker *worker)
    1391             : {
    1392             :         struct task_struct *task;
    1393             : 
    1394           0 :         task = worker->task;
    1395           0 :         if (WARN_ON(!task))
    1396             :                 return;
    1397             : 
    1398           0 :         kthread_flush_worker(worker);
    1399           0 :         kthread_stop(task);
    1400           0 :         WARN_ON(!list_empty(&worker->delayed_work_list));
    1401           0 :         WARN_ON(!list_empty(&worker->work_list));
    1402           0 :         kfree(worker);
    1403             : }
    1404             : EXPORT_SYMBOL(kthread_destroy_worker);
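
Putting the lifecycle together: because kthread_destroy_worker() flushes but does not cancel delayed work, a hypothetical teardown looks like this:

    static void my_teardown(void)
    {
            /* The caller must settle delayed work first (see above). */
            kthread_cancel_delayed_work_sync(&my_dwork);
            kthread_cancel_work_sync(&my_work);

            /* Flushes remaining works, stops the kthread, frees worker. */
            kthread_destroy_worker(my_worker);
    }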
    1405             : 
    1406             : /**
    1407             :  * kthread_use_mm - make the calling kthread operate on an address space
    1408             :  * @mm: address space to operate on
    1409             :  */
    1410           0 : void kthread_use_mm(struct mm_struct *mm)
    1411             : {
    1412             :         struct mm_struct *active_mm;
    1413           0 :         struct task_struct *tsk = current;
    1414             : 
    1415           0 :         WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
    1416           0 :         WARN_ON_ONCE(tsk->mm);
    1417             : 
    1418           0 :         task_lock(tsk);
    1419             :         /* Hold off tlb flush IPIs while switching mm's */
    1420             :         local_irq_disable();
    1421           0 :         active_mm = tsk->active_mm;
    1422           0 :         if (active_mm != mm) {
    1423           0 :                 mmgrab(mm);
    1424           0 :                 tsk->active_mm = mm;
    1425             :         }
    1426           0 :         tsk->mm = mm;
    1427           0 :         membarrier_update_current_mm(mm);
    1428           0 :         switch_mm_irqs_off(active_mm, mm, tsk);
    1429             :         local_irq_enable();
    1430           0 :         task_unlock(tsk);
    1431             : #ifdef finish_arch_post_lock_switch
    1432             :         finish_arch_post_lock_switch();
    1433             : #endif
    1434             : 
    1435             :         /*
    1436             :          * When a kthread starts operating on an address space, the loop
     1437             :          * in membarrier_{private,global}_expedited() may not observe that
     1438             :          * tsk->mm has been set, and thus not issue an IPI. Membarrier requires a
    1439             :          * memory barrier after storing to tsk->mm, before accessing
    1440             :          * user-space memory. A full memory barrier for membarrier
    1441             :          * {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by
    1442             :          * mmdrop(), or explicitly with smp_mb().
    1443             :          */
    1444           0 :         if (active_mm != mm)
    1445             :                 mmdrop(active_mm);
    1446             :         else
    1447           0 :                 smp_mb();
    1448           0 : }
    1449             : EXPORT_SYMBOL_GPL(kthread_use_mm);
    1450             : 
    1451             : /**
    1452             :  * kthread_unuse_mm - reverse the effect of kthread_use_mm()
    1453             :  * @mm: address space to operate on
    1454             :  */
    1455           0 : void kthread_unuse_mm(struct mm_struct *mm)
    1456             : {
    1457           0 :         struct task_struct *tsk = current;
    1458             : 
    1459           0 :         WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
    1460           0 :         WARN_ON_ONCE(!tsk->mm);
    1461             : 
    1462           0 :         task_lock(tsk);
    1463             :         /*
    1464             :          * When a kthread stops operating on an address space, the loop
     1465             :          * in membarrier_{private,global}_expedited() may observe that
     1466             :          * tsk->mm has already been cleared, and thus not issue an IPI. Membarrier requires a
    1467             :          * memory barrier after accessing user-space memory, before
    1468             :          * clearing tsk->mm.
    1469             :          */
    1470             :         smp_mb__after_spinlock();
    1471           0 :         sync_mm_rss(mm);
    1472             :         local_irq_disable();
    1473           0 :         tsk->mm = NULL;
    1474           0 :         membarrier_update_current_mm(NULL);
    1475             :         /* active_mm is still 'mm' */
    1476           0 :         enter_lazy_tlb(mm, tsk);
    1477             :         local_irq_enable();
    1478           0 :         task_unlock(tsk);
    1479           0 : }
    1480             : EXPORT_SYMBOL_GPL(kthread_unuse_mm);
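
A sketch of the paired calls: a kthread temporarily adopting a user address space so the normal uaccess helpers work against it. Here mm is assumed to be a valid, referenced mm_struct (for example obtained via get_task_mm()), and buf, val and ret are hypothetical:

    /* Adopt @mm: afterwards copy_from_user()/copy_to_user() resolve
     * against mm's user mappings. */
    kthread_use_mm(mm);

    if (copy_from_user(&val, buf, sizeof(val)))
            ret = -EFAULT;

    /* Drop back to lazy-TLB mode; tsk->mm becomes NULL again. */
    kthread_unuse_mm(mm);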
    1481             : 
    1482             : #ifdef CONFIG_BLK_CGROUP
    1483             : /**
    1484             :  * kthread_associate_blkcg - associate blkcg to current kthread
    1485             :  * @css: the cgroup info
    1486             :  *
     1487             :  * The current thread must be a kthread. The thread runs jobs on behalf of
     1488             :  * other threads. In some cases, we expect the jobs to carry the cgroup info
     1489             :  * of the original threads instead of that of the current thread. This function
     1490             :  * stores the original thread's cgroup info in the current kthread context for
     1491             :  * later retrieval.
    1492             :  */
    1493             : void kthread_associate_blkcg(struct cgroup_subsys_state *css)
    1494             : {
    1495             :         struct kthread *kthread;
    1496             : 
    1497             :         if (!(current->flags & PF_KTHREAD))
    1498             :                 return;
    1499             :         kthread = to_kthread(current);
    1500             :         if (!kthread)
    1501             :                 return;
    1502             : 
    1503             :         if (kthread->blkcg_css) {
    1504             :                 css_put(kthread->blkcg_css);
    1505             :                 kthread->blkcg_css = NULL;
    1506             :         }
    1507             :         if (css) {
    1508             :                 css_get(css);
    1509             :                 kthread->blkcg_css = css;
    1510             :         }
    1511             : }
    1512             : EXPORT_SYMBOL(kthread_associate_blkcg);
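
A hedged sketch of the intended pattern, loosely modeled on loop-style drivers that charge I/O to the original submitter; css and bio are hypothetical and assumed valid:

    /* In the worker kthread: account I/O issued here to the
     * submitter's blkcg instead of the kthread's own. The function
     * takes its own reference on css. */
    kthread_associate_blkcg(css);
    submit_bio(bio);
    /* Drop the association and the reference taken above. */
    kthread_associate_blkcg(NULL);

The block layer can later retrieve the stored css via kthread_blkcg(), defined below.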
    1513             : 
    1514             : /**
    1515             :  * kthread_blkcg - get associated blkcg css of current kthread
    1516             :  *
     1517             :  * The current thread must be a kthread.
    1518             :  */
    1519             : struct cgroup_subsys_state *kthread_blkcg(void)
    1520             : {
    1521             :         struct kthread *kthread;
    1522             : 
    1523             :         if (current->flags & PF_KTHREAD) {
    1524             :                 kthread = to_kthread(current);
    1525             :                 if (kthread)
    1526             :                         return kthread->blkcg_css;
    1527             :         }
    1528             :         return NULL;
    1529             : }
    1530             : #endif

Generated by: LCOV version 1.14