LCOV - code coverage report
Current view: top level - kernel - kthread.c (source / functions)
Test: coverage.info
Date: 2023-08-24 13:40:31

             Hit    Total   Coverage
Lines:        148     438    33.8 %
Functions:     21      52    40.4 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0-only
       2             : /* Kernel thread helper functions.
       3             :  *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
       4             :  *   Copyright (C) 2009 Red Hat, Inc.
       5             :  *
       6             :  * Creation is done via kthreadd, so that we get a clean environment
       7             :  * even if we're invoked from userspace (think modprobe, hotplug cpu,
       8             :  * etc.).
       9             :  */
      10             : #include <uapi/linux/sched/types.h>
      11             : #include <linux/mm.h>
      12             : #include <linux/mmu_context.h>
      13             : #include <linux/sched.h>
      14             : #include <linux/sched/mm.h>
      15             : #include <linux/sched/task.h>
      16             : #include <linux/kthread.h>
      17             : #include <linux/completion.h>
      18             : #include <linux/err.h>
      19             : #include <linux/cgroup.h>
      20             : #include <linux/cpuset.h>
      21             : #include <linux/unistd.h>
      22             : #include <linux/file.h>
      23             : #include <linux/export.h>
      24             : #include <linux/mutex.h>
      25             : #include <linux/slab.h>
      26             : #include <linux/freezer.h>
      27             : #include <linux/ptrace.h>
      28             : #include <linux/uaccess.h>
      29             : #include <linux/numa.h>
      30             : #include <linux/sched/isolation.h>
      31             : #include <trace/events/sched.h>
      32             : 
      33             : 
      34             : static DEFINE_SPINLOCK(kthread_create_lock);
      35             : static LIST_HEAD(kthread_create_list);
      36             : struct task_struct *kthreadd_task;
      37             : 
      38             : struct kthread_create_info
      39             : {
      40             :         /* Information passed to kthread() from kthreadd. */
      41             :         char *full_name;
      42             :         int (*threadfn)(void *data);
      43             :         void *data;
      44             :         int node;
      45             : 
      46             :         /* Result passed back to kthread_create() from kthreadd. */
      47             :         struct task_struct *result;
      48             :         struct completion *done;
      49             : 
      50             :         struct list_head list;
      51             : };
      52             : 
      53             : struct kthread {
      54             :         unsigned long flags;
      55             :         unsigned int cpu;
      56             :         int result;
      57             :         int (*threadfn)(void *);
      58             :         void *data;
      59             :         struct completion parked;
      60             :         struct completion exited;
      61             : #ifdef CONFIG_BLK_CGROUP
      62             :         struct cgroup_subsys_state *blkcg_css;
      63             : #endif
      64             :         /* To store the full name if task comm is truncated. */
      65             :         char *full_name;
      66             : };
      67             : 
      68             : enum KTHREAD_BITS {
      69             :         KTHREAD_IS_PER_CPU = 0,
      70             :         KTHREAD_SHOULD_STOP,
      71             :         KTHREAD_SHOULD_PARK,
      72             : };
      73             : 
      74             : static inline struct kthread *to_kthread(struct task_struct *k)
      75             : {
      76        1362 :         WARN_ON(!(k->flags & PF_KTHREAD));
      77        1362 :         return k->worker_private;
      78             : }
      79             : 
      80             : /*
      81             :  * Variant of to_kthread() that doesn't assume @p is a kthread.
      82             :  *
       83             :  * By construction, when:
      84             :  *
      85             :  *   (p->flags & PF_KTHREAD) && p->worker_private
      86             :  *
       87             :  * the task is both a kthread and struct kthread is persistent. However,
       88             :  * PF_KTHREAD on its own is not: kernel_thread() can exec() (see umh.c and
       89             :  * begin_new_exec()).
      90             :  */
      91             : static inline struct kthread *__to_kthread(struct task_struct *p)
      92             : {
      93           0 :         void *kthread = p->worker_private;
      94           0 :         if (kthread && !(p->flags & PF_KTHREAD))
      95           0 :                 kthread = NULL;
      96             :         return kthread;
      97             : }
      98             : 
      99           0 : void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk)
     100             : {
     101           0 :         struct kthread *kthread = to_kthread(tsk);
     102             : 
     103           0 :         if (!kthread || !kthread->full_name) {
     104           0 :                 __get_task_comm(buf, buf_size, tsk);
     105           0 :                 return;
     106             :         }
     107             : 
     108           0 :         strscpy_pad(buf, kthread->full_name, buf_size);
     109             : }
     110             : 
     111         175 : bool set_kthread_struct(struct task_struct *p)
     112             : {
     113             :         struct kthread *kthread;
     114             : 
     115         350 :         if (WARN_ON_ONCE(to_kthread(p)))
     116             :                 return false;
     117             : 
     118         175 :         kthread = kzalloc(sizeof(*kthread), GFP_KERNEL);
     119         175 :         if (!kthread)
     120             :                 return false;
     121             : 
     122         350 :         init_completion(&kthread->exited);
     123         350 :         init_completion(&kthread->parked);
     124         175 :         p->vfork_done = &kthread->exited;
     125             : 
     126         175 :         p->worker_private = kthread;
     127         175 :         return true;
     128             : }
     129             : 
     130         159 : void free_kthread_struct(struct task_struct *k)
     131             : {
     132             :         struct kthread *kthread;
     133             : 
     134             :         /*
      135             :          * Can be NULL if kzalloc() in set_kthread_struct() failed.
     136             :          */
     137         318 :         kthread = to_kthread(k);
     138         159 :         if (!kthread)
     139             :                 return;
     140             : 
     141             : #ifdef CONFIG_BLK_CGROUP
     142             :         WARN_ON_ONCE(kthread->blkcg_css);
     143             : #endif
     144         159 :         k->worker_private = NULL;
     145         159 :         kfree(kthread->full_name);
     146         159 :         kfree(kthread);
     147             : }
     148             : 
     149             : /**
     150             :  * kthread_should_stop - should this kthread return now?
     151             :  *
     152             :  * When someone calls kthread_stop() on your kthread, it will be woken
     153             :  * and this will return true.  You should then return, and your return
     154             :  * value will be passed through to kthread_stop().
     155             :  */
     156         328 : bool kthread_should_stop(void)
     157             : {
     158         984 :         return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
     159             : }
     160             : EXPORT_SYMBOL(kthread_should_stop);
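
/*
 * Usage sketch (illustrative, not part of kthread.c; the name
 * example_threadfn and the sleep/work split are assumptions): a typical
 * thread function is a loop that terminates once kthread_should_stop()
 * turns true.
 */
static int example_threadfn(void *data)
{
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                schedule();                     /* sleep until woken */
                __set_current_state(TASK_RUNNING);
                /* ... do one unit of work ... */
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;                               /* handed back to kthread_stop() */
}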
     161             : 
     162         320 : bool __kthread_should_park(struct task_struct *k)
     163             : {
     164         960 :         return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
     165             : }
     166             : EXPORT_SYMBOL_GPL(__kthread_should_park);
     167             : 
     168             : /**
     169             :  * kthread_should_park - should this kthread park now?
     170             :  *
     171             :  * When someone calls kthread_park() on your kthread, it will be woken
     172             :  * and this will return true.  You should then do the necessary
      173             :  * cleanup and call kthread_parkme().
     174             :  *
     175             :  * Similar to kthread_should_stop(), but this keeps the thread alive
     176             :  * and in a park position. kthread_unpark() "restarts" the thread and
     177             :  * calls the thread function again.
     178             :  */
     179         320 : bool kthread_should_park(void)
     180             : {
     181         320 :         return __kthread_should_park(current);
     182             : }
     183             : EXPORT_SYMBOL_GPL(kthread_should_park);
     184             : 
     185           0 : bool kthread_should_stop_or_park(void)
     186             : {
     187           0 :         struct kthread *kthread = __to_kthread(current);
     188             : 
     189           0 :         if (!kthread)
     190             :                 return false;
     191             : 
     192           0 :         return kthread->flags & (BIT(KTHREAD_SHOULD_STOP) | BIT(KTHREAD_SHOULD_PARK));
     193             : }
     194             : 
     195             : /**
     196             :  * kthread_freezable_should_stop - should this freezable kthread return now?
     197             :  * @was_frozen: optional out parameter, indicates whether %current was frozen
     198             :  *
     199             :  * kthread_should_stop() for freezable kthreads, which will enter
      200             :  * the refrigerator if necessary.  This function is safe from kthread_stop() /
     201             :  * freezer deadlock and freezable kthreads should use this function instead
     202             :  * of calling try_to_freeze() directly.
     203             :  */
     204           0 : bool kthread_freezable_should_stop(bool *was_frozen)
     205             : {
     206           0 :         bool frozen = false;
     207             : 
     208             :         might_sleep();
     209             : 
     210           0 :         if (unlikely(freezing(current)))
     211           0 :                 frozen = __refrigerator(true);
     212             : 
     213           0 :         if (was_frozen)
     214           0 :                 *was_frozen = frozen;
     215             : 
     216           0 :         return kthread_should_stop();
     217             : }
     218             : EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
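
/*
 * Sketch of a freezable loop (assumed names; a kthread must opt in with
 * set_freezable() before the freezer will consider it):
 */
static int example_freezable_fn(void *data)
{
        bool was_frozen;

        set_freezable();
        while (!kthread_freezable_should_stop(&was_frozen)) {
                if (was_frozen)
                        continue;               /* re-evaluate state after a thaw */
                /* ... do periodic work ... */
                schedule_timeout_interruptible(HZ);
        }
        return 0;
}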
     219             : 
     220             : /**
     221             :  * kthread_func - return the function specified on kthread creation
     222             :  * @task: kthread task in question
     223             :  *
     224             :  * Returns NULL if the task is not a kthread.
     225             :  */
     226           0 : void *kthread_func(struct task_struct *task)
     227             : {
     228           0 :         struct kthread *kthread = __to_kthread(task);
     229           0 :         if (kthread)
     230           0 :                 return kthread->threadfn;
     231             :         return NULL;
     232             : }
     233             : EXPORT_SYMBOL_GPL(kthread_func);
     234             : 
     235             : /**
     236             :  * kthread_data - return data value specified on kthread creation
     237             :  * @task: kthread task in question
     238             :  *
     239             :  * Return the data value specified when kthread @task was created.
     240             :  * The caller is responsible for ensuring the validity of @task when
     241             :  * calling this function.
     242             :  */
     243          39 : void *kthread_data(struct task_struct *task)
     244             : {
     245          78 :         return to_kthread(task)->data;
     246             : }
     247             : EXPORT_SYMBOL_GPL(kthread_data);
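
/*
 * Sketch (assumed names): kthread_data() lets code that only holds a
 * task_struct reference recover the object the thread was created to
 * serve, e.g. when the thread was started with
 * kthread_run(example_threadfn, ctx, "example/%d", id).
 */
struct example_ctx;                     /* hypothetical payload type */

static struct example_ctx *example_ctx_of(struct task_struct *task)
{
        return kthread_data(task);      /* the 'ctx' passed at creation */
}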
     248             : 
     249             : /**
     250             :  * kthread_probe_data - speculative version of kthread_data()
     251             :  * @task: possible kthread task in question
     252             :  *
     253             :  * @task could be a kthread task.  Return the data value specified when it
     254             :  * was created if accessible.  If @task isn't a kthread task or its data is
     255             :  * inaccessible for any reason, %NULL is returned.  This function requires
     256             :  * that @task itself is safe to dereference.
     257             :  */
     258           0 : void *kthread_probe_data(struct task_struct *task)
     259             : {
     260           0 :         struct kthread *kthread = __to_kthread(task);
     261           0 :         void *data = NULL;
     262             : 
     263           0 :         if (kthread)
     264           0 :                 copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
     265           0 :         return data;
     266             : }
     267             : 
     268         173 : static void __kthread_parkme(struct kthread *self)
     269             : {
     270             :         for (;;) {
     271             :                 /*
     272             :                  * TASK_PARKED is a special state; we must serialize against
     273             :                  * possible pending wakeups to avoid store-store collisions on
     274             :                  * task->state.
     275             :                  *
     276             :                  * Such a collision might possibly result in the task state
      277             :                  * changing from TASK_PARKED and us failing the
     278             :                  * wait_task_inactive() in kthread_park().
     279             :                  */
     280         871 :                 set_special_state(TASK_PARKED);
     281         348 :                 if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
     282             :                         break;
     283             : 
     284             :                 /*
     285             :                  * Thread is going to call schedule(), do not preempt it,
     286             :                  * or the caller of kthread_park() may spend more time in
     287             :                  * wait_task_inactive().
     288             :                  */
     289           1 :                 preempt_disable();
     290           1 :                 complete(&self->parked);
     291           1 :                 schedule_preempt_disabled();
     292           1 :                 preempt_enable();
     293             :         }
     294         173 :         __set_current_state(TASK_RUNNING);
     295         173 : }
     296             : 
     297           0 : void kthread_parkme(void)
     298             : {
     299           0 :         __kthread_parkme(to_kthread(current));
     300           0 : }
     301             : EXPORT_SYMBOL_GPL(kthread_parkme);
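
/*
 * Sketch of a park-aware loop (assumed names): the thread quiesces in
 * TASK_PARKED when kthread_park() is called on it and resumes after
 * kthread_unpark().
 */
static int example_percpu_threadfn(void *data)
{
        while (!kthread_should_stop()) {
                if (kthread_should_park()) {
                        kthread_parkme();       /* sleeps here until unparked */
                        continue;               /* re-check stop/park state */
                }
                /* ... do CPU-local work ... */
                cond_resched();
        }
        return 0;
}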
     302             : 
     303             : /**
      304             :  * kthread_exit - Cause the current kthread to return @result to kthread_stop().
     305             :  * @result: The integer value to return to kthread_stop().
     306             :  *
      307             :  * While kthread_exit() can be called directly, it exists so that
      308             :  * functions which do some additional work, such as module_put_and_kthread_exit()
      309             :  * in non-modular code, can be implemented.
     310             :  *
     311             :  * Does not return.
     312             :  */
     313         160 : void __noreturn kthread_exit(long result)
     314             : {
     315         320 :         struct kthread *kthread = to_kthread(current);
     316         160 :         kthread->result = result;
     317         160 :         do_exit(0);
     318             : }
     319             : 
     320             : /**
     321             :  * kthread_complete_and_exit - Exit the current kthread.
     322             :  * @comp: Completion to complete
     323             :  * @code: The integer value to return to kthread_stop().
     324             :  *
      325             :  * If present, complete @comp and then return @code to kthread_stop().
     326             :  *
     327             :  * A kernel thread whose module may be removed after the completion of
     328             :  * @comp can use this function to exit safely.
     329             :  *
     330             :  * Does not return.
     331             :  */
     332         160 : void __noreturn kthread_complete_and_exit(struct completion *comp, long code)
     333             : {
     334         160 :         if (comp)
     335         160 :                 complete(comp);
     336             : 
     337         160 :         kthread_exit(code);
     338             : }
     339             : EXPORT_SYMBOL(kthread_complete_and_exit);
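
/*
 * Sketch (assumed names): a one-shot thread whose module may be unloaded
 * as soon as 'done' completes. Exiting via kthread_complete_and_exit()
 * ensures no module code runs after the completion fires.
 */
static int example_oneshot_fn(void *data)
{
        struct completion *done = data;

        /* ... perform the one-shot work ... */
        kthread_complete_and_exit(done, 0);     /* does not return */
}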
     340             : 
     341         173 : static int kthread(void *_create)
     342             : {
     343             :         static const struct sched_param param = { .sched_priority = 0 };
     344             :         /* Copy data: it's on kthread's stack */
     345         173 :         struct kthread_create_info *create = _create;
     346         173 :         int (*threadfn)(void *data) = create->threadfn;
     347         173 :         void *data = create->data;
     348             :         struct completion *done;
     349             :         struct kthread *self;
     350             :         int ret;
     351             : 
     352         346 :         self = to_kthread(current);
     353             : 
     354             :         /* Release the structure when caller killed by a fatal signal. */
     355         346 :         done = xchg(&create->done, NULL);
     356         173 :         if (!done) {
     357           0 :                 kfree(create->full_name);
     358           0 :                 kfree(create);
     359           0 :                 kthread_exit(-EINTR);
     360             :         }
     361             : 
     362         173 :         self->full_name = create->full_name;
     363         173 :         self->threadfn = threadfn;
     364         173 :         self->data = data;
     365             : 
     366             :         /*
     367             :          * The new thread inherited kthreadd's priority and CPU mask. Reset
     368             :          * back to default in case they have been changed.
     369             :          */
     370         173 :         sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
     371         519 :         set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_KTHREAD));
     372             : 
     373             :         /* OK, tell user we're spawned, wait for stop or wakeup */
     374         173 :         __set_current_state(TASK_UNINTERRUPTIBLE);
     375         173 :         create->result = current;
     376             :         /*
     377             :          * Thread is going to call schedule(), do not preempt it,
     378             :          * or the creator may spend more time in wait_task_inactive().
     379             :          */
     380         173 :         preempt_disable();
     381         173 :         complete(done);
     382         173 :         schedule_preempt_disabled();
     383         173 :         preempt_enable();
     384             : 
     385         173 :         ret = -EINTR;
     386         346 :         if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
     387             :                 cgroup_kthread_ready();
     388         173 :                 __kthread_parkme(self);
     389         173 :                 ret = threadfn(data);
     390             :         }
     391           0 :         kthread_exit(ret);
     392             : }
     393             : 
      394             : /* called from kernel_clone() to get node information for the task about to be created */
     395         175 : int tsk_fork_get_node(struct task_struct *tsk)
     396             : {
     397             : #ifdef CONFIG_NUMA
     398             :         if (tsk == kthreadd_task)
     399             :                 return tsk->pref_node_fork;
     400             : #endif
     401         175 :         return NUMA_NO_NODE;
     402             : }
     403             : 
     404         173 : static void create_kthread(struct kthread_create_info *create)
     405             : {
     406             :         int pid;
     407             : 
     408             : #ifdef CONFIG_NUMA
     409             :         current->pref_node_fork = create->node;
     410             : #endif
     411             :         /* We want our own signal handler (we take no signals by default). */
     412         173 :         pid = kernel_thread(kthread, create, create->full_name,
     413             :                             CLONE_FS | CLONE_FILES | SIGCHLD);
     414         173 :         if (pid < 0) {
     415             :                 /* Release the structure when caller killed by a fatal signal. */
     416           0 :                 struct completion *done = xchg(&create->done, NULL);
     417             : 
     418           0 :                 kfree(create->full_name);
     419           0 :                 if (!done) {
     420           0 :                         kfree(create);
     421           0 :                         return;
     422             :                 }
     423           0 :                 create->result = ERR_PTR(pid);
     424           0 :                 complete(done);
     425             :         }
     426             : }
     427             : 
     428             : static __printf(4, 0)
     429         173 : struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
     430             :                                                     void *data, int node,
     431             :                                                     const char namefmt[],
     432             :                                                     va_list args)
     433             : {
     434         173 :         DECLARE_COMPLETION_ONSTACK(done);
     435             :         struct task_struct *task;
     436         173 :         struct kthread_create_info *create = kmalloc(sizeof(*create),
     437             :                                                      GFP_KERNEL);
     438             : 
     439         173 :         if (!create)
     440             :                 return ERR_PTR(-ENOMEM);
     441         173 :         create->threadfn = threadfn;
     442         173 :         create->data = data;
     443         173 :         create->node = node;
     444         173 :         create->done = &done;
     445         173 :         create->full_name = kvasprintf(GFP_KERNEL, namefmt, args);
     446         173 :         if (!create->full_name) {
     447             :                 task = ERR_PTR(-ENOMEM);
     448             :                 goto free_create;
     449             :         }
     450             : 
     451         173 :         spin_lock(&kthread_create_lock);
     452         346 :         list_add_tail(&create->list, &kthread_create_list);
     453         173 :         spin_unlock(&kthread_create_lock);
     454             : 
     455         173 :         wake_up_process(kthreadd_task);
     456             :         /*
     457             :          * Wait for completion in killable state, for I might be chosen by
     458             :          * the OOM killer while kthreadd is trying to allocate memory for
      459             :          * the new kernel thread.
     460             :          */
     461         173 :         if (unlikely(wait_for_completion_killable(&done))) {
     462             :                 /*
     463             :                  * If I was killed by a fatal signal before kthreadd (or new
     464             :                  * kernel thread) calls complete(), leave the cleanup of this
     465             :                  * structure to that thread.
     466             :                  */
     467           0 :                 if (xchg(&create->done, NULL))
     468             :                         return ERR_PTR(-EINTR);
     469             :                 /*
     470             :                  * kthreadd (or new kernel thread) will call complete()
     471             :                  * shortly.
     472             :                  */
     473           0 :                 wait_for_completion(&done);
     474             :         }
     475         173 :         task = create->result;
     476             : free_create:
     477         173 :         kfree(create);
     478         173 :         return task;
     479             : }
     480             : 
     481             : /**
     482             :  * kthread_create_on_node - create a kthread.
     483             :  * @threadfn: the function to run until signal_pending(current).
     484             :  * @data: data ptr for @threadfn.
     485             :  * @node: task and thread structures for the thread are allocated on this node
     486             :  * @namefmt: printf-style name for the thread.
     487             :  *
     488             :  * Description: This helper function creates and names a kernel
     489             :  * thread.  The thread will be stopped: use wake_up_process() to start
     490             :  * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
     491             :  * is affine to all CPUs.
     492             :  *
      493             :  * If the thread is going to be bound to a particular CPU, give its node
      494             :  * in @node to get NUMA affinity for the kthread stack; otherwise give NUMA_NO_NODE.
     495             :  * When woken, the thread will run @threadfn() with @data as its
     496             :  * argument. @threadfn() can either return directly if it is a
     497             :  * standalone thread for which no one will call kthread_stop(), or
     498             :  * return when 'kthread_should_stop()' is true (which means
     499             :  * kthread_stop() has been called).  The return value should be zero
     500             :  * or a negative error number; it will be passed to kthread_stop().
     501             :  *
     502             :  * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
     503             :  */
     504         173 : struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
     505             :                                            void *data, int node,
     506             :                                            const char namefmt[],
     507             :                                            ...)
     508             : {
     509             :         struct task_struct *task;
     510             :         va_list args;
     511             : 
     512         173 :         va_start(args, namefmt);
     513         173 :         task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
     514         173 :         va_end(args);
     515             : 
     516         173 :         return task;
     517             : }
     518             : EXPORT_SYMBOL(kthread_create_on_node);
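
/*
 * Creation sketch (assumed names): the new thread starts stopped and must
 * be woken explicitly; the kthread_run() macro wraps exactly these steps.
 */
static struct task_struct *example_start(int (*fn)(void *), void *data)
{
        struct task_struct *t;

        t = kthread_create_on_node(fn, data, NUMA_NO_NODE, "example/%d", 0);
        if (!IS_ERR(t))
                wake_up_process(t);     /* begin executing fn(data) */
        return t;
}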
     519             : 
     520          11 : static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, unsigned int state)
     521             : {
     522             :         unsigned long flags;
     523             : 
     524          11 :         if (!wait_task_inactive(p, state)) {
     525           0 :                 WARN_ON(1);
     526             :                 return;
     527             :         }
     528             : 
     529             :         /* It's safe because the task is inactive. */
     530          11 :         raw_spin_lock_irqsave(&p->pi_lock, flags);
     531          11 :         do_set_cpus_allowed(p, mask);
     532          11 :         p->flags |= PF_NO_SETAFFINITY;
     533          22 :         raw_spin_unlock_irqrestore(&p->pi_lock, flags);
     534             : }
     535             : 
     536             : static void __kthread_bind(struct task_struct *p, unsigned int cpu, unsigned int state)
     537             : {
     538           2 :         __kthread_bind_mask(p, cpumask_of(cpu), state);
     539             : }
     540             : 
     541           9 : void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
     542             : {
     543           9 :         __kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
     544           9 : }
     545             : 
     546             : /**
     547             :  * kthread_bind - bind a just-created kthread to a cpu.
     548             :  * @p: thread created by kthread_create().
      549             :  * @cpu: cpu (might not be online, must be possible) for @p to run on.
     550             :  *
     551             :  * Description: This function is equivalent to set_cpus_allowed(),
     552             :  * except that @cpu doesn't need to be online, and the thread must be
     553             :  * stopped (i.e., just returned from kthread_create()).
     554             :  */
     555           0 : void kthread_bind(struct task_struct *p, unsigned int cpu)
     556             : {
     557           2 :         __kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
     558           0 : }
     559             : EXPORT_SYMBOL(kthread_bind);
     560             : 
     561             : /**
     562             :  * kthread_create_on_cpu - Create a cpu bound kthread
     563             :  * @threadfn: the function to run until signal_pending(current).
     564             :  * @data: data ptr for @threadfn.
      565             :  * @cpu: The CPU to which the thread should be bound.
     566             :  * @namefmt: printf-style name for the thread. Format is restricted
     567             :  *           to "name.*%u". Code fills in cpu number.
     568             :  *
      569             :  * Description: This helper function creates and names a kernel thread.
     570             :  */
     571           1 : struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
     572             :                                           void *data, unsigned int cpu,
     573             :                                           const char *namefmt)
     574             : {
     575             :         struct task_struct *p;
     576             : 
     577           1 :         p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
     578             :                                    cpu);
     579           1 :         if (IS_ERR(p))
     580             :                 return p;
     581           1 :         kthread_bind(p, cpu);
      582             :         /* CPU hotplug needs to bind the thread once again when unparking it. */
     583           2 :         to_kthread(p)->cpu = cpu;
     584           1 :         return p;
     585             : }
     586             : EXPORT_SYMBOL(kthread_create_on_cpu);
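
/*
 * Sketch (assumed names, reusing example_percpu_threadfn from the sketch
 * above): a CPU-bound thread, e.g. created from a hotplug online callback;
 * the "%u" in the name format is filled in with the CPU number.
 */
static struct task_struct *example_cpu_thread(unsigned int cpu)
{
        struct task_struct *t;

        t = kthread_create_on_cpu(example_percpu_threadfn, NULL, cpu,
                                  "example/%u");
        if (!IS_ERR(t))
                wake_up_process(t);
        return t;
}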
     587             : 
     588           5 : void kthread_set_per_cpu(struct task_struct *k, int cpu)
     589             : {
     590          10 :         struct kthread *kthread = to_kthread(k);
     591           5 :         if (!kthread)
     592             :                 return;
     593             : 
     594           5 :         WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));
     595             : 
     596           5 :         if (cpu < 0) {
     597           0 :                 clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
     598             :                 return;
     599             :         }
     600             : 
     601           5 :         kthread->cpu = cpu;
     602           5 :         set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
     603             : }
     604             : 
     605           0 : bool kthread_is_per_cpu(struct task_struct *p)
     606             : {
     607           0 :         struct kthread *kthread = __to_kthread(p);
     608           0 :         if (!kthread)
     609             :                 return false;
     610             : 
     611           0 :         return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
     612             : }
     613             : 
     614             : /**
     615             :  * kthread_unpark - unpark a thread created by kthread_create().
     616             :  * @k:          thread created by kthread_create().
     617             :  *
      618             :  * Sets kthread_should_park() for @k to return false and wakes it. If
      619             :  * the thread is marked percpu then it is bound to the cpu again before
      620             :  * being woken.
     621             :  */
     622           1 : void kthread_unpark(struct task_struct *k)
     623             : {
     624           2 :         struct kthread *kthread = to_kthread(k);
     625             : 
     626             :         /*
     627             :          * Newly created kthread was parked when the CPU was offline.
     628             :          * The binding was lost and we need to set it again.
     629             :          */
     630           2 :         if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
     631           1 :                 __kthread_bind(k, kthread->cpu, TASK_PARKED);
     632             : 
     633           2 :         clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
     634             :         /*
     635             :          * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
     636             :          */
     637           1 :         wake_up_state(k, TASK_PARKED);
     638           1 : }
     639             : EXPORT_SYMBOL_GPL(kthread_unpark);
     640             : 
     641             : /**
     642             :  * kthread_park - park a thread created by kthread_create().
     643             :  * @k: thread created by kthread_create().
     644             :  *
     645             :  * Sets kthread_should_park() for @k to return true, wakes it, and
      646             :  * waits for it to park. This can also be called after kthread_create()
     647             :  * instead of calling wake_up_process(): the thread will park without
     648             :  * calling threadfn().
     649             :  *
      650             :  * Returns 0 if the thread is parked, -ENOSYS if the thread exited and
      651             :  * -EBUSY if it was already parked. If called by the kthread itself, just the park bit is set.
     652             :  */
     653           1 : int kthread_park(struct task_struct *k)
     654             : {
     655           2 :         struct kthread *kthread = to_kthread(k);
     656             : 
     657           1 :         if (WARN_ON(k->flags & PF_EXITING))
     658             :                 return -ENOSYS;
     659             : 
     660           2 :         if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
     661             :                 return -EBUSY;
     662             : 
     663           2 :         set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
     664           1 :         if (k != current) {
     665           1 :                 wake_up_process(k);
     666             :                 /*
     667             :                  * Wait for __kthread_parkme() to complete(), this means we
     668             :                  * _will_ have TASK_PARKED and are about to call schedule().
     669             :                  */
     670           1 :                 wait_for_completion(&kthread->parked);
     671             :                 /*
     672             :                  * Now wait for that schedule() to complete and the task to
     673             :                  * get scheduled out.
     674             :                  */
     675           1 :                 WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
     676             :         }
     677             : 
     678             :         return 0;
     679             : }
     680             : EXPORT_SYMBOL_GPL(kthread_park);
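
/*
 * Controller-side sketch (assumed context): park/unpark can be used to
 * quiesce a thread across a critical section.
 */
static int example_quiesce(struct task_struct *t)
{
        int ret = kthread_park(t);      /* returns with t in TASK_PARKED */

        if (ret)
                return ret;
        /* ... t is guaranteed to be off-CPU here ... */
        kthread_unpark(t);              /* t resumes inside its threadfn */
        return 0;
}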
     681             : 
     682             : /**
     683             :  * kthread_stop - stop a thread created by kthread_create().
     684             :  * @k: thread created by kthread_create().
     685             :  *
     686             :  * Sets kthread_should_stop() for @k to return true, wakes it, and
     687             :  * waits for it to exit. This can also be called after kthread_create()
     688             :  * instead of calling wake_up_process(): the thread will exit without
     689             :  * calling threadfn().
     690             :  *
     691             :  * If threadfn() may call kthread_exit() itself, the caller must ensure
     692             :  * task_struct can't go away.
     693             :  *
     694             :  * Returns the result of threadfn(), or %-EINTR if wake_up_process()
     695             :  * was never called.
     696             :  */
     697           0 : int kthread_stop(struct task_struct *k)
     698             : {
     699             :         struct kthread *kthread;
     700             :         int ret;
     701             : 
     702           0 :         trace_sched_kthread_stop(k);
     703             : 
     704           0 :         get_task_struct(k);
     705           0 :         kthread = to_kthread(k);
     706           0 :         set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
     707           0 :         kthread_unpark(k);
     708           0 :         set_tsk_thread_flag(k, TIF_NOTIFY_SIGNAL);
     709           0 :         wake_up_process(k);
     710           0 :         wait_for_completion(&kthread->exited);
     711           0 :         ret = kthread->result;
     712           0 :         put_task_struct(k);
     713             : 
     714           0 :         trace_sched_kthread_stop_ret(ret);
     715           0 :         return ret;
     716             : }
     717             : EXPORT_SYMBOL(kthread_stop);
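
/*
 * Teardown sketch: kthread_stop() may also be called right after
 * kthread_create(), in which case the thread exits without ever running
 * its threadfn and -EINTR is returned.
 */
static int example_stop(struct task_struct *t)
{
        return kthread_stop(t);         /* threadfn()'s return value or -EINTR */
}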
     718             : 
     719           1 : int kthreadd(void *unused)
     720             : {
     721           1 :         struct task_struct *tsk = current;
     722             : 
     723             :         /* Setup a clean context for our children to inherit. */
     724           1 :         set_task_comm(tsk, "kthreadd");
     725           1 :         ignore_signals(tsk);
     726           2 :         set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_TYPE_KTHREAD));
     727           1 :         set_mems_allowed(node_states[N_MEMORY]);
     728             : 
     729           1 :         current->flags |= PF_NOFREEZE;
     730             :         cgroup_init_kthreadd();
     731             : 
     732             :         for (;;) {
     733         173 :                 set_current_state(TASK_INTERRUPTIBLE);
     734         173 :                 if (list_empty(&kthread_create_list))
     735         173 :                         schedule();
     736         172 :                 __set_current_state(TASK_RUNNING);
     737             : 
     738             :                 spin_lock(&kthread_create_lock);
     739         345 :                 while (!list_empty(&kthread_create_list)) {
     740             :                         struct kthread_create_info *create;
     741             : 
     742         173 :                         create = list_entry(kthread_create_list.next,
     743             :                                             struct kthread_create_info, list);
     744         346 :                         list_del_init(&create->list);
     745         173 :                         spin_unlock(&kthread_create_lock);
     746             : 
     747         173 :                         create_kthread(create);
     748             : 
     749             :                         spin_lock(&kthread_create_lock);
     750             :                 }
     751             :                 spin_unlock(&kthread_create_lock);
     752             :         }
     753             : 
     754             :         return 0;
     755             : }
     756             : 
     757           0 : void __kthread_init_worker(struct kthread_worker *worker,
     758             :                                 const char *name,
     759             :                                 struct lock_class_key *key)
     760             : {
     761           0 :         memset(worker, 0, sizeof(struct kthread_worker));
     762             :         raw_spin_lock_init(&worker->lock);
     763             :         lockdep_set_class_and_name(&worker->lock, key, name);
     764           0 :         INIT_LIST_HEAD(&worker->work_list);
     765           0 :         INIT_LIST_HEAD(&worker->delayed_work_list);
     766           0 : }
     767             : EXPORT_SYMBOL_GPL(__kthread_init_worker);
     768             : 
     769             : /**
     770             :  * kthread_worker_fn - kthread function to process kthread_worker
     771             :  * @worker_ptr: pointer to initialized kthread_worker
     772             :  *
     773             :  * This function implements the main cycle of kthread worker. It processes
     774             :  * work_list until it is stopped with kthread_stop(). It sleeps when the queue
     775             :  * is empty.
     776             :  *
     777             :  * The works are not allowed to keep any locks, disable preemption or interrupts
     778             :  * when they finish. There is defined a safe point for freezing when one work
     779             :  * finishes and before a new one is started.
     780             :  *
     781             :  * Also the works must not be handled by more than one worker at the same time,
     782             :  * see also kthread_queue_work().
     783             :  */
     784           0 : int kthread_worker_fn(void *worker_ptr)
     785             : {
     786           0 :         struct kthread_worker *worker = worker_ptr;
     787             :         struct kthread_work *work;
     788             : 
     789             :         /*
     790             :          * FIXME: Update the check and remove the assignment when all kthread
     791             :          * worker users are created using kthread_create_worker*() functions.
     792             :          */
     793           0 :         WARN_ON(worker->task && worker->task != current);
     794           0 :         worker->task = current;
     795             : 
     796           0 :         if (worker->flags & KTW_FREEZABLE)
     797           0 :                 set_freezable();
     798             : 
     799             : repeat:
     800           0 :         set_current_state(TASK_INTERRUPTIBLE);  /* mb paired w/ kthread_stop */
     801             : 
     802           0 :         if (kthread_should_stop()) {
     803           0 :                 __set_current_state(TASK_RUNNING);
     804           0 :                 raw_spin_lock_irq(&worker->lock);
     805           0 :                 worker->task = NULL;
     806           0 :                 raw_spin_unlock_irq(&worker->lock);
     807           0 :                 return 0;
     808             :         }
     809             : 
     810           0 :         work = NULL;
     811           0 :         raw_spin_lock_irq(&worker->lock);
     812           0 :         if (!list_empty(&worker->work_list)) {
     813           0 :                 work = list_first_entry(&worker->work_list,
     814             :                                         struct kthread_work, node);
     815           0 :                 list_del_init(&work->node);
     816             :         }
     817           0 :         worker->current_work = work;
     818           0 :         raw_spin_unlock_irq(&worker->lock);
     819             : 
     820           0 :         if (work) {
     821           0 :                 kthread_work_func_t func = work->func;
     822           0 :                 __set_current_state(TASK_RUNNING);
     823           0 :                 trace_sched_kthread_work_execute_start(work);
     824           0 :                 work->func(work);
     825             :                 /*
     826             :                  * Avoid dereferencing work after this point.  The trace
     827             :                  * event only cares about the address.
     828             :                  */
     829           0 :                 trace_sched_kthread_work_execute_end(work, func);
     830           0 :         } else if (!freezing(current))
     831           0 :                 schedule();
     832             : 
     833           0 :         try_to_freeze();
     834           0 :         cond_resched();
     835           0 :         goto repeat;
     836             : }
     837             : EXPORT_SYMBOL_GPL(kthread_worker_fn);
     838             : 
     839             : static __printf(3, 0) struct kthread_worker *
     840           0 : __kthread_create_worker(int cpu, unsigned int flags,
     841             :                         const char namefmt[], va_list args)
     842             : {
     843             :         struct kthread_worker *worker;
     844             :         struct task_struct *task;
     845           0 :         int node = NUMA_NO_NODE;
     846             : 
     847           0 :         worker = kzalloc(sizeof(*worker), GFP_KERNEL);
     848           0 :         if (!worker)
     849             :                 return ERR_PTR(-ENOMEM);
     850             : 
     851           0 :         kthread_init_worker(worker);
     852             : 
     853           0 :         if (cpu >= 0)
     854           0 :                 node = cpu_to_node(cpu);
     855             : 
     856           0 :         task = __kthread_create_on_node(kthread_worker_fn, worker,
     857             :                                                 node, namefmt, args);
     858           0 :         if (IS_ERR(task))
     859             :                 goto fail_task;
     860             : 
     861           0 :         if (cpu >= 0)
     862           0 :                 kthread_bind(task, cpu);
     863             : 
     864           0 :         worker->flags = flags;
     865           0 :         worker->task = task;
     866           0 :         wake_up_process(task);
     867           0 :         return worker;
     868             : 
     869             : fail_task:
     870           0 :         kfree(worker);
     871           0 :         return ERR_CAST(task);
     872             : }
     873             : 
     874             : /**
     875             :  * kthread_create_worker - create a kthread worker
     876             :  * @flags: flags modifying the default behavior of the worker
     877             :  * @namefmt: printf-style name for the kthread worker (task).
     878             :  *
     879             :  * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
     880             :  * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
     881             :  * when the caller was killed by a fatal signal.
     882             :  */
     883             : struct kthread_worker *
     884           0 : kthread_create_worker(unsigned int flags, const char namefmt[], ...)
     885             : {
     886             :         struct kthread_worker *worker;
     887             :         va_list args;
     888             : 
     889           0 :         va_start(args, namefmt);
     890           0 :         worker = __kthread_create_worker(-1, flags, namefmt, args);
     891           0 :         va_end(args);
     892             : 
     893           0 :         return worker;
     894             : }
     895             : EXPORT_SYMBOL(kthread_create_worker);
     896             : 
     897             : /**
     898             :  * kthread_create_worker_on_cpu - create a kthread worker and bind it
     899             :  *      to a given CPU and the associated NUMA node.
     900             :  * @cpu: CPU number
     901             :  * @flags: flags modifying the default behavior of the worker
     902             :  * @namefmt: printf-style name for the kthread worker (task).
     903             :  *
     904             :  * Use a valid CPU number if you want to bind the kthread worker
     905             :  * to the given CPU and the associated NUMA node.
     906             :  *
      907             :  * Good practice is to also include the CPU number in the worker name.
      908             :  * For example, use kthread_create_worker_on_cpu(cpu, 0, "helper/%d", cpu).
     909             :  *
     910             :  * CPU hotplug:
     911             :  * The kthread worker API is simple and generic. It just provides a way
     912             :  * to create, use, and destroy workers.
     913             :  *
      914             :  * It is up to the API user to handle CPU hotplug. They have to decide
      915             :  * what to do with pending work items, prevent queuing of new ones, and
      916             :  * restore the functionality when the CPU goes offline and comes back
      917             :  * online. There are a few catches:
      918             :  *
      919             :  *    - CPU affinity is lost when the worker is scheduled on an offline CPU.
      920             :  *
      921             :  *    - The worker might not exist if the CPU was offline when the user
      922             :  *      created the workers.
     923             :  *
     924             :  * Good practice is to implement two CPU hotplug callbacks and to
     925             :  * destroy/create the worker when the CPU goes down/up.
     926             :  *
     927             :  * Return:
     928             :  * The pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
     929             :  * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
     930             :  * when the caller was killed by a fatal signal.
     931             :  */
     932             : struct kthread_worker *
     933           0 : kthread_create_worker_on_cpu(int cpu, unsigned int flags,
     934             :                              const char namefmt[], ...)
     935             : {
     936             :         struct kthread_worker *worker;
     937             :         va_list args;
     938             : 
     939           0 :         va_start(args, namefmt);
     940           0 :         worker = __kthread_create_worker(cpu, flags, namefmt, args);
     941           0 :         va_end(args);
     942             : 
     943           0 :         return worker;
     944             : }
     945             : EXPORT_SYMBOL(kthread_create_worker_on_cpu);
     946             : 
     947             : /*
      948             :  * Returns true when the work cannot be queued at the moment.
      949             :  * That happens when it is already pending in a worker list
      950             :  * or when it is being cancelled.
     951             :  */
     952             : static inline bool queuing_blocked(struct kthread_worker *worker,
     953             :                                    struct kthread_work *work)
     954             : {
     955             :         lockdep_assert_held(&worker->lock);
     956             : 
     957           0 :         return !list_empty(&work->node) || work->canceling;
     958             : }
     959             : 
     960           0 : static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
     961             :                                              struct kthread_work *work)
     962             : {
     963             :         lockdep_assert_held(&worker->lock);
     964           0 :         WARN_ON_ONCE(!list_empty(&work->node));
     965             :         /* Do not use a work with >1 worker, see kthread_queue_work() */
     966           0 :         WARN_ON_ONCE(work->worker && work->worker != worker);
     967           0 : }
     968             : 
     969             : /* insert @work before @pos in @worker */
     970           0 : static void kthread_insert_work(struct kthread_worker *worker,
     971             :                                 struct kthread_work *work,
     972             :                                 struct list_head *pos)
     973             : {
     974           0 :         kthread_insert_work_sanity_check(worker, work);
     975             : 
     976           0 :         trace_sched_kthread_work_queue_work(worker, work);
     977             : 
     978           0 :         list_add_tail(&work->node, pos);
     979           0 :         work->worker = worker;
     980           0 :         if (!worker->current_work && likely(worker->task))
     981           0 :                 wake_up_process(worker->task);
     982           0 : }
     983             : 
     984             : /**
     985             :  * kthread_queue_work - queue a kthread_work
     986             :  * @worker: target kthread_worker
     987             :  * @work: kthread_work to queue
     988             :  *
      989             :  * Queue @work on @worker for async execution.  @worker
      990             :  * must have been created with kthread_create_worker().  Returns %true
     991             :  * if @work was successfully queued, %false if it was already pending.
     992             :  *
     993             :  * Reinitialize the work if it needs to be used by another worker.
     994             :  * For example, when the worker was stopped and started again.
     995             :  */
     996           0 : bool kthread_queue_work(struct kthread_worker *worker,
     997             :                         struct kthread_work *work)
     998             : {
     999           0 :         bool ret = false;
    1000             :         unsigned long flags;
    1001             : 
    1002           0 :         raw_spin_lock_irqsave(&worker->lock, flags);
    1003           0 :         if (!queuing_blocked(worker, work)) {
    1004           0 :                 kthread_insert_work(worker, work, &worker->work_list);
    1005           0 :                 ret = true;
    1006             :         }
    1007           0 :         raw_spin_unlock_irqrestore(&worker->lock, flags);
    1008           0 :         return ret;
    1009             : }
    1010             : EXPORT_SYMBOL_GPL(kthread_queue_work);
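
/*
 * Worker API sketch (assumed names): create a worker thread, queue one
 * work item, wait for it, then tear the worker down. kthread_init_work(),
 * kthread_flush_work() and kthread_destroy_worker() are declared in
 * <linux/kthread.h>.
 */
static void example_work_fn(struct kthread_work *work)
{
        /* runs in the worker thread's context */
}

static int example_worker_demo(void)
{
        struct kthread_worker *w;
        struct kthread_work work;

        w = kthread_create_worker(0, "example_worker");
        if (IS_ERR(w))
                return PTR_ERR(w);

        kthread_init_work(&work, example_work_fn);
        kthread_queue_work(w, &work);
        kthread_flush_work(&work);      /* wait until example_work_fn has run */

        kthread_destroy_worker(w);      /* stops the thread and frees the worker */
        return 0;
}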
    1011             : 
    1012             : /**
    1013             :  * kthread_delayed_work_timer_fn - callback that queues the associated kthread
    1014             :  *      delayed work when the timer expires.
    1015             :  * @t: pointer to the expired timer
    1016             :  *
    1017             :  * The format of the function is defined by struct timer_list.
     1018             :  * It is called from an irq-safe timer with interrupts already disabled.
    1019             :  */
    1020           0 : void kthread_delayed_work_timer_fn(struct timer_list *t)
    1021             : {
    1022           0 :         struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
    1023           0 :         struct kthread_work *work = &dwork->work;
    1024           0 :         struct kthread_worker *worker = work->worker;
    1025             :         unsigned long flags;
    1026             : 
    1027             :         /*
    1028             :          * This might happen when a pending work is reinitialized.
     1029             :          * It means that the work is being used the wrong way.
    1030             :          */
    1031           0 :         if (WARN_ON_ONCE(!worker))
    1032             :                 return;
    1033             : 
    1034           0 :         raw_spin_lock_irqsave(&worker->lock, flags);
    1035             :         /* Work must not be used with >1 worker, see kthread_queue_work(). */
    1036           0 :         WARN_ON_ONCE(work->worker != worker);
    1037             : 
    1038             :         /* Move the work from worker->delayed_work_list. */
    1039           0 :         WARN_ON_ONCE(list_empty(&work->node));
    1040           0 :         list_del_init(&work->node);
    1041           0 :         if (!work->canceling)
    1042           0 :                 kthread_insert_work(worker, work, &worker->work_list);
    1043             : 
    1044           0 :         raw_spin_unlock_irqrestore(&worker->lock, flags);
    1045             : }
    1046             : EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
    1047             : 
    1048           0 : static void __kthread_queue_delayed_work(struct kthread_worker *worker,
    1049             :                                          struct kthread_delayed_work *dwork,
    1050             :                                          unsigned long delay)
    1051             : {
    1052           0 :         struct timer_list *timer = &dwork->timer;
    1053           0 :         struct kthread_work *work = &dwork->work;
    1054             : 
    1055           0 :         WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);
    1056             : 
    1057             :         /*
    1058             :          * If @delay is 0, queue @dwork->work immediately.  This is for
    1059             :          * both optimization and correctness.  The earliest @timer can
    1060             :          * expire is on the closest next tick and delayed_work users depend
    1061             :          * on that there's no such delay when @delay is 0.
    1062             :          */
    1063           0 :         if (!delay) {
    1064           0 :                 kthread_insert_work(worker, work, &worker->work_list);
    1065           0 :                 return;
    1066             :         }
    1067             : 
    1068             :         /* Be paranoid and try to detect possible races already now. */
    1069           0 :         kthread_insert_work_sanity_check(worker, work);
    1070             : 
    1071           0 :         list_add(&work->node, &worker->delayed_work_list);
    1072           0 :         work->worker = worker;
    1073           0 :         timer->expires = jiffies + delay;
    1074           0 :         add_timer(timer);
    1075             : }
    1076             : 
    1077             : /**
    1078             :  * kthread_queue_delayed_work - queue the associated kthread work
    1079             :  *      after a delay.
    1080             :  * @worker: target kthread_worker
    1081             :  * @dwork: kthread_delayed_work to queue
    1082             :  * @delay: number of jiffies to wait before queuing
    1083             :  *
     1084             :  * If the work is not pending, it starts a timer that will queue
    1085             :  * the work after the given @delay. If @delay is zero, it queues the
    1086             :  * work immediately.
    1087             :  *
     1088             :  * Return: %false if @work was already pending, meaning that either
     1089             :  * the timer was running or the work was queued. Returns %true
     1090             :  * otherwise.
    1091             :  */
    1092           0 : bool kthread_queue_delayed_work(struct kthread_worker *worker,
    1093             :                                 struct kthread_delayed_work *dwork,
    1094             :                                 unsigned long delay)
    1095             : {
    1096           0 :         struct kthread_work *work = &dwork->work;
    1097             :         unsigned long flags;
    1098           0 :         bool ret = false;
    1099             : 
    1100           0 :         raw_spin_lock_irqsave(&worker->lock, flags);
    1101             : 
    1102           0 :         if (!queuing_blocked(worker, work)) {
    1103           0 :                 __kthread_queue_delayed_work(worker, dwork, delay);
    1104           0 :                 ret = true;
    1105             :         }
    1106             : 
    1107           0 :         raw_spin_unlock_irqrestore(&worker->lock, flags);
    1108           0 :         return ret;
    1109             : }
    1110             : EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
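
A short sketch of the delayed variant, reusing the hypothetical worker and handler from the earlier example; kthread_init_delayed_work() and msecs_to_jiffies() are standard kernel helpers:

    /* Hypothetical example, not part of kthread.c. */
    static struct kthread_delayed_work my_dwork;

    static void my_setup(struct kthread_worker *worker)
    {
            kthread_init_delayed_work(&my_dwork, my_work_fn);
            /* my_work_fn() runs on the worker roughly 100ms from now. */
            if (!kthread_queue_delayed_work(worker, &my_dwork,
                                            msecs_to_jiffies(100)))
                    pr_warn("my_dwork was already pending\n");
    }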
    1111             : 
    1112             : struct kthread_flush_work {
    1113             :         struct kthread_work     work;
    1114             :         struct completion       done;
    1115             : };
    1116             : 
    1117           0 : static void kthread_flush_work_fn(struct kthread_work *work)
    1118             : {
    1119           0 :         struct kthread_flush_work *fwork =
    1120           0 :                 container_of(work, struct kthread_flush_work, work);
    1121           0 :         complete(&fwork->done);
    1122           0 : }
    1123             : 
    1124             : /**
    1125             :  * kthread_flush_work - flush a kthread_work
    1126             :  * @work: work to flush
    1127             :  *
    1128             :  * If @work is queued or executing, wait for it to finish execution.
    1129             :  */
    1130           0 : void kthread_flush_work(struct kthread_work *work)
    1131             : {
    1132           0 :         struct kthread_flush_work fwork = {
    1133             :                 KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
    1134           0 :                 COMPLETION_INITIALIZER_ONSTACK(fwork.done),
    1135             :         };
    1136             :         struct kthread_worker *worker;
    1137           0 :         bool noop = false;
    1138             : 
    1139           0 :         worker = work->worker;
    1140           0 :         if (!worker)
    1141           0 :                 return;
    1142             : 
    1143           0 :         raw_spin_lock_irq(&worker->lock);
    1144             :         /* Work must not be used with >1 worker, see kthread_queue_work(). */
    1145           0 :         WARN_ON_ONCE(work->worker != worker);
    1146             : 
    1147           0 :         if (!list_empty(&work->node))
    1148           0 :                 kthread_insert_work(worker, &fwork.work, work->node.next);
    1149           0 :         else if (worker->current_work == work)
    1150           0 :                 kthread_insert_work(worker, &fwork.work,
    1151             :                                     worker->work_list.next);
    1152             :         else
    1153             :                 noop = true;
    1154             : 
    1155           0 :         raw_spin_unlock_irq(&worker->lock);
    1156             : 
    1157           0 :         if (!noop)
    1158           0 :                 wait_for_completion(&fwork.done);
    1159             : }
    1160             : EXPORT_SYMBOL_GPL(kthread_flush_work);
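
A typical reason to flush, sketched with the hypothetical my_work from the earlier example: synchronize with the handler before consuming whatever state it updates.

    /* Hypothetical example, not part of kthread.c. */
    static void my_read_result(void)
    {
            /* Returns once my_work is neither queued nor executing. */
            kthread_flush_work(&my_work);
            /* Safe to read the state my_work_fn() updates here. */
    }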
    1161             : 
    1162             : /*
     1163             :  * Make sure that the timer is neither set nor running and can no
     1164             :  * longer manipulate the work list_head.
     1165             :  *
     1166             :  * The function is called under worker->lock. The lock is temporarily
     1167             :  * released, but the timer cannot be set again in the meantime.
    1168             :  */
    1169           0 : static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
    1170             :                                               unsigned long *flags)
    1171             : {
    1172           0 :         struct kthread_delayed_work *dwork =
    1173           0 :                 container_of(work, struct kthread_delayed_work, work);
    1174           0 :         struct kthread_worker *worker = work->worker;
    1175             : 
    1176             :         /*
    1177             :          * del_timer_sync() must be called to make sure that the timer
     1178             :          * callback is not running. The lock must be temporarily released
     1179             :          * to avoid a deadlock with the callback. In the meantime,
    1180             :          * any queuing is blocked by setting the canceling counter.
    1181             :          */
    1182           0 :         work->canceling++;
    1183           0 :         raw_spin_unlock_irqrestore(&worker->lock, *flags);
    1184           0 :         del_timer_sync(&dwork->timer);
    1185           0 :         raw_spin_lock_irqsave(&worker->lock, *flags);
    1186           0 :         work->canceling--;
    1187           0 : }
    1188             : 
    1189             : /*
    1190             :  * This function removes the work from the worker queue.
    1191             :  *
    1192             :  * It is called under worker->lock. The caller must make sure that
    1193             :  * the timer used by delayed work is not running, e.g. by calling
    1194             :  * kthread_cancel_delayed_work_timer().
    1195             :  *
     1196             :  * The work might still be in use when this function finishes; see
     1197             :  * current_work being processed by the worker.
    1198             :  *
    1199             :  * Return: %true if @work was pending and successfully canceled,
    1200             :  *      %false if @work was not pending
    1201             :  */
    1202             : static bool __kthread_cancel_work(struct kthread_work *work)
    1203             : {
    1204             :         /*
    1205             :          * Try to remove the work from a worker list. It might either
    1206             :          * be from worker->work_list or from worker->delayed_work_list.
    1207             :          */
    1208           0 :         if (!list_empty(&work->node)) {
    1209           0 :                 list_del_init(&work->node);
    1210             :                 return true;
    1211             :         }
    1212             : 
    1213             :         return false;
    1214             : }
    1215             : 
    1216             : /**
    1217             :  * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
    1218             :  * @worker: kthread worker to use
    1219             :  * @dwork: kthread delayed work to queue
    1220             :  * @delay: number of jiffies to wait before queuing
    1221             :  *
    1222             :  * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
    1223             :  * modify @dwork's timer so that it expires after @delay. If @delay is zero,
    1224             :  * @work is guaranteed to be queued immediately.
    1225             :  *
    1226             :  * Return: %false if @dwork was idle and queued, %true otherwise.
    1227             :  *
    1228             :  * A special case is when the work is being canceled in parallel.
    1229             :  * It might be caused either by the real kthread_cancel_delayed_work_sync()
    1230             :  * or yet another kthread_mod_delayed_work() call. We let the other command
    1231             :  * win and return %true here. The return value can be used for reference
    1232             :  * counting and the number of queued works stays the same. Anyway, the caller
     1233             :  * is supposed to synchronize these operations in a reasonable way.
    1234             :  *
    1235             :  * This function is safe to call from any context including IRQ handler.
    1236             :  * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
    1237             :  * for details.
    1238             :  */
    1239           0 : bool kthread_mod_delayed_work(struct kthread_worker *worker,
    1240             :                               struct kthread_delayed_work *dwork,
    1241             :                               unsigned long delay)
    1242             : {
    1243           0 :         struct kthread_work *work = &dwork->work;
    1244             :         unsigned long flags;
    1245             :         int ret;
    1246             : 
    1247           0 :         raw_spin_lock_irqsave(&worker->lock, flags);
    1248             : 
    1249             :         /* Do not bother with canceling when never queued. */
    1250           0 :         if (!work->worker) {
    1251             :                 ret = false;
    1252             :                 goto fast_queue;
    1253             :         }
    1254             : 
    1255             :         /* Work must not be used with >1 worker, see kthread_queue_work() */
    1256           0 :         WARN_ON_ONCE(work->worker != worker);
    1257             : 
    1258             :         /*
     1259             :          * Temporarily cancel the work but do not fight with another command
    1260             :          * that is canceling the work as well.
    1261             :          *
    1262             :          * It is a bit tricky because of possible races with another
    1263             :          * mod_delayed_work() and cancel_delayed_work() callers.
    1264             :          *
    1265             :          * The timer must be canceled first because worker->lock is released
    1266             :          * when doing so. But the work can be removed from the queue (list)
    1267             :          * only when it can be queued again so that the return value can
    1268             :          * be used for reference counting.
    1269             :          */
    1270           0 :         kthread_cancel_delayed_work_timer(work, &flags);
    1271           0 :         if (work->canceling) {
    1272             :                 /* The number of works in the queue does not change. */
    1273             :                 ret = true;
    1274             :                 goto out;
    1275             :         }
    1276           0 :         ret = __kthread_cancel_work(work);
    1277             : 
    1278             : fast_queue:
    1279           0 :         __kthread_queue_delayed_work(worker, dwork, delay);
    1280             : out:
    1281           0 :         raw_spin_unlock_irqrestore(&worker->lock, flags);
    1282           0 :         return ret;
    1283             : }
    1284             : EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
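
One common pattern built on this call is a deadline that is pushed out on every event, i.e. a software watchdog. A sketch with the hypothetical my_dwork from above:

    /* Hypothetical example, not part of kthread.c. */
    static void my_heartbeat(struct kthread_worker *worker)
    {
            /*
             * (Re)arm the deadline: while heartbeats keep arriving the
             * timeout keeps moving; once they stop, my_work_fn() fires
             * about one second later.
             */
            kthread_mod_delayed_work(worker, &my_dwork,
                                     msecs_to_jiffies(1000));
    }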
    1285             : 
    1286           0 : static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
    1287             : {
    1288           0 :         struct kthread_worker *worker = work->worker;
    1289             :         unsigned long flags;
    1290           0 :         int ret = false;
    1291             : 
    1292           0 :         if (!worker)
    1293             :                 goto out;
    1294             : 
    1295           0 :         raw_spin_lock_irqsave(&worker->lock, flags);
    1296             :         /* Work must not be used with >1 worker, see kthread_queue_work(). */
    1297           0 :         WARN_ON_ONCE(work->worker != worker);
    1298             : 
    1299           0 :         if (is_dwork)
    1300           0 :                 kthread_cancel_delayed_work_timer(work, &flags);
    1301             : 
    1302           0 :         ret = __kthread_cancel_work(work);
    1303             : 
    1304           0 :         if (worker->current_work != work)
    1305             :                 goto out_fast;
    1306             : 
    1307             :         /*
    1308             :          * The work is in progress and we need to wait with the lock released.
    1309             :          * In the meantime, block any queuing by setting the canceling counter.
    1310             :          */
    1311           0 :         work->canceling++;
    1312           0 :         raw_spin_unlock_irqrestore(&worker->lock, flags);
    1313           0 :         kthread_flush_work(work);
    1314           0 :         raw_spin_lock_irqsave(&worker->lock, flags);
    1315           0 :         work->canceling--;
    1316             : 
    1317             : out_fast:
    1318           0 :         raw_spin_unlock_irqrestore(&worker->lock, flags);
    1319             : out:
    1320           0 :         return ret;
    1321             : }
    1322             : 
    1323             : /**
    1324             :  * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
    1325             :  * @work: the kthread work to cancel
    1326             :  *
    1327             :  * Cancel @work and wait for its execution to finish.  This function
    1328             :  * can be used even if the work re-queues itself. On return from this
    1329             :  * function, @work is guaranteed to be not pending or executing on any CPU.
    1330             :  *
    1331             :  * kthread_cancel_work_sync(&delayed_work->work) must not be used for
    1332             :  * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
    1333             :  *
    1334             :  * The caller must ensure that the worker on which @work was last
    1335             :  * queued can't be destroyed before this function returns.
    1336             :  *
    1337             :  * Return: %true if @work was pending, %false otherwise.
    1338             :  */
    1339           0 : bool kthread_cancel_work_sync(struct kthread_work *work)
    1340             : {
    1341           0 :         return __kthread_cancel_work_sync(work, false);
    1342             : }
    1343             : EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
    1344             : 
    1345             : /**
    1346             :  * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
    1347             :  *      wait for it to finish.
    1348             :  * @dwork: the kthread delayed work to cancel
    1349             :  *
    1350             :  * This is kthread_cancel_work_sync() for delayed works.
    1351             :  *
    1352             :  * Return: %true if @dwork was pending, %false otherwise.
    1353             :  */
    1354           0 : bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
    1355             : {
    1356           0 :         return __kthread_cancel_work_sync(&dwork->work, true);
    1357             : }
    1358             : EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
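
A sketch of an orderly shutdown using both cancel helpers on the hypothetical work items from the earlier examples:

    /* Hypothetical example, not part of kthread.c. */
    static void my_driver_stop(void)
    {
            kthread_cancel_delayed_work_sync(&my_dwork);
            kthread_cancel_work_sync(&my_work);
            /* Neither handler is pending or running on any CPU now. */
    }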
    1359             : 
    1360             : /**
    1361             :  * kthread_flush_worker - flush all current works on a kthread_worker
    1362             :  * @worker: worker to flush
    1363             :  *
    1364             :  * Wait until all currently executing or pending works on @worker are
    1365             :  * finished.
    1366             :  */
    1367           0 : void kthread_flush_worker(struct kthread_worker *worker)
    1368             : {
    1369           0 :         struct kthread_flush_work fwork = {
    1370             :                 KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
    1371           0 :                 COMPLETION_INITIALIZER_ONSTACK(fwork.done),
    1372             :         };
    1373             : 
    1374           0 :         kthread_queue_work(worker, &fwork.work);
    1375           0 :         wait_for_completion(&fwork.done);
    1376           0 : }
    1377             : EXPORT_SYMBOL_GPL(kthread_flush_worker);
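
kthread_flush_worker() can also serve as a barrier: everything queued on the worker before the call has finished once it returns. A one-line sketch with the hypothetical worker from above:

    /* Hypothetical example, not part of kthread.c. */
    static void my_quiesce(struct kthread_worker *worker)
    {
            kthread_flush_worker(worker);   /* all prior work is done */
    }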
    1378             : 
    1379             : /**
    1380             :  * kthread_destroy_worker - destroy a kthread worker
    1381             :  * @worker: worker to be destroyed
    1382             :  *
    1383             :  * Flush and destroy @worker.  The simple flush is enough because the kthread
    1384             :  * worker API is used only in trivial scenarios.  There are no multi-step state
    1385             :  * machines needed.
    1386             :  *
     1387             :  * Note that this function is not responsible for handling delayed work, so
     1388             :  * the caller must queue or cancel all delayed work items before invoking
     1389             :  * this function.
    1390             :  */
    1391           0 : void kthread_destroy_worker(struct kthread_worker *worker)
    1392             : {
    1393             :         struct task_struct *task;
    1394             : 
    1395           0 :         task = worker->task;
    1396           0 :         if (WARN_ON(!task))
    1397             :                 return;
    1398             : 
    1399           0 :         kthread_flush_worker(worker);
    1400           0 :         kthread_stop(task);
    1401           0 :         WARN_ON(!list_empty(&worker->delayed_work_list));
    1402           0 :         WARN_ON(!list_empty(&worker->work_list));
    1403           0 :         kfree(worker);
    1404             : }
    1405             : EXPORT_SYMBOL(kthread_destroy_worker);
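
A teardown sketch matching the note above, reusing the hypothetical names from the earlier examples: delayed work is dealt with first, then the worker is flushed, stopped and freed.

    /* Hypothetical example, not part of kthread.c. */
    static void my_driver_exit(struct kthread_worker *worker)
    {
            kthread_cancel_delayed_work_sync(&my_dwork);
            kthread_destroy_worker(worker);
    }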
    1406             : 
    1407             : /**
    1408             :  * kthread_use_mm - make the calling kthread operate on an address space
    1409             :  * @mm: address space to operate on
    1410             :  */
    1411           0 : void kthread_use_mm(struct mm_struct *mm)
    1412             : {
    1413             :         struct mm_struct *active_mm;
    1414           0 :         struct task_struct *tsk = current;
    1415             : 
    1416           0 :         WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
    1417           0 :         WARN_ON_ONCE(tsk->mm);
    1418             : 
    1419             :         /*
    1420             :          * It is possible for mm to be the same as tsk->active_mm, but
    1421             :          * we must still mmgrab(mm) and mmdrop_lazy_tlb(active_mm),
    1422             :          * because these references are not equivalent.
    1423             :          */
    1424           0 :         mmgrab(mm);
    1425             : 
    1426           0 :         task_lock(tsk);
    1427             :         /* Hold off tlb flush IPIs while switching mm's */
    1428             :         local_irq_disable();
    1429           0 :         active_mm = tsk->active_mm;
    1430           0 :         tsk->active_mm = mm;
    1431           0 :         tsk->mm = mm;
    1432           0 :         membarrier_update_current_mm(mm);
    1433           0 :         switch_mm_irqs_off(active_mm, mm, tsk);
    1434             :         local_irq_enable();
    1435           0 :         task_unlock(tsk);
    1436             : #ifdef finish_arch_post_lock_switch
    1437             :         finish_arch_post_lock_switch();
    1438             : #endif
    1439             : 
    1440             :         /*
    1441             :          * When a kthread starts operating on an address space, the loop
     1442             :          * in membarrier_{private,global}_expedited() may not observe the
     1443             :          * change to tsk->mm and thus not issue an IPI. Membarrier requires a
    1444             :          * memory barrier after storing to tsk->mm, before accessing
    1445             :          * user-space memory. A full memory barrier for membarrier
    1446             :          * {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by
    1447             :          * mmdrop_lazy_tlb().
    1448             :          */
    1449           0 :         mmdrop_lazy_tlb(active_mm);
    1450           0 : }
    1451             : EXPORT_SYMBOL_GPL(kthread_use_mm);
    1452             : 
    1453             : /**
    1454             :  * kthread_unuse_mm - reverse the effect of kthread_use_mm()
    1455             :  * @mm: address space to operate on
    1456             :  */
    1457           0 : void kthread_unuse_mm(struct mm_struct *mm)
    1458             : {
    1459           0 :         struct task_struct *tsk = current;
    1460             : 
    1461           0 :         WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
    1462           0 :         WARN_ON_ONCE(!tsk->mm);
    1463             : 
    1464           0 :         task_lock(tsk);
    1465             :         /*
    1466             :          * When a kthread stops operating on an address space, the loop
     1467             :          * in membarrier_{private,global}_expedited() may not observe the
     1468             :          * change to tsk->mm and thus not issue an IPI. Membarrier requires a
    1469             :          * memory barrier after accessing user-space memory, before
    1470             :          * clearing tsk->mm.
    1471             :          */
    1472             :         smp_mb__after_spinlock();
    1473           0 :         sync_mm_rss(mm);
    1474             :         local_irq_disable();
    1475           0 :         tsk->mm = NULL;
    1476           0 :         membarrier_update_current_mm(NULL);
    1477           0 :         mmgrab_lazy_tlb(mm);
    1478             :         /* active_mm is still 'mm' */
    1479           0 :         enter_lazy_tlb(mm, tsk);
    1480             :         local_irq_enable();
    1481           0 :         task_unlock(tsk);
    1482             : 
    1483           0 :         mmdrop(mm);
    1484           0 : }
    1485             : EXPORT_SYMBOL_GPL(kthread_unuse_mm);
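
A sketch of the use/unuse pairing, as done by callers such as vhost; my_kthread_poke_user() and its arguments are hypothetical, and the caller is assumed to hold a reference keeping @mm alive:

    /* Hypothetical example, not part of kthread.c. */
    static void my_kthread_poke_user(struct mm_struct *mm,
                                     u32 __user *ptr, u32 val)
    {
            kthread_use_mm(mm);             /* adopt the address space */
            if (copy_to_user(ptr, &val, sizeof(val)))
                    pr_warn("copy_to_user failed\n");
            kthread_unuse_mm(mm);           /* and drop it again */
    }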
    1486             : 
    1487             : #ifdef CONFIG_BLK_CGROUP
    1488             : /**
    1489             :  * kthread_associate_blkcg - associate blkcg to current kthread
    1490             :  * @css: the cgroup info
    1491             :  *
    1492             :  * Current thread must be a kthread. The thread is running jobs on behalf of
     1493             :  * other threads. In some cases, we expect the jobs to attach the cgroup
     1494             :  * info of the original threads instead of that of the current thread. This
     1495             :  * function stores the original thread's cgroup info in the current kthread
     1496             :  * context for later retrieval.
    1497             :  */
    1498             : void kthread_associate_blkcg(struct cgroup_subsys_state *css)
    1499             : {
    1500             :         struct kthread *kthread;
    1501             : 
    1502             :         if (!(current->flags & PF_KTHREAD))
    1503             :                 return;
    1504             :         kthread = to_kthread(current);
    1505             :         if (!kthread)
    1506             :                 return;
    1507             : 
    1508             :         if (kthread->blkcg_css) {
    1509             :                 css_put(kthread->blkcg_css);
    1510             :                 kthread->blkcg_css = NULL;
    1511             :         }
    1512             :         if (css) {
    1513             :                 css_get(css);
    1514             :                 kthread->blkcg_css = css;
    1515             :         }
    1516             : }
    1517             : EXPORT_SYMBOL(kthread_associate_blkcg);
    1518             : 
    1519             : /**
    1520             :  * kthread_blkcg - get associated blkcg css of current kthread
    1521             :  *
    1522             :  * Current thread must be a kthread.
    1523             :  */
    1524             : struct cgroup_subsys_state *kthread_blkcg(void)
    1525             : {
    1526             :         struct kthread *kthread;
    1527             : 
    1528             :         if (current->flags & PF_KTHREAD) {
    1529             :                 kthread = to_kthread(current);
    1530             :                 if (kthread)
    1531             :                         return kthread->blkcg_css;
    1532             :         }
    1533             :         return NULL;
    1534             : }
    1535             : #endif
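
A sketch of the intended blkcg pattern: a kthread doing I/O on behalf of a user task temporarily charges that task's blkcg, similar to what the loop block driver does. struct my_request, its submitter_css field and my_submit_bio() are hypothetical:

    /* Hypothetical example, not part of kthread.c. */
    static void my_io_work_fn(struct kthread_work *work)
    {
            struct my_request *req =
                    container_of(work, struct my_request, work);

            kthread_associate_blkcg(req->submitter_css); /* takes a css ref */
            my_submit_bio(req);            /* I/O charged to that blkcg */
            kthread_associate_blkcg(NULL); /* drop the association */
    }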

Generated by: LCOV version 1.14