LCOV - code coverage report
Current view: top level - include/linux - workqueue.h
Test: coverage.info          Date: 2023-07-19 18:55:55
Coverage:  Lines: 5 of 8 hit (62.5 %)    Functions: 0 of 0 (-)

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0 */
       2             : /*
       3             :  * workqueue.h --- work queue handling for Linux.
       4             :  */
       5             : 
       6             : #ifndef _LINUX_WORKQUEUE_H
       7             : #define _LINUX_WORKQUEUE_H
       8             : 
       9             : #include <linux/timer.h>
      10             : #include <linux/linkage.h>
      11             : #include <linux/bitops.h>
      12             : #include <linux/lockdep.h>
      13             : #include <linux/threads.h>
      14             : #include <linux/atomic.h>
      15             : #include <linux/cpumask.h>
      16             : #include <linux/rcupdate.h>
      17             : 
      18             : struct workqueue_struct;
      19             : 
      20             : struct work_struct;
      21             : typedef void (*work_func_t)(struct work_struct *work);
      22             : void delayed_work_timer_fn(struct timer_list *t);
      23             : 
      24             : /*
      25             :  * The first word is the work queue pointer and the flags rolled into
      26             :  * one
      27             :  */
      28             : #define work_data_bits(work) ((unsigned long *)(&(work)->data))
      29             : 
      30             : enum {
      31             :         WORK_STRUCT_PENDING_BIT = 0,    /* work item is pending execution */
       32             :         WORK_STRUCT_INACTIVE_BIT = 1,   /* work item is inactive */
      33             :         WORK_STRUCT_PWQ_BIT     = 2,    /* data points to pwq */
      34             :         WORK_STRUCT_LINKED_BIT  = 3,    /* next work is linked to this one */
      35             : #ifdef CONFIG_DEBUG_OBJECTS_WORK
      36             :         WORK_STRUCT_STATIC_BIT  = 4,    /* static initializer (debugobjects) */
      37             :         WORK_STRUCT_COLOR_SHIFT = 5,    /* color for workqueue flushing */
      38             : #else
      39             :         WORK_STRUCT_COLOR_SHIFT = 4,    /* color for workqueue flushing */
      40             : #endif
      41             : 
      42             :         WORK_STRUCT_COLOR_BITS  = 4,
      43             : 
      44             :         WORK_STRUCT_PENDING     = 1 << WORK_STRUCT_PENDING_BIT,
      45             :         WORK_STRUCT_INACTIVE    = 1 << WORK_STRUCT_INACTIVE_BIT,
      46             :         WORK_STRUCT_PWQ         = 1 << WORK_STRUCT_PWQ_BIT,
      47             :         WORK_STRUCT_LINKED      = 1 << WORK_STRUCT_LINKED_BIT,
      48             : #ifdef CONFIG_DEBUG_OBJECTS_WORK
      49             :         WORK_STRUCT_STATIC      = 1 << WORK_STRUCT_STATIC_BIT,
      50             : #else
      51             :         WORK_STRUCT_STATIC      = 0,
      52             : #endif
      53             : 
      54             :         WORK_NR_COLORS          = (1 << WORK_STRUCT_COLOR_BITS),
      55             : 
      56             :         /* not bound to any CPU, prefer the local CPU */
      57             :         WORK_CPU_UNBOUND        = NR_CPUS,
      58             : 
      59             :         /*
      60             :          * Reserve 8 bits off of pwq pointer w/ debugobjects turned off.
      61             :          * This makes pwqs aligned to 256 bytes and allows 16 workqueue
      62             :          * flush colors.
      63             :          */
      64             :         WORK_STRUCT_FLAG_BITS   = WORK_STRUCT_COLOR_SHIFT +
      65             :                                   WORK_STRUCT_COLOR_BITS,
      66             : 
      67             :         /* data contains off-queue information when !WORK_STRUCT_PWQ */
      68             :         WORK_OFFQ_FLAG_BASE     = WORK_STRUCT_COLOR_SHIFT,
      69             : 
      70             :         __WORK_OFFQ_CANCELING   = WORK_OFFQ_FLAG_BASE,
      71             :         WORK_OFFQ_CANCELING     = (1 << __WORK_OFFQ_CANCELING),
      72             : 
      73             :         /*
      74             :          * When a work item is off queue, its high bits point to the last
      75             :          * pool it was on.  Cap at 31 bits and use the highest number to
      76             :          * indicate that no pool is associated.
      77             :          */
      78             :         WORK_OFFQ_FLAG_BITS     = 1,
      79             :         WORK_OFFQ_POOL_SHIFT    = WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
      80             :         WORK_OFFQ_LEFT          = BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
      81             :         WORK_OFFQ_POOL_BITS     = WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
      82             :         WORK_OFFQ_POOL_NONE     = (1LU << WORK_OFFQ_POOL_BITS) - 1,
      83             : 
      84             :         /* convenience constants */
      85             :         WORK_STRUCT_FLAG_MASK   = (1UL << WORK_STRUCT_FLAG_BITS) - 1,
      86             :         WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
      87             :         WORK_STRUCT_NO_POOL     = (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,
      88             : 
      89             :         /* bit mask for work_busy() return values */
      90             :         WORK_BUSY_PENDING       = 1 << 0,
      91             :         WORK_BUSY_RUNNING       = 1 << 1,
      92             : 
      93             :         /* maximum string length for set_worker_desc() */
      94             :         WORKER_DESC_LEN         = 24,
      95             : };
      96             : 
      97             : struct work_struct {
      98             :         atomic_long_t data;
      99             :         struct list_head entry;
     100             :         work_func_t func;
     101             : #ifdef CONFIG_LOCKDEP
     102             :         struct lockdep_map lockdep_map;
     103             : #endif
     104             : };
     105             : 
     106             : #define WORK_DATA_INIT()        ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
     107             : #define WORK_DATA_STATIC_INIT() \
     108             :         ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC))
     109             : 
     110             : struct delayed_work {
     111             :         struct work_struct work;
     112             :         struct timer_list timer;
     113             : 
     114             :         /* target workqueue and CPU ->timer uses to queue ->work */
     115             :         struct workqueue_struct *wq;
     116             :         int cpu;
     117             : };
     118             : 
     119             : struct rcu_work {
     120             :         struct work_struct work;
     121             :         struct rcu_head rcu;
     122             : 
     123             :         /* target workqueue ->rcu uses to queue ->work */
     124             :         struct workqueue_struct *wq;
     125             : };
     126             : 
     127             : /**
     128             :  * struct workqueue_attrs - A struct for workqueue attributes.
     129             :  *
     130             :  * This can be used to change attributes of an unbound workqueue.
     131             :  */
     132             : struct workqueue_attrs {
     133             :         /**
     134             :          * @nice: nice level
     135             :          */
     136             :         int nice;
     137             : 
     138             :         /**
     139             :          * @cpumask: allowed CPUs
     140             :          */
     141             :         cpumask_var_t cpumask;
     142             : 
     143             :         /**
     144             :          * @no_numa: disable NUMA affinity
     145             :          *
     146             :          * Unlike other fields, ``no_numa`` isn't a property of a worker_pool. It
      147             :          * only modifies how :c:func:`apply_workqueue_attrs` selects pools and thus
     148             :          * doesn't participate in pool hash calculations or equality comparisons.
     149             :          */
     150             :         bool no_numa;
     151             : };
     152             : 
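/*
 * Example sketch added by the editor (not part of workqueue.h, not covered by
 * this report): restricting an unbound workqueue to a subset of CPUs via
 * workqueue_attrs.  alloc_workqueue_attrs(), apply_workqueue_attrs() and
 * free_workqueue_attrs() are declared further down in this header; whether
 * they are reachable from your context (e.g. from a module) depends on the
 * kernel version.  The function name, nice value and CPU choice are
 * illustrative only.
 */
static int example_pin_unbound_wq(struct workqueue_struct *wq)
{
        struct workqueue_attrs *attrs;
        int ret;

        attrs = alloc_workqueue_attrs();
        if (!attrs)
                return -ENOMEM;

        attrs->nice = -5;                               /* boost worker priority */
        cpumask_copy(attrs->cpumask, cpumask_of(0));    /* run workers on CPU 0 only */

        ret = apply_workqueue_attrs(wq, attrs);         /* @wq must be WQ_UNBOUND */
        free_workqueue_attrs(attrs);
        return ret;
}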
     153             : static inline struct delayed_work *to_delayed_work(struct work_struct *work)
     154             : {
     155          38 :         return container_of(work, struct delayed_work, work);
     156             : }
     157             : 
     158             : static inline struct rcu_work *to_rcu_work(struct work_struct *work)
     159             : {
     160           0 :         return container_of(work, struct rcu_work, work);
     161             : }
     162             : 
     163             : struct execute_work {
     164             :         struct work_struct work;
     165             : };
     166             : 
     167             : #ifdef CONFIG_LOCKDEP
     168             : /*
     169             :  * NB: because we have to copy the lockdep_map, setting _key
     170             :  * here is required, otherwise it could get initialised to the
     171             :  * copy of the lockdep_map!
     172             :  */
     173             : #define __WORK_INIT_LOCKDEP_MAP(n, k) \
     174             :         .lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
     175             : #else
     176             : #define __WORK_INIT_LOCKDEP_MAP(n, k)
     177             : #endif
     178             : 
     179             : #define __WORK_INITIALIZER(n, f) {                                      \
     180             :         .data = WORK_DATA_STATIC_INIT(),                                \
     181             :         .entry  = { &(n).entry, &(n).entry },                           \
     182             :         .func = (f),                                                    \
     183             :         __WORK_INIT_LOCKDEP_MAP(#n, &(n))                           \
     184             :         }
     185             : 
     186             : #define __DELAYED_WORK_INITIALIZER(n, f, tflags) {                      \
     187             :         .work = __WORK_INITIALIZER((n).work, (f)),                      \
     188             :         .timer = __TIMER_INITIALIZER(delayed_work_timer_fn,\
     189             :                                      (tflags) | TIMER_IRQSAFE),         \
     190             :         }
     191             : 
     192             : #define DECLARE_WORK(n, f)                                              \
     193             :         struct work_struct n = __WORK_INITIALIZER(n, f)
     194             : 
     195             : #define DECLARE_DELAYED_WORK(n, f)                                      \
     196             :         struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)
     197             : 
     198             : #define DECLARE_DEFERRABLE_WORK(n, f)                                   \
     199             :         struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
     200             : 
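/*
 * Example sketch added by the editor (not part of workqueue.h): a statically
 * initialized work item.  DECLARE_WORK() above embeds __WORK_INITIALIZER(),
 * so no run-time INIT_WORK() call is needed.  schedule_work() is defined
 * further down in this header; all example_* names are hypothetical.
 */
static void example_work_fn(struct work_struct *work)
{
        pr_info("example work executed\n");
}
static DECLARE_WORK(example_work, example_work_fn);

/* from interrupt or process context:  schedule_work(&example_work); */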
     201             : #ifdef CONFIG_DEBUG_OBJECTS_WORK
     202             : extern void __init_work(struct work_struct *work, int onstack);
     203             : extern void destroy_work_on_stack(struct work_struct *work);
     204             : extern void destroy_delayed_work_on_stack(struct delayed_work *work);
     205             : static inline unsigned int work_static(struct work_struct *work)
     206             : {
     207             :         return *work_data_bits(work) & WORK_STRUCT_STATIC;
     208             : }
     209             : #else
     210             : static inline void __init_work(struct work_struct *work, int onstack) { }
     211             : static inline void destroy_work_on_stack(struct work_struct *work) { }
     212             : static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
     213             : static inline unsigned int work_static(struct work_struct *work) { return 0; }
     214             : #endif
     215             : 
     216             : /*
     217             :  * initialize all of a work item in one go
     218             :  *
     219             :  * NOTE! No point in using "atomic_long_set()": using a direct
     220             :  * assignment of the work data initializer allows the compiler
     221             :  * to generate better code.
     222             :  */
     223             : #ifdef CONFIG_LOCKDEP
     224             : #define __INIT_WORK(_work, _func, _onstack)                             \
     225             :         do {                                                            \
     226             :                 static struct lock_class_key __key;                     \
     227             :                                                                         \
     228             :                 __init_work((_work), _onstack);                         \
     229             :                 (_work)->data = (atomic_long_t) WORK_DATA_INIT();    \
     230             :                 lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \
     231             :                 INIT_LIST_HEAD(&(_work)->entry);                 \
     232             :                 (_work)->func = (_func);                             \
     233             :         } while (0)
     234             : #else
     235             : #define __INIT_WORK(_work, _func, _onstack)                             \
     236             :         do {                                                            \
     237             :                 __init_work((_work), _onstack);                         \
     238             :                 (_work)->data = (atomic_long_t) WORK_DATA_INIT();    \
     239             :                 INIT_LIST_HEAD(&(_work)->entry);                 \
     240             :                 (_work)->func = (_func);                             \
     241             :         } while (0)
     242             : #endif
     243             : 
     244             : #define INIT_WORK(_work, _func)                                         \
     245             :         __INIT_WORK((_work), (_func), 0)
     246             : 
     247             : #define INIT_WORK_ONSTACK(_work, _func)                                 \
     248             :         __INIT_WORK((_work), (_func), 1)
     249             : 
     250             : #define __INIT_DELAYED_WORK(_work, _func, _tflags)                      \
     251             :         do {                                                            \
     252             :                 INIT_WORK(&(_work)->work, (_func));                      \
     253             :                 __init_timer(&(_work)->timer,                            \
     254             :                              delayed_work_timer_fn,                     \
     255             :                              (_tflags) | TIMER_IRQSAFE);                \
     256             :         } while (0)
     257             : 
     258             : #define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)              \
     259             :         do {                                                            \
     260             :                 INIT_WORK_ONSTACK(&(_work)->work, (_func));              \
     261             :                 __init_timer_on_stack(&(_work)->timer,                   \
     262             :                                       delayed_work_timer_fn,            \
     263             :                                       (_tflags) | TIMER_IRQSAFE);       \
     264             :         } while (0)
     265             : 
     266             : #define INIT_DELAYED_WORK(_work, _func)                                 \
     267             :         __INIT_DELAYED_WORK(_work, _func, 0)
     268             : 
     269             : #define INIT_DELAYED_WORK_ONSTACK(_work, _func)                         \
     270             :         __INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)
     271             : 
     272             : #define INIT_DEFERRABLE_WORK(_work, _func)                              \
     273             :         __INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)
     274             : 
     275             : #define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)                      \
     276             :         __INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)
     277             : 
     278             : #define INIT_RCU_WORK(_work, _func)                                     \
     279             :         INIT_WORK(&(_work)->work, (_func))
     280             : 
     281             : #define INIT_RCU_WORK_ONSTACK(_work, _func)                             \
     282             :         INIT_WORK_ONSTACK(&(_work)->work, (_func))
     283             : 
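/*
 * Example sketch added by the editor (not part of workqueue.h): run-time
 * initialization of work items embedded in per-device state, the usual
 * pattern when a static DECLARE_*_WORK() cannot be used.  Handlers recover
 * their context with container_of(); to_delayed_work() (defined above) maps
 * the work_struct back to its delayed_work.  All example_* names are
 * hypothetical.
 */
struct example_dev {
        struct work_struct      reset_work;
        struct delayed_work     poll_work;
};

static void example_reset_fn(struct work_struct *work)
{
        struct example_dev *edev = container_of(work, struct example_dev,
                                                reset_work);
        /* ... reset edev's hardware ... */
}

static void example_poll_fn(struct work_struct *work)
{
        struct example_dev *edev = container_of(to_delayed_work(work),
                                                struct example_dev, poll_work);
        /* ... poll edev's hardware ... */
}

static void example_dev_setup(struct example_dev *edev)
{
        INIT_WORK(&edev->reset_work, example_reset_fn);
        INIT_DELAYED_WORK(&edev->poll_work, example_poll_fn);
}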
     284             : /**
     285             :  * work_pending - Find out whether a work item is currently pending
     286             :  * @work: The work item in question
     287             :  */
     288             : #define work_pending(work) \
     289             :         test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))
     290             : 
     291             : /**
     292             :  * delayed_work_pending - Find out whether a delayable work item is currently
     293             :  * pending
     294             :  * @w: The work item in question
     295             :  */
     296             : #define delayed_work_pending(w) \
     297             :         work_pending(&(w)->work)
     298             : 
     299             : /*
     300             :  * Workqueue flags and constants.  For details, please refer to
     301             :  * Documentation/core-api/workqueue.rst.
     302             :  */
     303             : enum {
     304             :         WQ_UNBOUND              = 1 << 1, /* not bound to any cpu */
     305             :         WQ_FREEZABLE            = 1 << 2, /* freeze during suspend */
     306             :         WQ_MEM_RECLAIM          = 1 << 3, /* may be used for memory reclaim */
     307             :         WQ_HIGHPRI              = 1 << 4, /* high priority */
     308             :         WQ_CPU_INTENSIVE        = 1 << 5, /* cpu intensive workqueue */
     309             :         WQ_SYSFS                = 1 << 6, /* visible in sysfs, see workqueue_sysfs_register() */
     310             : 
     311             :         /*
     312             :          * Per-cpu workqueues are generally preferred because they tend to
     313             :          * show better performance thanks to cache locality.  Per-cpu
     314             :          * workqueues exclude the scheduler from choosing the CPU to
     315             :          * execute the worker threads, which has an unfortunate side effect
     316             :          * of increasing power consumption.
     317             :          *
     318             :          * The scheduler considers a CPU idle if it doesn't have any task
     319             :          * to execute and tries to keep idle cores idle to conserve power;
     320             :          * however, for example, a per-cpu work item scheduled from an
     321             :          * interrupt handler on an idle CPU will force the scheduler to
     322             :          * execute the work item on that CPU breaking the idleness, which in
     323             :          * turn may lead to more scheduling choices which are sub-optimal
     324             :          * in terms of power consumption.
     325             :          *
     326             :          * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
     327             :          * but become unbound if workqueue.power_efficient kernel param is
      328             :          * specified.  Per-cpu workqueues which contribute significantly to
      329             :          * power consumption are identified and marked with this flag, and
      330             :          * enabling the power_efficient mode then yields a noticeable power
      331             :          * saving at the cost of a small performance penalty (see the link
      332             :          * below).
     333             :          *
     334             :          * http://thread.gmane.org/gmane.linux.kernel/1480396
     335             :          */
     336             :         WQ_POWER_EFFICIENT      = 1 << 7,
     337             : 
     338             :         __WQ_DESTROYING         = 1 << 15, /* internal: workqueue is destroying */
     339             :         __WQ_DRAINING           = 1 << 16, /* internal: workqueue is draining */
     340             :         __WQ_ORDERED            = 1 << 17, /* internal: workqueue is ordered */
     341             :         __WQ_LEGACY             = 1 << 18, /* internal: create*_workqueue() */
     342             :         __WQ_ORDERED_EXPLICIT   = 1 << 19, /* internal: alloc_ordered_workqueue() */
     343             : 
     344             :         WQ_MAX_ACTIVE           = 512,    /* I like 512, better ideas? */
     345             :         WQ_MAX_UNBOUND_PER_CPU  = 4,      /* 4 * #cpus for unbound wq */
     346             :         WQ_DFL_ACTIVE           = WQ_MAX_ACTIVE / 2,
     347             : };
     348             : 
     349             : /* unbound wq's aren't per-cpu, scale max_active according to #cpus */
     350             : #define WQ_UNBOUND_MAX_ACTIVE   \
     351             :         max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)
     352             : 
     353             : /*
     354             :  * System-wide workqueues which are always present.
     355             :  *
     356             :  * system_wq is the one used by schedule[_delayed]_work[_on]().
     357             :  * Multi-CPU multi-threaded.  There are users which expect relatively
     358             :  * short queue flush time.  Don't queue works which can run for too
     359             :  * long.
     360             :  *
     361             :  * system_highpri_wq is similar to system_wq but for work items which
     362             :  * require WQ_HIGHPRI.
     363             :  *
     364             :  * system_long_wq is similar to system_wq but may host long running
     365             :  * works.  Queue flushing might take relatively long.
     366             :  *
      367             :  * system_unbound_wq is an unbound workqueue.  Workers are not bound to
     368             :  * any specific CPU, not concurrency managed, and all queued works are
     369             :  * executed immediately as long as max_active limit is not reached and
     370             :  * resources are available.
     371             :  *
     372             :  * system_freezable_wq is equivalent to system_wq except that it's
     373             :  * freezable.
     374             :  *
     375             :  * *_power_efficient_wq are inclined towards saving power and converted
     376             :  * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
      377             :  * they are the same as their non-power-efficient counterparts - e.g.
     378             :  * system_power_efficient_wq is identical to system_wq if
     379             :  * 'wq_power_efficient' is disabled.  See WQ_POWER_EFFICIENT for more info.
     380             :  */
     381             : extern struct workqueue_struct *system_wq;
     382             : extern struct workqueue_struct *system_highpri_wq;
     383             : extern struct workqueue_struct *system_long_wq;
     384             : extern struct workqueue_struct *system_unbound_wq;
     385             : extern struct workqueue_struct *system_freezable_wq;
     386             : extern struct workqueue_struct *system_power_efficient_wq;
     387             : extern struct workqueue_struct *system_freezable_power_efficient_wq;
     388             : 
     389             : /**
     390             :  * alloc_workqueue - allocate a workqueue
     391             :  * @fmt: printf format for the name of the workqueue
     392             :  * @flags: WQ_* flags
     393             :  * @max_active: max in-flight work items, 0 for default
     394             :  * remaining args: args for @fmt
     395             :  *
     396             :  * Allocate a workqueue with the specified parameters.  For detailed
     397             :  * information on WQ_* flags, please refer to
     398             :  * Documentation/core-api/workqueue.rst.
     399             :  *
     400             :  * RETURNS:
     401             :  * Pointer to the allocated workqueue on success, %NULL on failure.
     402             :  */
     403             : __printf(1, 4) struct workqueue_struct *
     404             : alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...);
     405             : 
     406             : /**
     407             :  * alloc_ordered_workqueue - allocate an ordered workqueue
     408             :  * @fmt: printf format for the name of the workqueue
     409             :  * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
     410             :  * @args: args for @fmt
     411             :  *
     412             :  * Allocate an ordered workqueue.  An ordered workqueue executes at
      413             :  * most one work item at any given time in the queued order.  It is
      414             :  * implemented as an unbound workqueue with @max_active of one.
     415             :  *
     416             :  * RETURNS:
     417             :  * Pointer to the allocated workqueue on success, %NULL on failure.
     418             :  */
     419             : #define alloc_ordered_workqueue(fmt, flags, args...)                    \
     420             :         alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED |                \
     421             :                         __WQ_ORDERED_EXPLICIT | (flags), 1, ##args)
     422             : 
     423             : #define create_workqueue(name)                                          \
     424             :         alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
     425             : #define create_freezable_workqueue(name)                                \
     426             :         alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND |       \
     427             :                         WQ_MEM_RECLAIM, 1, (name))
     428             : #define create_singlethread_workqueue(name)                             \
     429             :         alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)
     430             : 
     431             : extern void destroy_workqueue(struct workqueue_struct *wq);
     432             : 
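/*
 * Example sketch added by the editor (not part of workqueue.h): allocating
 * and tearing down driver-private workqueues.  The names and flag choices
 * are illustrative; see Documentation/core-api/workqueue.rst for guidance
 * on picking flags and max_active.
 */
static struct workqueue_struct *example_wq;
static struct workqueue_struct *example_ordered_wq;

static int example_create_wqs(void)
{
        /* concurrency-managed, usable on the memory-reclaim path, default max_active */
        example_wq = alloc_workqueue("example_wq", WQ_MEM_RECLAIM, 0);
        if (!example_wq)
                return -ENOMEM;

        /* executes at most one item at a time, in queueing order */
        example_ordered_wq = alloc_ordered_workqueue("example_ordered_wq", 0);
        if (!example_ordered_wq) {
                destroy_workqueue(example_wq);
                return -ENOMEM;
        }
        return 0;
}

static void example_destroy_wqs(void)
{
        destroy_workqueue(example_ordered_wq);
        destroy_workqueue(example_wq);
}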
     433             : struct workqueue_attrs *alloc_workqueue_attrs(void);
     434             : void free_workqueue_attrs(struct workqueue_attrs *attrs);
     435             : int apply_workqueue_attrs(struct workqueue_struct *wq,
     436             :                           const struct workqueue_attrs *attrs);
     437             : int workqueue_set_unbound_cpumask(cpumask_var_t cpumask);
     438             : 
     439             : extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
     440             :                         struct work_struct *work);
     441             : extern bool queue_work_node(int node, struct workqueue_struct *wq,
     442             :                             struct work_struct *work);
     443             : extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
     444             :                         struct delayed_work *work, unsigned long delay);
     445             : extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
     446             :                         struct delayed_work *dwork, unsigned long delay);
     447             : extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);
     448             : 
     449             : extern void __flush_workqueue(struct workqueue_struct *wq);
     450             : extern void drain_workqueue(struct workqueue_struct *wq);
     451             : 
     452             : extern int schedule_on_each_cpu(work_func_t func);
     453             : 
     454             : int execute_in_process_context(work_func_t fn, struct execute_work *);
     455             : 
     456             : extern bool flush_work(struct work_struct *work);
     457             : extern bool cancel_work(struct work_struct *work);
     458             : extern bool cancel_work_sync(struct work_struct *work);
     459             : 
     460             : extern bool flush_delayed_work(struct delayed_work *dwork);
     461             : extern bool cancel_delayed_work(struct delayed_work *dwork);
     462             : extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
     463             : 
     464             : extern bool flush_rcu_work(struct rcu_work *rwork);
     465             : 
     466             : extern void workqueue_set_max_active(struct workqueue_struct *wq,
     467             :                                      int max_active);
     468             : extern struct work_struct *current_work(void);
     469             : extern bool current_is_workqueue_rescuer(void);
     470             : extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
     471             : extern unsigned int work_busy(struct work_struct *work);
     472             : extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
     473             : extern void print_worker_info(const char *log_lvl, struct task_struct *task);
     474             : extern void show_all_workqueues(void);
     475             : extern void show_freezable_workqueues(void);
     476             : extern void show_one_workqueue(struct workqueue_struct *wq);
     477             : extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);
     478             : 
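/*
 * Example sketch added by the editor (not part of workqueue.h): typical
 * teardown ordering using the declarations above.  cancel_*_sync() returns
 * only once the item is neither queued nor running, which is normally what
 * a device-removal path needs; flush_work() merely waits for a pending or
 * running instance to finish.  example_dev reuses the hypothetical struct
 * from the earlier sketch.
 */
static void example_dev_teardown(struct example_dev *edev)
{
        cancel_delayed_work_sync(&edev->poll_work);     /* stop periodic polling */
        cancel_work_sync(&edev->reset_work);            /* wait out any in-flight reset */
        /* it is now safe to free @edev */
}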
     479             : /**
     480             :  * queue_work - queue work on a workqueue
     481             :  * @wq: workqueue to use
     482             :  * @work: work to queue
     483             :  *
     484             :  * Returns %false if @work was already on a queue, %true otherwise.
     485             :  *
     486             :  * We queue the work to the CPU on which it was submitted, but if the CPU dies
     487             :  * it can be processed by another CPU.
     488             :  *
     489             :  * Memory-ordering properties:  If it returns %true, guarantees that all stores
     490             :  * preceding the call to queue_work() in the program order will be visible from
     491             :  * the CPU which will execute @work by the time such work executes, e.g.,
     492             :  *
     493             :  * { x is initially 0 }
     494             :  *
     495             :  *   CPU0                               CPU1
     496             :  *
     497             :  *   WRITE_ONCE(x, 1);                  [ @work is being executed ]
     498             :  *   r0 = queue_work(wq, work);           r1 = READ_ONCE(x);
     499             :  *
     500             :  * Forbids: r0 == true && r1 == 0
     501             :  */
     502             : static inline bool queue_work(struct workqueue_struct *wq,
     503             :                               struct work_struct *work)
     504             : {
     505          76 :         return queue_work_on(WORK_CPU_UNBOUND, wq, work);
     506             : }
     507             : 
     508             : /**
     509             :  * queue_delayed_work - queue work on a workqueue after delay
     510             :  * @wq: workqueue to use
     511             :  * @dwork: delayable work to queue
     512             :  * @delay: number of jiffies to wait before queueing
     513             :  *
     514             :  * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
     515             :  */
     516             : static inline bool queue_delayed_work(struct workqueue_struct *wq,
     517             :                                       struct delayed_work *dwork,
     518             :                                       unsigned long delay)
     519             : {
     520          11 :         return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
     521             : }
     522             : 
     523             : /**
     524             :  * mod_delayed_work - modify delay of or queue a delayed work
     525             :  * @wq: workqueue to use
     526             :  * @dwork: work to queue
     527             :  * @delay: number of jiffies to wait before queueing
     528             :  *
     529             :  * mod_delayed_work_on() on local CPU.
     530             :  */
     531             : static inline bool mod_delayed_work(struct workqueue_struct *wq,
     532             :                                     struct delayed_work *dwork,
     533             :                                     unsigned long delay)
     534             : {
     535           0 :         return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
     536             : }
     537             : 
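/*
 * Example sketch added by the editor (not part of workqueue.h): debouncing
 * with mod_delayed_work().  Every call pushes the deadline out again, so the
 * handler only runs once activity has been quiet for the whole delay.  The
 * 100 ms interval and names are illustrative.
 */
static void example_note_activity(struct workqueue_struct *wq,
                                  struct delayed_work *dwork)
{
        /* queues @dwork if idle, otherwise just moves its timer forward */
        mod_delayed_work(wq, dwork, msecs_to_jiffies(100));
}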
     538             : /**
     539             :  * schedule_work_on - put work task on a specific cpu
     540             :  * @cpu: cpu to put the work task on
     541             :  * @work: job to be done
     542             :  *
      543             :  * This puts a job on a specific CPU.
     544             :  */
     545             : static inline bool schedule_work_on(int cpu, struct work_struct *work)
     546             : {
     547           0 :         return queue_work_on(cpu, system_wq, work);
     548             : }
     549             : 
     550             : /**
     551             :  * schedule_work - put work task in global workqueue
     552             :  * @work: job to be done
     553             :  *
     554             :  * Returns %false if @work was already on the kernel-global workqueue and
     555             :  * %true otherwise.
     556             :  *
     557             :  * This puts a job in the kernel-global workqueue if it was not already
     558             :  * queued and leaves it in the same position on the kernel-global
     559             :  * workqueue otherwise.
     560             :  *
     561             :  * Shares the same memory-ordering properties of queue_work(), cf. the
     562             :  * DocBook header of queue_work().
     563             :  */
     564             : static inline bool schedule_work(struct work_struct *work)
     565             : {
     566         100 :         return queue_work(system_wq, work);
     567             : }
     568             : 
     569             : /*
     570             :  * Detect attempt to flush system-wide workqueues at compile time when possible.
     571             :  *
     572             :  * See https://lkml.kernel.org/r/49925af7-78a8-a3dd-bce6-cfc02e1a9236@I-love.SAKURA.ne.jp
     573             :  * for reasons and steps for converting system-wide workqueues into local workqueues.
     574             :  */
     575             : extern void __warn_flushing_systemwide_wq(void)
     576             :         __compiletime_warning("Please avoid flushing system-wide workqueues.");
     577             : 
     578             : /**
     579             :  * flush_scheduled_work - ensure that any scheduled work has run to completion.
     580             :  *
     581             :  * Forces execution of the kernel-global workqueue and blocks until its
     582             :  * completion.
     583             :  *
     584             :  * It's very easy to get into trouble if you don't take great care.
     585             :  * Either of the following situations will lead to deadlock:
     586             :  *
     587             :  *      One of the work items currently on the workqueue needs to acquire
     588             :  *      a lock held by your code or its caller.
     589             :  *
     590             :  *      Your code is running in the context of a work routine.
     591             :  *
     592             :  * They will be detected by lockdep when they occur, but the first might not
     593             :  * occur very often.  It depends on what work items are on the workqueue and
     594             :  * what locks they need, which you have no control over.
     595             :  *
     596             :  * In most situations flushing the entire workqueue is overkill; you merely
     597             :  * need to know that a particular work item isn't queued and isn't running.
     598             :  * In such cases you should use cancel_delayed_work_sync() or
     599             :  * cancel_work_sync() instead.
     600             :  *
     601             :  * Please stop calling this function! A conversion to stop flushing system-wide
     602             :  * workqueues is in progress. This function will be removed after all in-tree
     603             :  * users stopped calling this function.
     604             :  */
     605             : /*
     606             :  * The background of commit 771c035372a036f8 ("deprecate the
     607             :  * '__deprecated' attribute warnings entirely and for good") is that,
     608             :  * since Linus builds all modules between every single pull he does,
     609             :  * the standard kernel build needs to be _clean_ in order to be able to
     610             :  * notice when new problems happen. Therefore, don't emit warning while
     611             :  * there are in-tree users.
     612             :  */
     613             : #define flush_scheduled_work()                                          \
     614             : ({                                                                      \
     615             :         if (0)                                                          \
     616             :                 __warn_flushing_systemwide_wq();                        \
     617             :         __flush_workqueue(system_wq);                                   \
     618             : })
     619             : 
     620             : /*
     621             :  * Although there is no longer in-tree caller, for now just emit warning
     622             :  * in order to give out-of-tree callers time to update.
     623             :  */
     624             : #define flush_workqueue(wq)                                             \
     625             : ({                                                                      \
     626             :         struct workqueue_struct *_wq = (wq);                            \
     627             :                                                                         \
     628             :         if ((__builtin_constant_p(_wq == system_wq) &&                  \
     629             :              _wq == system_wq) ||                                       \
     630             :             (__builtin_constant_p(_wq == system_highpri_wq) &&          \
     631             :              _wq == system_highpri_wq) ||                               \
     632             :             (__builtin_constant_p(_wq == system_long_wq) &&             \
     633             :              _wq == system_long_wq) ||                                  \
     634             :             (__builtin_constant_p(_wq == system_unbound_wq) &&          \
     635             :              _wq == system_unbound_wq) ||                               \
     636             :             (__builtin_constant_p(_wq == system_freezable_wq) &&        \
     637             :              _wq == system_freezable_wq) ||                             \
     638             :             (__builtin_constant_p(_wq == system_power_efficient_wq) &&  \
     639             :              _wq == system_power_efficient_wq) ||                       \
     640             :             (__builtin_constant_p(_wq == system_freezable_power_efficient_wq) && \
     641             :              _wq == system_freezable_power_efficient_wq))               \
     642             :                 __warn_flushing_systemwide_wq();                        \
     643             :         __flush_workqueue(_wq);                                         \
     644             : })
     645             : 
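/*
 * Example sketch added by the editor (not part of workqueue.h): the kind of
 * conversion the comments above ask for.  Rather than flushing system_wq,
 * queue the item on a driver-private workqueue and flush only that, or wait
 * on the one item you actually care about.  Names are illustrative.
 */
static struct workqueue_struct *example_private_wq;    /* from alloc_workqueue() */

static void example_sync_my_work(struct work_struct *work)
{
        queue_work(example_private_wq, work);
        flush_workqueue(example_private_wq);    /* private wq: no compile-time warning */
        /* or, when only @work matters:  flush_work(work); */
}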
     646             : /**
     647             :  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
     648             :  * @cpu: cpu to use
     649             :  * @dwork: job to be done
     650             :  * @delay: number of jiffies to wait
     651             :  *
     652             :  * After waiting for a given time this puts a job in the kernel-global
     653             :  * workqueue on the specified CPU.
     654             :  */
     655             : static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
     656             :                                             unsigned long delay)
     657             : {
     658             :         return queue_delayed_work_on(cpu, system_wq, dwork, delay);
     659             : }
     660             : 
     661             : /**
     662             :  * schedule_delayed_work - put work task in global workqueue after delay
     663             :  * @dwork: job to be done
     664             :  * @delay: number of jiffies to wait or 0 for immediate execution
     665             :  *
     666             :  * After waiting for a given time this puts a job in the kernel-global
     667             :  * workqueue.
     668             :  */
     669             : static inline bool schedule_delayed_work(struct delayed_work *dwork,
     670             :                                          unsigned long delay)
     671             : {
     672           2 :         return queue_delayed_work(system_wq, dwork, delay);
     673             : }
     674             : 
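/*
 * Example sketch added by the editor (not part of workqueue.h): a
 * self-rearming poll loop on the kernel-global workqueue.  The one-second
 * period (HZ jiffies) and names are illustrative; pair the start path with
 * cancel_delayed_work_sync() on teardown.
 */
static struct delayed_work example_tick;

static void example_tick_fn(struct work_struct *work)
{
        /* ... do the periodic work ... */
        schedule_delayed_work(&example_tick, HZ);       /* run again in ~1 s */
}

static void example_start_ticking(void)
{
        INIT_DELAYED_WORK(&example_tick, example_tick_fn);
        schedule_delayed_work(&example_tick, HZ);
}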
     675             : #ifndef CONFIG_SMP
     676             : static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
     677             : {
     678             :         return fn(arg);
     679             : }
     680             : static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
     681             : {
     682             :         return fn(arg);
     683             : }
     684             : #else
     685             : long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
     686             : long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg);
     687             : #endif /* CONFIG_SMP */
     688             : 
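/*
 * Example sketch added by the editor (not part of workqueue.h): work_on_cpu()
 * runs @fn synchronously in a workqueue worker bound to @cpu and returns its
 * value, which is handy for per-CPU setup that must not run elsewhere.  On
 * !CONFIG_SMP it simply calls @fn, as shown above.  Names are illustrative.
 */
static long example_per_cpu_probe(void *arg)
{
        /* executes on the CPU handed to work_on_cpu() */
        return 0;
}

static long example_probe_cpu(int cpu)
{
        return work_on_cpu(cpu, example_per_cpu_probe, NULL);
}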
     689             : #ifdef CONFIG_FREEZER
     690             : extern void freeze_workqueues_begin(void);
     691             : extern bool freeze_workqueues_busy(void);
     692             : extern void thaw_workqueues(void);
     693             : #endif /* CONFIG_FREEZER */
     694             : 
     695             : #ifdef CONFIG_SYSFS
     696             : int workqueue_sysfs_register(struct workqueue_struct *wq);
     697             : #else   /* CONFIG_SYSFS */
     698             : static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
     699             : { return 0; }
     700             : #endif  /* CONFIG_SYSFS */
     701             : 
     702             : #ifdef CONFIG_WQ_WATCHDOG
     703             : void wq_watchdog_touch(int cpu);
     704             : #else   /* CONFIG_WQ_WATCHDOG */
     705             : static inline void wq_watchdog_touch(int cpu) { }
     706             : #endif  /* CONFIG_WQ_WATCHDOG */
     707             : 
     708             : #ifdef CONFIG_SMP
     709             : int workqueue_prepare_cpu(unsigned int cpu);
     710             : int workqueue_online_cpu(unsigned int cpu);
     711             : int workqueue_offline_cpu(unsigned int cpu);
     712             : #endif
     713             : 
     714             : void __init workqueue_init_early(void);
     715             : void __init workqueue_init(void);
     716             : 
     717             : #endif

Generated by: LCOV version 1.14