LCOV - code coverage report
Current view: top level - io_uring - io-wq.c (source / functions)
Test: coverage.info                    Date: 2023-04-06 08:38:28

                   Hit    Total    Coverage
        Lines:       5      594       0.8 %
        Functions:   1       48       2.1 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * Basic worker thread pool for io_uring
       4             :  *
       5             :  * Copyright (C) 2019 Jens Axboe
       6             :  *
       7             :  */
       8             : #include <linux/kernel.h>
       9             : #include <linux/init.h>
      10             : #include <linux/errno.h>
      11             : #include <linux/sched/signal.h>
      12             : #include <linux/percpu.h>
      13             : #include <linux/slab.h>
      14             : #include <linux/rculist_nulls.h>
      15             : #include <linux/cpu.h>
      16             : #include <linux/task_work.h>
      17             : #include <linux/audit.h>
      18             : #include <uapi/linux/io_uring.h>
      19             : 
      20             : #include "io-wq.h"
      21             : #include "slist.h"
      22             : #include "io_uring.h"
      23             : 
      24             : #define WORKER_IDLE_TIMEOUT     (5 * HZ)
      25             : 
      26             : enum {
      27             :         IO_WORKER_F_UP          = 1,    /* up and active */
      28             :         IO_WORKER_F_RUNNING     = 2,    /* account as running */
      29             :         IO_WORKER_F_FREE        = 4,    /* worker on free list */
      30             :         IO_WORKER_F_BOUND       = 8,    /* is doing bounded work */
      31             : };
      32             : 
      33             : enum {
      34             :         IO_WQ_BIT_EXIT          = 0,    /* wq exiting */
      35             : };
      36             : 
      37             : enum {
      38             :         IO_ACCT_STALLED_BIT     = 0,    /* stalled on hash */
      39             : };
      40             : 
      41             : /*
      42             :  * One for each thread in a wqe pool
      43             :  */
      44             : struct io_worker {
      45             :         refcount_t ref;
      46             :         unsigned flags;
      47             :         struct hlist_nulls_node nulls_node;
      48             :         struct list_head all_list;
      49             :         struct task_struct *task;
      50             :         struct io_wqe *wqe;
      51             : 
      52             :         struct io_wq_work *cur_work;
      53             :         struct io_wq_work *next_work;
      54             :         raw_spinlock_t lock;
      55             : 
      56             :         struct completion ref_done;
      57             : 
      58             :         unsigned long create_state;
      59             :         struct callback_head create_work;
      60             :         int create_index;
      61             : 
      62             :         union {
      63             :                 struct rcu_head rcu;
      64             :                 struct work_struct work;
      65             :         };
      66             : };
      67             : 
      68             : #if BITS_PER_LONG == 64
      69             : #define IO_WQ_HASH_ORDER        6
      70             : #else
      71             : #define IO_WQ_HASH_ORDER        5
      72             : #endif
      73             : 
      74             : #define IO_WQ_NR_HASH_BUCKETS   (1u << IO_WQ_HASH_ORDER)
      75             : 
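Concretely, the hash order fixes both the bucket count and the width of the
hash value that io_wq_hash_work() (further down) packs into work->flags:

    /* 64-bit: IO_WQ_NR_HASH_BUCKETS = 1u << 6 = 64 buckets
     * 32-bit: IO_WQ_NR_HASH_BUCKETS = 1u << 5 = 32 buckets
     * A hash_ptr() value of this width is stored in work->flags above
     * IO_WQ_HASH_SHIFT, see io_wq_hash_work() below.
     */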
      76             : struct io_wqe_acct {
      77             :         unsigned nr_workers;
      78             :         unsigned max_workers;
      79             :         int index;
      80             :         atomic_t nr_running;
      81             :         raw_spinlock_t lock;
      82             :         struct io_wq_work_list work_list;
      83             :         unsigned long flags;
      84             : };
      85             : 
      86             : enum {
      87             :         IO_WQ_ACCT_BOUND,
      88             :         IO_WQ_ACCT_UNBOUND,
      89             :         IO_WQ_ACCT_NR,
      90             : };
      91             : 
      92             : /*
      93             :  * Per-node worker thread pool
      94             :  */
      95             : struct io_wqe {
      96             :         raw_spinlock_t lock;
      97             :         struct io_wqe_acct acct[IO_WQ_ACCT_NR];
      98             : 
      99             :         int node;
     100             : 
     101             :         struct hlist_nulls_head free_list;
     102             :         struct list_head all_list;
     103             : 
     104             :         struct wait_queue_entry wait;
     105             : 
     106             :         struct io_wq *wq;
     107             :         struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];
     108             : 
     109             :         cpumask_var_t cpu_mask;
     110             : };
     111             : 
     112             : /*
     113             :  * Per io_wq state
      114             :  */
     115             : struct io_wq {
     116             :         unsigned long state;
     117             : 
     118             :         free_work_fn *free_work;
     119             :         io_wq_work_fn *do_work;
     120             : 
     121             :         struct io_wq_hash *hash;
     122             : 
     123             :         atomic_t worker_refs;
     124             :         struct completion worker_done;
     125             : 
     126             :         struct hlist_node cpuhp_node;
     127             : 
     128             :         struct task_struct *task;
     129             : 
     130             :         struct io_wqe *wqes[];
     131             : };
     132             : 
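The trailing wqes[] is a flexible array member: one io_wqe pointer per NUMA
node is allocated together with the io_wq itself. A minimal allocation
sketch, assuming the usual struct_size() idiom (example_alloc_wq() is
illustrative; the real io_wq_create() is outside the lines shown here):

    #include <linux/overflow.h>
    #include <linux/slab.h>

    static struct io_wq *example_alloc_wq(void)
    {
            struct io_wq *wq;

            /* sizeof(*wq) + nr_node_ids * sizeof(wq->wqes[0]), overflow-checked */
            wq = kzalloc(struct_size(wq, wqes, nr_node_ids), GFP_KERNEL);
            if (!wq)
                    return ERR_PTR(-ENOMEM);
            return wq;
    }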
     133             : static enum cpuhp_state io_wq_online;
     134             : 
     135             : struct io_cb_cancel_data {
     136             :         work_cancel_fn *fn;
     137             :         void *data;
     138             :         int nr_running;
     139             :         int nr_pending;
     140             :         bool cancel_all;
     141             : };
     142             : 
     143             : static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index);
     144             : static void io_wqe_dec_running(struct io_worker *worker);
     145             : static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
     146             :                                         struct io_wqe_acct *acct,
     147             :                                         struct io_cb_cancel_data *match);
     148             : static void create_worker_cb(struct callback_head *cb);
     149             : static void io_wq_cancel_tw_create(struct io_wq *wq);
     150             : 
     151             : static bool io_worker_get(struct io_worker *worker)
     152             : {
     153           0 :         return refcount_inc_not_zero(&worker->ref);
     154             : }
     155             : 
     156           0 : static void io_worker_release(struct io_worker *worker)
     157             : {
     158           0 :         if (refcount_dec_and_test(&worker->ref))
     159           0 :                 complete(&worker->ref_done);
     160           0 : }
     161             : 
     162             : static inline struct io_wqe_acct *io_get_acct(struct io_wqe *wqe, bool bound)
     163             : {
     164           0 :         return &wqe->acct[bound ? IO_WQ_ACCT_BOUND : IO_WQ_ACCT_UNBOUND];
     165             : }
     166             : 
     167             : static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
     168             :                                                    struct io_wq_work *work)
     169             : {
     170           0 :         return io_get_acct(wqe, !(work->flags & IO_WQ_WORK_UNBOUND));
     171             : }
     172             : 
     173             : static inline struct io_wqe_acct *io_wqe_get_acct(struct io_worker *worker)
     174             : {
     175           0 :         return io_get_acct(worker->wqe, worker->flags & IO_WORKER_F_BOUND);
     176             : }
     177             : 
     178             : static void io_worker_ref_put(struct io_wq *wq)
     179             : {
     180           0 :         if (atomic_dec_and_test(&wq->worker_refs))
     181           0 :                 complete(&wq->worker_done);
     182             : }
     183             : 
     184           0 : static void io_worker_cancel_cb(struct io_worker *worker)
     185             : {
     186           0 :         struct io_wqe_acct *acct = io_wqe_get_acct(worker);
     187           0 :         struct io_wqe *wqe = worker->wqe;
     188           0 :         struct io_wq *wq = wqe->wq;
     189             : 
     190           0 :         atomic_dec(&acct->nr_running);
     191           0 :         raw_spin_lock(&worker->wqe->lock);
     192           0 :         acct->nr_workers--;
     193           0 :         raw_spin_unlock(&worker->wqe->lock);
     194           0 :         io_worker_ref_put(wq);
     195           0 :         clear_bit_unlock(0, &worker->create_state);
     196           0 :         io_worker_release(worker);
     197           0 : }
     198             : 
     199           0 : static bool io_task_worker_match(struct callback_head *cb, void *data)
     200             : {
     201             :         struct io_worker *worker;
     202             : 
     203           0 :         if (cb->func != create_worker_cb)
     204             :                 return false;
     205           0 :         worker = container_of(cb, struct io_worker, create_work);
     206           0 :         return worker == data;
     207             : }
     208             : 
     209           0 : static void io_worker_exit(struct io_worker *worker)
     210             : {
     211           0 :         struct io_wqe *wqe = worker->wqe;
     212           0 :         struct io_wq *wq = wqe->wq;
     213             : 
     214           0 :         while (1) {
     215           0 :                 struct callback_head *cb = task_work_cancel_match(wq->task,
     216             :                                                 io_task_worker_match, worker);
     217             : 
     218           0 :                 if (!cb)
     219             :                         break;
     220           0 :                 io_worker_cancel_cb(worker);
     221             :         }
     222             : 
     223           0 :         io_worker_release(worker);
     224           0 :         wait_for_completion(&worker->ref_done);
     225             : 
     226           0 :         raw_spin_lock(&wqe->lock);
     227           0 :         if (worker->flags & IO_WORKER_F_FREE)
     228           0 :                 hlist_nulls_del_rcu(&worker->nulls_node);
     229           0 :         list_del_rcu(&worker->all_list);
     230           0 :         raw_spin_unlock(&wqe->lock);
     231           0 :         io_wqe_dec_running(worker);
     232           0 :         worker->flags = 0;
     233           0 :         preempt_disable();
     234           0 :         current->flags &= ~PF_IO_WORKER;
     235           0 :         preempt_enable();
     236             : 
     237           0 :         kfree_rcu(worker, rcu);
     238           0 :         io_worker_ref_put(wqe->wq);
     239           0 :         do_exit(0);
     240             : }
     241             : 
     242             : static inline bool io_acct_run_queue(struct io_wqe_acct *acct)
     243             : {
     244           0 :         bool ret = false;
     245             : 
     246           0 :         raw_spin_lock(&acct->lock);
     247           0 :         if (!wq_list_empty(&acct->work_list) &&
     248           0 :             !test_bit(IO_ACCT_STALLED_BIT, &acct->flags))
     249           0 :                 ret = true;
     250           0 :         raw_spin_unlock(&acct->lock);
     251             : 
     252             :         return ret;
     253             : }
     254             : 
     255             : /*
      256             :  * Check the head of the free list for an available worker. If one isn't
      257             :  * available, the caller must create one.
     258             :  */
     259           0 : static bool io_wqe_activate_free_worker(struct io_wqe *wqe,
     260             :                                         struct io_wqe_acct *acct)
     261             :         __must_hold(RCU)
     262             : {
     263             :         struct hlist_nulls_node *n;
     264             :         struct io_worker *worker;
     265             : 
     266             :         /*
     267             :          * Iterate free_list and see if we can find an idle worker to
     268             :          * activate. If a given worker is on the free_list but in the process
     269             :          * of exiting, keep trying.
     270             :          */
     271           0 :         hlist_nulls_for_each_entry_rcu(worker, n, &wqe->free_list, nulls_node) {
     272           0 :                 if (!io_worker_get(worker))
     273           0 :                         continue;
     274           0 :                 if (io_wqe_get_acct(worker) != acct) {
     275           0 :                         io_worker_release(worker);
     276           0 :                         continue;
     277             :                 }
     278           0 :                 if (wake_up_process(worker->task)) {
     279           0 :                         io_worker_release(worker);
     280           0 :                         return true;
     281             :                 }
     282           0 :                 io_worker_release(worker);
     283             :         }
     284             : 
     285             :         return false;
     286             : }
     287             : 
     288             : /*
     289             :  * We need a worker. If we find a free one, we're good. If not, and we're
     290             :  * below the max number of workers, create one.
     291             :  */
     292           0 : static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
     293             : {
     294             :         /*
     295             :          * Most likely an attempt to queue unbounded work on an io_wq that
      296             :          * wasn't set up with any unbounded workers.
     297             :          */
     298           0 :         if (unlikely(!acct->max_workers))
     299           0 :                 pr_warn_once("io-wq is not configured for unbound workers");
     300             : 
     301           0 :         raw_spin_lock(&wqe->lock);
     302           0 :         if (acct->nr_workers >= acct->max_workers) {
     303           0 :                 raw_spin_unlock(&wqe->lock);
     304           0 :                 return true;
     305             :         }
     306           0 :         acct->nr_workers++;
     307           0 :         raw_spin_unlock(&wqe->lock);
     308           0 :         atomic_inc(&acct->nr_running);
     309           0 :         atomic_inc(&wqe->wq->worker_refs);
     310           0 :         return create_io_worker(wqe->wq, wqe, acct->index);
     311             : }
     312             : 
     313             : static void io_wqe_inc_running(struct io_worker *worker)
     314             : {
     315           0 :         struct io_wqe_acct *acct = io_wqe_get_acct(worker);
     316             : 
     317           0 :         atomic_inc(&acct->nr_running);
     318             : }
     319             : 
     320           0 : static void create_worker_cb(struct callback_head *cb)
     321             : {
     322             :         struct io_worker *worker;
     323             :         struct io_wq *wq;
     324             :         struct io_wqe *wqe;
     325             :         struct io_wqe_acct *acct;
     326           0 :         bool do_create = false;
     327             : 
     328           0 :         worker = container_of(cb, struct io_worker, create_work);
     329           0 :         wqe = worker->wqe;
     330           0 :         wq = wqe->wq;
     331           0 :         acct = &wqe->acct[worker->create_index];
     332           0 :         raw_spin_lock(&wqe->lock);
     333           0 :         if (acct->nr_workers < acct->max_workers) {
     334           0 :                 acct->nr_workers++;
     335           0 :                 do_create = true;
     336             :         }
     337           0 :         raw_spin_unlock(&wqe->lock);
     338           0 :         if (do_create) {
     339           0 :                 create_io_worker(wq, wqe, worker->create_index);
     340             :         } else {
     341           0 :                 atomic_dec(&acct->nr_running);
     342             :                 io_worker_ref_put(wq);
     343             :         }
     344           0 :         clear_bit_unlock(0, &worker->create_state);
     345           0 :         io_worker_release(worker);
     346           0 : }
     347             : 
     348           0 : static bool io_queue_worker_create(struct io_worker *worker,
     349             :                                    struct io_wqe_acct *acct,
     350             :                                    task_work_func_t func)
     351             : {
     352           0 :         struct io_wqe *wqe = worker->wqe;
     353           0 :         struct io_wq *wq = wqe->wq;
     354             : 
     355             :         /* raced with exit, just ignore create call */
     356           0 :         if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
     357             :                 goto fail;
     358           0 :         if (!io_worker_get(worker))
     359             :                 goto fail;
     360             :         /*
     361             :          * create_state manages ownership of create_work/index. We should
     362             :          * only need one entry per worker, as the worker going to sleep
     363             :          * will trigger the condition, and waking will clear it once it
     364             :          * runs the task_work.
     365             :          */
     366           0 :         if (test_bit(0, &worker->create_state) ||
     367           0 :             test_and_set_bit_lock(0, &worker->create_state))
     368             :                 goto fail_release;
     369             : 
     370           0 :         atomic_inc(&wq->worker_refs);
     371           0 :         init_task_work(&worker->create_work, func);
     372           0 :         worker->create_index = acct->index;
     373           0 :         if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) {
     374             :                 /*
     375             :                  * EXIT may have been set after checking it above, check after
     376             :                  * adding the task_work and remove any creation item if it is
     377             :                  * now set. wq exit does that too, but we can have added this
     378             :                  * work item after we canceled in io_wq_exit_workers().
     379             :                  */
     380           0 :                 if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
     381           0 :                         io_wq_cancel_tw_create(wq);
     382             :                 io_worker_ref_put(wq);
     383             :                 return true;
     384             :         }
     385           0 :         io_worker_ref_put(wq);
     386           0 :         clear_bit_unlock(0, &worker->create_state);
     387             : fail_release:
     388           0 :         io_worker_release(worker);
     389             : fail:
     390           0 :         atomic_dec(&acct->nr_running);
     391             :         io_worker_ref_put(wq);
     392             :         return false;
     393             : }
     394             : 
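The create_state handling above is an open-coded bit lock: the plain
test_bit() is a cheap racy fast path, test_and_set_bit_lock() is the real
acquire, and clear_bit_unlock() is the store-release. A generic sketch of
the pattern (owner_state and the payload comment are illustrative, not from
this file):

    #include <linux/bitops.h>

    static unsigned long owner_state;   /* bit 0 guards some resource */

    static bool try_claim(void)
    {
            /* racy pre-check avoids the atomic when clearly contended */
            if (test_bit(0, &owner_state) ||
                test_and_set_bit_lock(0, &owner_state))
                    return false;       /* another path owns it */
            /* ... exclusive use of the guarded resource ... */
            clear_bit_unlock(0, &owner_state);
            return true;
    }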
     395           0 : static void io_wqe_dec_running(struct io_worker *worker)
     396             : {
     397           0 :         struct io_wqe_acct *acct = io_wqe_get_acct(worker);
     398           0 :         struct io_wqe *wqe = worker->wqe;
     399             : 
     400           0 :         if (!(worker->flags & IO_WORKER_F_UP))
     401             :                 return;
     402             : 
     403           0 :         if (!atomic_dec_and_test(&acct->nr_running))
     404             :                 return;
     405           0 :         if (!io_acct_run_queue(acct))
     406             :                 return;
     407             : 
     408           0 :         atomic_inc(&acct->nr_running);
     409           0 :         atomic_inc(&wqe->wq->worker_refs);
     410           0 :         io_queue_worker_create(worker, acct, create_worker_cb);
     411             : }
     412             : 
     413             : /*
      414             :  * Worker will start processing some work. Move it to the busy list if
      415             :  * it's currently on the freelist.
     416             :  */
     417             : static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker)
     418             : {
     419           0 :         if (worker->flags & IO_WORKER_F_FREE) {
     420           0 :                 worker->flags &= ~IO_WORKER_F_FREE;
     421           0 :                 raw_spin_lock(&wqe->lock);
     422           0 :                 hlist_nulls_del_init_rcu(&worker->nulls_node);
     423           0 :                 raw_spin_unlock(&wqe->lock);
     424             :         }
     425             : }
     426             : 
     427             : /*
      428             :  * No work, worker going to sleep. Move it to the free_list if it isn't
      429             :  * already there. The caller holds wqe->lock across this call (see the
      430             :  * __must_hold annotation), so the list update is safe; no locks are
      431             :  * dropped here, and the caller in io_wqe_worker() performs the actual
      432             :  * sleep via schedule_timeout().
     433             :  */
     434             : static void __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
     435             :         __must_hold(wqe->lock)
     436             : {
     437           0 :         if (!(worker->flags & IO_WORKER_F_FREE)) {
     438           0 :                 worker->flags |= IO_WORKER_F_FREE;
     439           0 :                 hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
     440             :         }
     441             : }
     442             : 
     443             : static inline unsigned int io_get_work_hash(struct io_wq_work *work)
     444             : {
     445           0 :         return work->flags >> IO_WQ_HASH_SHIFT;
     446             : }
     447             : 
     448           0 : static bool io_wait_on_hash(struct io_wqe *wqe, unsigned int hash)
     449             : {
     450           0 :         struct io_wq *wq = wqe->wq;
     451           0 :         bool ret = false;
     452             : 
     453           0 :         spin_lock_irq(&wq->hash->wait.lock);
     454           0 :         if (list_empty(&wqe->wait.entry)) {
     455           0 :                 __add_wait_queue(&wq->hash->wait, &wqe->wait);
     456           0 :                 if (!test_bit(hash, &wq->hash->map)) {
     457           0 :                         __set_current_state(TASK_RUNNING);
     458           0 :                         list_del_init(&wqe->wait.entry);
     459           0 :                         ret = true;
     460             :                 }
     461             :         }
     462           0 :         spin_unlock_irq(&wq->hash->wait.lock);
     463           0 :         return ret;
     464             : }
     465             : 
     466           0 : static struct io_wq_work *io_get_next_work(struct io_wqe_acct *acct,
     467             :                                            struct io_worker *worker)
     468             :         __must_hold(acct->lock)
     469             : {
     470             :         struct io_wq_work_node *node, *prev;
     471             :         struct io_wq_work *work, *tail;
     472           0 :         unsigned int stall_hash = -1U;
     473           0 :         struct io_wqe *wqe = worker->wqe;
     474             : 
     475           0 :         wq_list_for_each(node, prev, &acct->work_list) {
     476             :                 unsigned int hash;
     477             : 
     478           0 :                 work = container_of(node, struct io_wq_work, list);
     479             : 
     480             :                 /* not hashed, can run anytime */
     481           0 :                 if (!io_wq_is_hashed(work)) {
     482           0 :                         wq_list_del(&acct->work_list, node, prev);
     483             :                         return work;
     484             :                 }
     485             : 
     486           0 :                 hash = io_get_work_hash(work);
     487             :                 /* all items with this hash lie in [work, tail] */
     488           0 :                 tail = wqe->hash_tail[hash];
     489             : 
     490             :                 /* hashed, can run if not already running */
     491           0 :                 if (!test_and_set_bit(hash, &wqe->wq->hash->map)) {
     492           0 :                         wqe->hash_tail[hash] = NULL;
     493           0 :                         wq_list_cut(&acct->work_list, &tail->list, prev);
     494             :                         return work;
     495             :                 }
     496           0 :                 if (stall_hash == -1U)
     497           0 :                         stall_hash = hash;
      498             :                 /* fast forward to the next hash; for-each will fix up @prev */
     499           0 :                 node = &tail->list;
     500             :         }
     501             : 
     502           0 :         if (stall_hash != -1U) {
     503             :                 bool unstalled;
     504             : 
     505             :                 /*
     506             :                  * Set this before dropping the lock to avoid racing with new
     507             :                  * work being added and clearing the stalled bit.
     508             :                  */
     509           0 :                 set_bit(IO_ACCT_STALLED_BIT, &acct->flags);
     510           0 :                 raw_spin_unlock(&acct->lock);
     511           0 :                 unstalled = io_wait_on_hash(wqe, stall_hash);
     512           0 :                 raw_spin_lock(&acct->lock);
     513           0 :                 if (unstalled) {
     514           0 :                         clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
     515           0 :                         if (wq_has_sleeper(&wqe->wq->hash->wait))
     516           0 :                                 wake_up(&wqe->wq->hash->wait);
     517             :                 }
     518             :         }
     519             : 
     520             :         return NULL;
     521             : }
     522             : 
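The "[work, tail]" comment relies on an invariant maintained by
io_wqe_insert_work() further down: all pending items with the same hash sit
adjacent in the list, with hash_tail[] pointing at the last of each run. An
illustrative layout with two hash values A and B:

    /*
     *   head -> A1 -> A2 -> A3 -> B1 -> B2 -> NULL
     *                       ^           ^
     *      hash_tail[A] ----+           +---- hash_tail[B]
     *
     * If hash A is already running, io_get_next_work() sets node to
     * hash_tail[A]->list (A3), so the for-each resumes at B1 instead of
     * testing A2 and A3 individually.
     */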
     523             : static void io_assign_current_work(struct io_worker *worker,
     524             :                                    struct io_wq_work *work)
     525             : {
     526           0 :         if (work) {
     527           0 :                 io_run_task_work();
     528           0 :                 cond_resched();
     529             :         }
     530             : 
     531           0 :         raw_spin_lock(&worker->lock);
     532           0 :         worker->cur_work = work;
     533           0 :         worker->next_work = NULL;
     534           0 :         raw_spin_unlock(&worker->lock);
     535             : }
     536             : 
     537             : static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);
     538             : 
     539           0 : static void io_worker_handle_work(struct io_worker *worker)
     540             : {
     541           0 :         struct io_wqe_acct *acct = io_wqe_get_acct(worker);
     542           0 :         struct io_wqe *wqe = worker->wqe;
     543           0 :         struct io_wq *wq = wqe->wq;
     544           0 :         bool do_kill = test_bit(IO_WQ_BIT_EXIT, &wq->state);
     545             : 
     546             :         do {
     547             :                 struct io_wq_work *work;
     548             : 
     549             :                 /*
     550             :                  * If we got some work, mark us as busy. If we didn't, but
     551             :                  * the list isn't empty, it means we stalled on hashed work.
     552             :                  * Mark us stalled so we don't keep looking for work when we
      553             :                  * can't make progress; any work completion or insertion will
     554             :                  * clear the stalled flag.
     555             :                  */
     556           0 :                 raw_spin_lock(&acct->lock);
     557           0 :                 work = io_get_next_work(acct, worker);
     558           0 :                 raw_spin_unlock(&acct->lock);
     559           0 :                 if (work) {
     560           0 :                         __io_worker_busy(wqe, worker);
     561             : 
     562             :                         /*
     563             :                          * Make sure cancelation can find this, even before
     564             :                          * it becomes the active work. That avoids a window
     565             :                          * where the work has been removed from our general
     566             :                          * work list, but isn't yet discoverable as the
     567             :                          * current work item for this worker.
     568             :                          */
     569           0 :                         raw_spin_lock(&worker->lock);
     570           0 :                         worker->next_work = work;
     571           0 :                         raw_spin_unlock(&worker->lock);
     572             :                 } else {
     573             :                         break;
     574             :                 }
     575           0 :                 io_assign_current_work(worker, work);
     576           0 :                 __set_current_state(TASK_RUNNING);
     577             : 
     578             :                 /* handle a whole dependent link */
     579             :                 do {
     580             :                         struct io_wq_work *next_hashed, *linked;
     581           0 :                         unsigned int hash = io_get_work_hash(work);
     582             : 
     583           0 :                         next_hashed = wq_next_work(work);
     584             : 
     585           0 :                         if (unlikely(do_kill) && (work->flags & IO_WQ_WORK_UNBOUND))
     586           0 :                                 work->flags |= IO_WQ_WORK_CANCEL;
     587           0 :                         wq->do_work(work);
     588           0 :                         io_assign_current_work(worker, NULL);
     589             : 
     590           0 :                         linked = wq->free_work(work);
     591           0 :                         work = next_hashed;
     592           0 :                         if (!work && linked && !io_wq_is_hashed(linked)) {
     593           0 :                                 work = linked;
     594           0 :                                 linked = NULL;
     595             :                         }
     596           0 :                         io_assign_current_work(worker, work);
     597           0 :                         if (linked)
     598           0 :                                 io_wqe_enqueue(wqe, linked);
     599             : 
     600           0 :                         if (hash != -1U && !next_hashed) {
     601             :                                 /* serialize hash clear with wake_up() */
     602           0 :                                 spin_lock_irq(&wq->hash->wait.lock);
     603           0 :                                 clear_bit(hash, &wq->hash->map);
     604           0 :                                 clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
     605           0 :                                 spin_unlock_irq(&wq->hash->wait.lock);
     606           0 :                                 if (wq_has_sleeper(&wq->hash->wait))
     607           0 :                                         wake_up(&wq->hash->wait);
     608             :                         }
     609           0 :                 } while (work);
     610             :         } while (1);
     611           0 : }
     612             : 
     613           0 : static int io_wqe_worker(void *data)
     614             : {
     615           0 :         struct io_worker *worker = data;
     616           0 :         struct io_wqe_acct *acct = io_wqe_get_acct(worker);
     617           0 :         struct io_wqe *wqe = worker->wqe;
     618           0 :         struct io_wq *wq = wqe->wq;
     619           0 :         bool last_timeout = false;
     620             :         char buf[TASK_COMM_LEN];
     621             : 
     622           0 :         worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
     623             : 
     624           0 :         snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task->pid);
     625           0 :         set_task_comm(current, buf);
     626             : 
     627           0 :         while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
     628             :                 long ret;
     629             : 
     630           0 :                 set_current_state(TASK_INTERRUPTIBLE);
     631           0 :                 while (io_acct_run_queue(acct))
     632           0 :                         io_worker_handle_work(worker);
     633             : 
     634           0 :                 raw_spin_lock(&wqe->lock);
     635             :                 /* timed out, exit unless we're the last worker */
     636           0 :                 if (last_timeout && acct->nr_workers > 1) {
     637           0 :                         acct->nr_workers--;
     638           0 :                         raw_spin_unlock(&wqe->lock);
     639           0 :                         __set_current_state(TASK_RUNNING);
     640           0 :                         break;
     641             :                 }
     642           0 :                 last_timeout = false;
     643           0 :                 __io_worker_idle(wqe, worker);
     644           0 :                 raw_spin_unlock(&wqe->lock);
     645           0 :                 if (io_run_task_work())
     646           0 :                         continue;
     647           0 :                 ret = schedule_timeout(WORKER_IDLE_TIMEOUT);
     648           0 :                 if (signal_pending(current)) {
     649             :                         struct ksignal ksig;
     650             : 
     651           0 :                         if (!get_signal(&ksig))
     652           0 :                                 continue;
     653           0 :                         break;
     654             :                 }
     655           0 :                 last_timeout = !ret;
     656             :         }
     657             : 
     658           0 :         if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
     659           0 :                 io_worker_handle_work(worker);
     660             : 
     661           0 :         io_worker_exit(worker);
     662             :         return 0;
     663             : }
     664             : 
     665             : /*
     666             :  * Called when a worker is scheduled in. Mark us as currently running.
     667             :  */
     668           0 : void io_wq_worker_running(struct task_struct *tsk)
     669             : {
     670           0 :         struct io_worker *worker = tsk->worker_private;
     671             : 
     672           0 :         if (!worker)
     673             :                 return;
     674           0 :         if (!(worker->flags & IO_WORKER_F_UP))
     675             :                 return;
     676           0 :         if (worker->flags & IO_WORKER_F_RUNNING)
     677             :                 return;
     678           0 :         worker->flags |= IO_WORKER_F_RUNNING;
     679             :         io_wqe_inc_running(worker);
     680             : }
     681             : 
     682             : /*
      683             :  * Called when a worker is going to sleep. If there are no workers currently
     684             :  * running and we have work pending, wake up a free one or create a new one.
     685             :  */
     686           0 : void io_wq_worker_sleeping(struct task_struct *tsk)
     687             : {
     688           0 :         struct io_worker *worker = tsk->worker_private;
     689             : 
     690           0 :         if (!worker)
     691             :                 return;
     692           0 :         if (!(worker->flags & IO_WORKER_F_UP))
     693             :                 return;
     694           0 :         if (!(worker->flags & IO_WORKER_F_RUNNING))
     695             :                 return;
     696             : 
     697           0 :         worker->flags &= ~IO_WORKER_F_RUNNING;
     698           0 :         io_wqe_dec_running(worker);
     699             : }
     700             : 
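Neither hook is called from io-wq itself: the core scheduler invokes them
around blocking for PF_IO_WORKER tasks, which is what keeps acct->nr_running
tracking workers that are actually runnable. Paraphrased caller context
(from kernel/sched/core.c, not part of this file):

    /* on entry to schedule(), before the task blocks */
    if (task_flags & PF_IO_WORKER)
            io_wq_worker_sleeping(tsk);

    /* after the task has been scheduled back in */
    if (tsk->flags & PF_IO_WORKER)
            io_wq_worker_running(tsk);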
     701           0 : static void io_init_new_worker(struct io_wqe *wqe, struct io_worker *worker,
     702             :                                struct task_struct *tsk)
     703             : {
     704           0 :         tsk->worker_private = worker;
     705           0 :         worker->task = tsk;
     706           0 :         set_cpus_allowed_ptr(tsk, wqe->cpu_mask);
     707           0 :         tsk->flags |= PF_NO_SETAFFINITY;
     708             : 
     709           0 :         raw_spin_lock(&wqe->lock);
     710           0 :         hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
     711           0 :         list_add_tail_rcu(&worker->all_list, &wqe->all_list);
     712           0 :         worker->flags |= IO_WORKER_F_FREE;
     713           0 :         raw_spin_unlock(&wqe->lock);
     714           0 :         wake_up_new_task(tsk);
     715           0 : }
     716             : 
     717           0 : static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
     718             : {
     719           0 :         return true;
     720             : }
     721             : 
     722           0 : static inline bool io_should_retry_thread(long err)
     723             : {
     724             :         /*
      725             :          * Prevent perpetual task_work retry if the task (or its group) is
     726             :          * exiting.
     727             :          */
     728           0 :         if (fatal_signal_pending(current))
     729             :                 return false;
     730             : 
     731           0 :         switch (err) {
     732             :         case -EAGAIN:
     733             :         case -ERESTARTSYS:
     734             :         case -ERESTARTNOINTR:
     735             :         case -ERESTARTNOHAND:
     736             :                 return true;
     737             :         default:
     738           0 :                 return false;
     739             :         }
     740             : }
     741             : 
     742           0 : static void create_worker_cont(struct callback_head *cb)
     743             : {
     744             :         struct io_worker *worker;
     745             :         struct task_struct *tsk;
     746             :         struct io_wqe *wqe;
     747             : 
     748           0 :         worker = container_of(cb, struct io_worker, create_work);
     749           0 :         clear_bit_unlock(0, &worker->create_state);
     750           0 :         wqe = worker->wqe;
     751           0 :         tsk = create_io_thread(io_wqe_worker, worker, wqe->node);
     752           0 :         if (!IS_ERR(tsk)) {
     753           0 :                 io_init_new_worker(wqe, worker, tsk);
     754           0 :                 io_worker_release(worker);
     755           0 :                 return;
     756           0 :         } else if (!io_should_retry_thread(PTR_ERR(tsk))) {
     757           0 :                 struct io_wqe_acct *acct = io_wqe_get_acct(worker);
     758             : 
     759           0 :                 atomic_dec(&acct->nr_running);
     760           0 :                 raw_spin_lock(&wqe->lock);
     761           0 :                 acct->nr_workers--;
     762           0 :                 if (!acct->nr_workers) {
     763           0 :                         struct io_cb_cancel_data match = {
     764             :                                 .fn             = io_wq_work_match_all,
     765             :                                 .cancel_all     = true,
     766             :                         };
     767             : 
     768           0 :                         raw_spin_unlock(&wqe->lock);
     769           0 :                         while (io_acct_cancel_pending_work(wqe, acct, &match))
     770             :                                 ;
     771             :                 } else {
     772           0 :                         raw_spin_unlock(&wqe->lock);
     773             :                 }
     774           0 :                 io_worker_ref_put(wqe->wq);
     775           0 :                 kfree(worker);
     776           0 :                 return;
     777             :         }
     778             : 
     779             :         /* re-create attempts grab a new worker ref, drop the existing one */
     780           0 :         io_worker_release(worker);
     781           0 :         schedule_work(&worker->work);
     782             : }
     783             : 
     784           0 : static void io_workqueue_create(struct work_struct *work)
     785             : {
     786           0 :         struct io_worker *worker = container_of(work, struct io_worker, work);
     787           0 :         struct io_wqe_acct *acct = io_wqe_get_acct(worker);
     788             : 
     789           0 :         if (!io_queue_worker_create(worker, acct, create_worker_cont))
     790           0 :                 kfree(worker);
     791           0 : }
     792             : 
     793           0 : static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
     794             : {
     795           0 :         struct io_wqe_acct *acct = &wqe->acct[index];
     796             :         struct io_worker *worker;
     797             :         struct task_struct *tsk;
     798             : 
     799           0 :         __set_current_state(TASK_RUNNING);
     800             : 
     801           0 :         worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);
     802           0 :         if (!worker) {
     803             : fail:
     804           0 :                 atomic_dec(&acct->nr_running);
     805           0 :                 raw_spin_lock(&wqe->lock);
     806           0 :                 acct->nr_workers--;
     807           0 :                 raw_spin_unlock(&wqe->lock);
     808             :                 io_worker_ref_put(wq);
     809             :                 return false;
     810             :         }
     811             : 
     812           0 :         refcount_set(&worker->ref, 1);
     813           0 :         worker->wqe = wqe;
     814             :         raw_spin_lock_init(&worker->lock);
     815           0 :         init_completion(&worker->ref_done);
     816             : 
     817           0 :         if (index == IO_WQ_ACCT_BOUND)
     818           0 :                 worker->flags |= IO_WORKER_F_BOUND;
     819             : 
     820           0 :         tsk = create_io_thread(io_wqe_worker, worker, wqe->node);
     821           0 :         if (!IS_ERR(tsk)) {
     822           0 :                 io_init_new_worker(wqe, worker, tsk);
     823           0 :         } else if (!io_should_retry_thread(PTR_ERR(tsk))) {
     824           0 :                 kfree(worker);
     825           0 :                 goto fail;
     826             :         } else {
     827           0 :                 INIT_WORK(&worker->work, io_workqueue_create);
     828           0 :                 schedule_work(&worker->work);
     829             :         }
     830             : 
     831             :         return true;
     832             : }
     833             : 
     834             : /*
      835             :  * Iterate the passed-in list and call the given function for each
      836             :  * worker that isn't exiting.
     837             :  */
     838           0 : static bool io_wq_for_each_worker(struct io_wqe *wqe,
     839             :                                   bool (*func)(struct io_worker *, void *),
     840             :                                   void *data)
     841             : {
     842             :         struct io_worker *worker;
     843           0 :         bool ret = false;
     844             : 
     845           0 :         list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
     846           0 :                 if (io_worker_get(worker)) {
     847             :                         /* no task if node is/was offline */
     848           0 :                         if (worker->task)
     849           0 :                                 ret = func(worker, data);
     850           0 :                         io_worker_release(worker);
     851           0 :                         if (ret)
     852             :                                 break;
     853             :                 }
     854             :         }
     855             : 
     856           0 :         return ret;
     857             : }
     858             : 
     859           0 : static bool io_wq_worker_wake(struct io_worker *worker, void *data)
     860             : {
     861           0 :         __set_notify_signal(worker->task);
     862           0 :         wake_up_process(worker->task);
     863           0 :         return false;
     864             : }
     865             : 
     866             : static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
     867             : {
     868           0 :         struct io_wq *wq = wqe->wq;
     869             : 
     870             :         do {
     871           0 :                 work->flags |= IO_WQ_WORK_CANCEL;
     872           0 :                 wq->do_work(work);
     873           0 :                 work = wq->free_work(work);
     874           0 :         } while (work);
     875             : }
     876             : 
     877           0 : static void io_wqe_insert_work(struct io_wqe *wqe, struct io_wq_work *work)
     878             : {
     879           0 :         struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
     880             :         unsigned int hash;
     881             :         struct io_wq_work *tail;
     882             : 
     883           0 :         if (!io_wq_is_hashed(work)) {
     884             : append:
     885           0 :                 wq_list_add_tail(&work->list, &acct->work_list);
     886             :                 return;
     887             :         }
     888             : 
     889           0 :         hash = io_get_work_hash(work);
     890           0 :         tail = wqe->hash_tail[hash];
     891           0 :         wqe->hash_tail[hash] = work;
     892           0 :         if (!tail)
     893             :                 goto append;
     894             : 
     895           0 :         wq_list_add_after(&work->list, &tail->list, &acct->work_list);
     896             : }
     897             : 
     898           0 : static bool io_wq_work_match_item(struct io_wq_work *work, void *data)
     899             : {
     900           0 :         return work == data;
     901             : }
     902             : 
     903           0 : static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
     904             : {
     905           0 :         struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
     906             :         struct io_cb_cancel_data match;
     907           0 :         unsigned work_flags = work->flags;
     908             :         bool do_create;
     909             : 
     910             :         /*
     911             :          * If io-wq is exiting for this task, or if the request has explicitly
     912             :          * been marked as one that should not get executed, cancel it here.
     913             :          */
     914           0 :         if (test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state) ||
     915           0 :             (work->flags & IO_WQ_WORK_CANCEL)) {
     916             :                 io_run_cancel(work, wqe);
     917           0 :                 return;
     918             :         }
     919             : 
     920           0 :         raw_spin_lock(&acct->lock);
     921           0 :         io_wqe_insert_work(wqe, work);
     922           0 :         clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
     923           0 :         raw_spin_unlock(&acct->lock);
     924             : 
     925           0 :         raw_spin_lock(&wqe->lock);
     926             :         rcu_read_lock();
     927           0 :         do_create = !io_wqe_activate_free_worker(wqe, acct);
     928             :         rcu_read_unlock();
     929             : 
     930           0 :         raw_spin_unlock(&wqe->lock);
     931             : 
     932           0 :         if (do_create && ((work_flags & IO_WQ_WORK_CONCURRENT) ||
     933           0 :             !atomic_read(&acct->nr_running))) {
     934             :                 bool did_create;
     935             : 
     936           0 :                 did_create = io_wqe_create_worker(wqe, acct);
     937           0 :                 if (likely(did_create))
     938             :                         return;
     939             : 
     940           0 :                 raw_spin_lock(&wqe->lock);
     941           0 :                 if (acct->nr_workers) {
     942           0 :                         raw_spin_unlock(&wqe->lock);
     943           0 :                         return;
     944             :                 }
     945           0 :                 raw_spin_unlock(&wqe->lock);
     946             : 
     947             :                 /* fatal condition, failed to create the first worker */
      948           0 :                 match.fn                = io_wq_work_match_item;
      949           0 :                 match.data              = work;
      950           0 :                 match.cancel_all        = false;
     951             : 
     952           0 :                 io_acct_cancel_pending_work(wqe, acct, &match);
     953             :         }
     954             : }
     955             : 
     956           0 : void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
     957             : {
     958           0 :         struct io_wqe *wqe = wq->wqes[numa_node_id()];
     959             : 
     960           0 :         io_wqe_enqueue(wqe, work);
     961           0 : }
     962             : 
     963             : /*
     964             :  * Work items that hash to the same value will not be done in parallel.
     965             :  * Used to limit concurrent writes, generally hashed by inode.
     966             :  */
     967           0 : void io_wq_hash_work(struct io_wq_work *work, void *val)
     968             : {
     969             :         unsigned int bit;
     970             : 
     971           0 :         bit = hash_ptr(val, IO_WQ_HASH_ORDER);
     972           0 :         work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
     973           0 : }
     974             : 
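A typical user hashes by inode so that buffered writes to the same file are
not run in parallel. A hedged usage sketch (queue_serialized_write() is
illustrative and not an io_uring function; the real write path does the
equivalent when it prepares hashed async work):

    #include <linux/fs.h>

    static void queue_serialized_write(struct io_wq *wq,
                                       struct io_wq_work *work,
                                       struct file *file)
    {
            /* same inode => same hash => serialized execution */
            io_wq_hash_work(work, file_inode(file));
            io_wq_enqueue(wq, work);
    }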
     975           0 : static bool __io_wq_worker_cancel(struct io_worker *worker,
     976             :                                   struct io_cb_cancel_data *match,
     977             :                                   struct io_wq_work *work)
     978             : {
     979           0 :         if (work && match->fn(work, match->data)) {
     980           0 :                 work->flags |= IO_WQ_WORK_CANCEL;
     981           0 :                 __set_notify_signal(worker->task);
     982             :                 return true;
     983             :         }
     984             : 
     985             :         return false;
     986             : }
     987             : 
     988           0 : static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
     989             : {
     990           0 :         struct io_cb_cancel_data *match = data;
     991             : 
     992             :         /*
      993             :          * Hold the lock to avoid ->cur_work going out of scope; the caller
      994             :          * may dereference the passed-in work.
     995             :          */
     996           0 :         raw_spin_lock(&worker->lock);
     997           0 :         if (__io_wq_worker_cancel(worker, match, worker->cur_work) ||
     998           0 :             __io_wq_worker_cancel(worker, match, worker->next_work))
     999           0 :                 match->nr_running++;
    1000           0 :         raw_spin_unlock(&worker->lock);
    1001             : 
    1002           0 :         return match->nr_running && !match->cancel_all;
    1003             : }
    1004             : 
    1005           0 : static inline void io_wqe_remove_pending(struct io_wqe *wqe,
    1006             :                                          struct io_wq_work *work,
    1007             :                                          struct io_wq_work_node *prev)
    1008             : {
    1009           0 :         struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
    1010           0 :         unsigned int hash = io_get_work_hash(work);
    1011           0 :         struct io_wq_work *prev_work = NULL;
    1012             : 
    1013           0 :         if (io_wq_is_hashed(work) && work == wqe->hash_tail[hash]) {
    1014           0 :                 if (prev)
    1015           0 :                         prev_work = container_of(prev, struct io_wq_work, list);
    1016           0 :                 if (prev_work && io_get_work_hash(prev_work) == hash)
    1017           0 :                         wqe->hash_tail[hash] = prev_work;
    1018             :                 else
    1019           0 :                         wqe->hash_tail[hash] = NULL;
    1020             :         }
    1021           0 :         wq_list_del(&acct->work_list, &work->list, prev);
    1022           0 : }
    1023             : 
    1024           0 : static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
    1025             :                                         struct io_wqe_acct *acct,
    1026             :                                         struct io_cb_cancel_data *match)
    1027             : {
    1028             :         struct io_wq_work_node *node, *prev;
    1029             :         struct io_wq_work *work;
    1030             : 
    1031           0 :         raw_spin_lock(&acct->lock);
    1032           0 :         wq_list_for_each(node, prev, &acct->work_list) {
    1033           0 :                 work = container_of(node, struct io_wq_work, list);
    1034           0 :                 if (!match->fn(work, match->data))
    1035           0 :                         continue;
    1036           0 :                 io_wqe_remove_pending(wqe, work, prev);
    1037           0 :                 raw_spin_unlock(&acct->lock);
    1038           0 :                 io_run_cancel(work, wqe);
    1039           0 :                 match->nr_pending++;
     1040             :                 /* not safe to keep iterating once the lock is dropped */
    1041             :                 return true;
    1042             :         }
    1043           0 :         raw_spin_unlock(&acct->lock);
    1044             : 
    1045             :         return false;
    1046             : }
    1047             : 
    1048           0 : static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
    1049             :                                        struct io_cb_cancel_data *match)
    1050             : {
    1051             :         int i;
    1052             : retry:
    1053           0 :         for (i = 0; i < IO_WQ_ACCT_NR; i++) {
    1054           0 :                 struct io_wqe_acct *acct = io_get_acct(wqe, i == 0);
    1055             : 
    1056           0 :                 if (io_acct_cancel_pending_work(wqe, acct, match)) {
    1057           0 :                         if (match->cancel_all)
    1058             :                                 goto retry;
    1059             :                         break;
    1060             :                 }
    1061             :         }
    1062           0 : }
    1063             : 
    1064             : static void io_wqe_cancel_running_work(struct io_wqe *wqe,
    1065             :                                        struct io_cb_cancel_data *match)
    1066             : {
    1067             :         rcu_read_lock();
    1068           0 :         io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
    1069             :         rcu_read_unlock();
    1070             : }
    1071             : 
    1072           0 : enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
    1073             :                                   void *data, bool cancel_all)
    1074             : {
    1075           0 :         struct io_cb_cancel_data match = {
    1076             :                 .fn             = cancel,
    1077             :                 .data           = data,
    1078             :                 .cancel_all     = cancel_all,
    1079             :         };
    1080             :         int node;
    1081             : 
    1082             :         /*
     1083             :          * First check the pending list; with luck the work can simply be
     1084             :          * removed from there. CANCEL_OK means the work is returned as if
     1085             :          * it had never run, and no completion will be posted for it.
     1086             :          *
     1087             :          * Then check whether a free (going busy) or busy worker currently
     1088             :          * has the work running. If it is found there, CANCEL_RUNNING is
     1089             :          * returned to indicate that we attempted to signal cancellation;
     1090             :          * the completion will still run normally in this case.
     1091             :          *
     1092             :          * The pending scan runs under acct->lock and the running scan
     1093             :          * under wqe->lock, ensuring a work item is found whatever its state.
    1094             :          */
    1095           0 :         for_each_node(node) {
    1096           0 :                 struct io_wqe *wqe = wq->wqes[node];
    1097             : 
    1098           0 :                 io_wqe_cancel_pending_work(wqe, &match);
    1099           0 :                 if (match.nr_pending && !match.cancel_all)
    1100             :                         return IO_WQ_CANCEL_OK;
    1101             : 
    1102           0 :                 raw_spin_lock(&wqe->lock);
    1103           0 :                 io_wqe_cancel_running_work(wqe, &match);
    1104           0 :                 raw_spin_unlock(&wqe->lock);
    1105           0 :                 if (match.nr_running && !match.cancel_all)
    1106             :                         return IO_WQ_CANCEL_RUNNING;
    1107             :         }
    1108             : 
    1109           0 :         if (match.nr_running)
    1110             :                 return IO_WQ_CANCEL_RUNNING;
    1111           0 :         if (match.nr_pending)
    1112             :                 return IO_WQ_CANCEL_OK;
    1113           0 :         return IO_WQ_CANCEL_NOTFOUND;
    1114             : }
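
A minimal caller-side sketch of the API above; my_match_req and req are
hypothetical (req standing in for a request struct that embeds an
io_wq_work), since io_uring proper supplies its own match functions:

    /* Match a specific work item by pointer identity (hypothetical). */
    static bool my_match_req(struct io_wq_work *work, void *data)
    {
            return work == data;
    }

    switch (io_wq_cancel_cb(wq, my_match_req, &req->work, false)) {
    case IO_WQ_CANCEL_OK:       /* unqueued before running; no completion */
            break;
    case IO_WQ_CANCEL_RUNNING:  /* signalled; completion still posted */
            break;
    case IO_WQ_CANCEL_NOTFOUND: /* nothing matched */
            break;
    }
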
    1115             : 
    1116           0 : static int io_wqe_hash_wake(struct wait_queue_entry *wait, unsigned mode,
    1117             :                             int sync, void *key)
    1118             : {
    1119           0 :         struct io_wqe *wqe = container_of(wait, struct io_wqe, wait);
    1120             :         int i;
    1121             : 
    1122           0 :         list_del_init(&wait->entry);
    1123             : 
    1124             :         rcu_read_lock();
    1125           0 :         for (i = 0; i < IO_WQ_ACCT_NR; i++) {
    1126           0 :                 struct io_wqe_acct *acct = &wqe->acct[i];
    1127             : 
    1128           0 :                 if (test_and_clear_bit(IO_ACCT_STALLED_BIT, &acct->flags))
    1129           0 :                         io_wqe_activate_free_worker(wqe, acct);
    1130             :         }
    1131             :         rcu_read_unlock();
    1132           0 :         return 1;
    1133             : }
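
For context: this wait entry is queued on the shared wq->hash->wait queue
when hashed work stalls behind another item with the same hash (setting
IO_ACCT_STALLED_BIT on the account). Once the hash bucket is released, the
wakeup lands here, the stalled bits are cleared, and a free worker is
activated for each affected account.
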
    1134             : 
    1135           0 : struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
    1136             : {
    1137             :         int ret, node, i;
    1138             :         struct io_wq *wq;
    1139             : 
    1140           0 :         if (WARN_ON_ONCE(!data->free_work || !data->do_work))
    1141             :                 return ERR_PTR(-EINVAL);
    1142           0 :         if (WARN_ON_ONCE(!bounded))
    1143             :                 return ERR_PTR(-EINVAL);
    1144             : 
    1145           0 :         wq = kzalloc(struct_size(wq, wqes, nr_node_ids), GFP_KERNEL);
    1146           0 :         if (!wq)
    1147             :                 return ERR_PTR(-ENOMEM);
    1148           0 :         ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node);
    1149           0 :         if (ret)
    1150             :                 goto err_wq;
    1151             : 
    1152           0 :         refcount_inc(&data->hash->refs);
    1153           0 :         wq->hash = data->hash;
    1154           0 :         wq->free_work = data->free_work;
    1155           0 :         wq->do_work = data->do_work;
    1156             : 
    1157           0 :         ret = -ENOMEM;
    1158           0 :         for_each_node(node) {
    1159             :                 struct io_wqe *wqe;
    1160           0 :                 int alloc_node = node;
    1161             : 
    1162           0 :                 if (!node_online(alloc_node))
    1163             :                         alloc_node = NUMA_NO_NODE;
    1164           0 :                 wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
    1165           0 :                 if (!wqe)
    1166             :                         goto err;
    1167           0 :                 wq->wqes[node] = wqe;
    1168           0 :                 if (!alloc_cpumask_var(&wqe->cpu_mask, GFP_KERNEL))
    1169             :                         goto err;
    1170           0 :                 cpumask_copy(wqe->cpu_mask, cpumask_of_node(node));
    1171           0 :                 wqe->node = alloc_node;
    1172           0 :                 wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
    1173           0 :                 wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
    1174           0 :                                         task_rlimit(current, RLIMIT_NPROC);
    1175           0 :                 INIT_LIST_HEAD(&wqe->wait.entry);
    1176           0 :                 wqe->wait.func = io_wqe_hash_wake;
    1177           0 :                 for (i = 0; i < IO_WQ_ACCT_NR; i++) {
    1178           0 :                         struct io_wqe_acct *acct = &wqe->acct[i];
    1179             : 
    1180           0 :                         acct->index = i;
    1181           0 :                         atomic_set(&acct->nr_running, 0);
    1182           0 :                         INIT_WQ_LIST(&acct->work_list);
    1183             :                         raw_spin_lock_init(&acct->lock);
    1184             :                 }
    1185           0 :                 wqe->wq = wq;
    1186             :                 raw_spin_lock_init(&wqe->lock);
    1187           0 :                 INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
    1188           0 :                 INIT_LIST_HEAD(&wqe->all_list);
    1189             :         }
    1190             : 
    1191           0 :         wq->task = get_task_struct(data->task);
    1192           0 :         atomic_set(&wq->worker_refs, 1);
    1193           0 :         init_completion(&wq->worker_done);
    1194           0 :         return wq;
    1195             : err:
    1196           0 :         io_wq_put_hash(data->hash);
    1197           0 :         cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
    1198           0 :         for_each_node(node) {
    1199           0 :                 if (!wq->wqes[node])
    1200           0 :                         continue;
    1201           0 :                 free_cpumask_var(wq->wqes[node]->cpu_mask);
    1202           0 :                 kfree(wq->wqes[node]);
    1203             :         }
    1204             : err_wq:
    1205           0 :         kfree(wq);
    1206           0 :         return ERR_PTR(ret);
    1207             : }
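
A creation sketch, under stated assumptions: "hash" is a refcounted
struct io_wq_hash the caller already holds, and my_free_work/my_do_work
are hypothetical callbacks standing in for the caller's own:

    struct io_wq_data data = {
            .hash      = hash,
            .task      = current,
            .free_work = my_free_work,  /* hands a finished work item back */
            .do_work   = my_do_work,    /* executes one work item */
    };
    struct io_wq *wq;

    wq = io_wq_create(8, &data);        /* at most 8 bounded workers */
    if (IS_ERR(wq))
            return PTR_ERR(wq);

As the function shows, the bounded argument caps only the bounded account;
the unbounded account defaults to task_rlimit(current, RLIMIT_NPROC).
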
    1208             : 
    1209           0 : static bool io_task_work_match(struct callback_head *cb, void *data)
    1210             : {
    1211             :         struct io_worker *worker;
    1212             : 
    1213           0 :         if (cb->func != create_worker_cb && cb->func != create_worker_cont)
    1214             :                 return false;
    1215           0 :         worker = container_of(cb, struct io_worker, create_work);
    1216           0 :         return worker->wqe->wq == data;
    1217             : }
    1218             : 
    1219           0 : void io_wq_exit_start(struct io_wq *wq)
    1220             : {
    1221           0 :         set_bit(IO_WQ_BIT_EXIT, &wq->state);
    1222           0 : }
    1223             : 
    1224           0 : static void io_wq_cancel_tw_create(struct io_wq *wq)
    1225             : {
    1226             :         struct callback_head *cb;
    1227             : 
    1228           0 :         while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) {
    1229             :                 struct io_worker *worker;
    1230             : 
    1231           0 :                 worker = container_of(cb, struct io_worker, create_work);
    1232           0 :                 io_worker_cancel_cb(worker);
    1233             :                 /*
     1234             :                  * Only the worker continuation helper (create_worker_cont)
     1235             :                  * has the worker allocated, so only then must it be freed.
    1236             :                  */
    1237           0 :                 if (cb->func == create_worker_cont)
    1238           0 :                         kfree(worker);
    1239             :         }
    1240           0 : }
    1241             : 
    1242           0 : static void io_wq_exit_workers(struct io_wq *wq)
    1243             : {
    1244             :         int node;
    1245             : 
    1246           0 :         if (!wq->task)
    1247             :                 return;
    1248             : 
    1249           0 :         io_wq_cancel_tw_create(wq);
    1250             : 
    1251             :         rcu_read_lock();
    1252           0 :         for_each_node(node) {
    1253           0 :                 struct io_wqe *wqe = wq->wqes[node];
    1254             : 
    1255           0 :                 io_wq_for_each_worker(wqe, io_wq_worker_wake, NULL);
    1256             :         }
    1257           0 :         rcu_read_unlock();
    1258           0 :         io_worker_ref_put(wq);
    1259           0 :         wait_for_completion(&wq->worker_done);
    1260             : 
    1261           0 :         for_each_node(node) {
    1262           0 :                 spin_lock_irq(&wq->hash->wait.lock);
    1263           0 :                 list_del_init(&wq->wqes[node]->wait.entry);
    1264           0 :                 spin_unlock_irq(&wq->hash->wait.lock);
    1265             :         }
    1266           0 :         put_task_struct(wq->task);
    1267           0 :         wq->task = NULL;
    1268             : }
    1269             : 
    1270           0 : static void io_wq_destroy(struct io_wq *wq)
    1271             : {
    1272             :         int node;
    1273             : 
    1274           0 :         cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
    1275             : 
    1276           0 :         for_each_node(node) {
    1277           0 :                 struct io_wqe *wqe = wq->wqes[node];
    1278           0 :                 struct io_cb_cancel_data match = {
    1279             :                         .fn             = io_wq_work_match_all,
    1280             :                         .cancel_all     = true,
    1281             :                 };
    1282           0 :                 io_wqe_cancel_pending_work(wqe, &match);
    1283           0 :                 free_cpumask_var(wqe->cpu_mask);
    1284           0 :                 kfree(wqe);
    1285             :         }
    1286           0 :         io_wq_put_hash(wq->hash);
    1287           0 :         kfree(wq);
    1288           0 : }
    1289             : 
    1290           0 : void io_wq_put_and_exit(struct io_wq *wq)
    1291             : {
    1292           0 :         WARN_ON_ONCE(!test_bit(IO_WQ_BIT_EXIT, &wq->state));
    1293             : 
    1294           0 :         io_wq_exit_workers(wq);
    1295           0 :         io_wq_destroy(wq);
    1296           0 : }
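
The WARN_ON_ONCE above pins down the intended shutdown order, sketched here:

    io_wq_exit_start(wq);    /* set IO_WQ_BIT_EXIT; workers begin exiting */
    /* ... caller cancels/flushes whatever work it still owns ... */
    io_wq_put_and_exit(wq);  /* reap workers, cancel pending work, free wq */
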
    1297             : 
    1298             : struct online_data {
    1299             :         unsigned int cpu;
    1300             :         bool online;
    1301             : };
    1302             : 
    1303           0 : static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
    1304             : {
    1305           0 :         struct online_data *od = data;
    1306             : 
    1307           0 :         if (od->online)
    1308           0 :                 cpumask_set_cpu(od->cpu, worker->wqe->cpu_mask);
    1309             :         else
    1310           0 :                 cpumask_clear_cpu(od->cpu, worker->wqe->cpu_mask);
    1311           0 :         return false;
    1312             : }
    1313             : 
    1314             : static int __io_wq_cpu_online(struct io_wq *wq, unsigned int cpu, bool online)
    1315             : {
    1316           0 :         struct online_data od = {
    1317             :                 .cpu = cpu,
    1318             :                 .online = online
    1319             :         };
    1320             :         int i;
    1321             : 
    1322             :         rcu_read_lock();
    1323           0 :         for_each_node(i)
    1324           0 :                 io_wq_for_each_worker(wq->wqes[i], io_wq_worker_affinity, &od);
    1325             :         rcu_read_unlock();
    1326             :         return 0;
    1327             : }
    1328             : 
    1329           0 : static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node)
    1330             : {
    1331           0 :         struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);
    1332             : 
    1333           0 :         return __io_wq_cpu_online(wq, cpu, true);
    1334             : }
    1335             : 
    1336           0 : static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node)
    1337             : {
    1338           0 :         struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);
    1339             : 
    1340           0 :         return __io_wq_cpu_online(wq, cpu, false);
    1341             : }
    1342             : 
    1343           0 : int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask)
    1344             : {
    1345             :         int i;
    1346             : 
    1347             :         rcu_read_lock();
    1348           0 :         for_each_node(i) {
    1349           0 :                 struct io_wqe *wqe = wq->wqes[i];
    1350             : 
    1351           0 :                 if (mask)
    1352           0 :                         cpumask_copy(wqe->cpu_mask, mask);
    1353             :                 else
    1354           0 :                         cpumask_copy(wqe->cpu_mask, cpumask_of_node(i));
    1355             :         }
    1356             :         rcu_read_unlock();
    1357           0 :         return 0;
    1358             : }
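
An affinity sketch (a hypothetical caller pinning all workers to CPUs 0-3;
passing a NULL mask instead restores the per-node default, as the else
branch above shows):

    cpumask_var_t mask;
    int cpu, ret;

    if (!alloc_cpumask_var(&mask, GFP_KERNEL))
            return -ENOMEM;
    cpumask_clear(mask);
    for (cpu = 0; cpu < 4; cpu++)
            cpumask_set_cpu(cpu, mask);
    ret = io_wq_cpu_affinity(wq, mask);
    free_cpumask_var(mask);
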
    1359             : 
    1360             : /*
     1361             :  * Set the per-account (bounded/unbounded) max worker counts; the old
     1362             :  * values are returned through new_count. A zero entry only queries.
    1363             :  */
    1364           0 : int io_wq_max_workers(struct io_wq *wq, int *new_count)
    1365             : {
    1366             :         int prev[IO_WQ_ACCT_NR];
    1367           0 :         bool first_node = true;
    1368             :         int i, node;
    1369             : 
    1370             :         BUILD_BUG_ON((int) IO_WQ_ACCT_BOUND   != (int) IO_WQ_BOUND);
    1371             :         BUILD_BUG_ON((int) IO_WQ_ACCT_UNBOUND != (int) IO_WQ_UNBOUND);
    1372             :         BUILD_BUG_ON((int) IO_WQ_ACCT_NR      != 2);
    1373             : 
    1374           0 :         for (i = 0; i < IO_WQ_ACCT_NR; i++) {
    1375           0 :                 if (new_count[i] > task_rlimit(current, RLIMIT_NPROC))
    1376           0 :                         new_count[i] = task_rlimit(current, RLIMIT_NPROC);
    1377             :         }
    1378             : 
    1379           0 :         for (i = 0; i < IO_WQ_ACCT_NR; i++)
    1380           0 :                 prev[i] = 0;
    1381             : 
    1382             :         rcu_read_lock();
    1383           0 :         for_each_node(node) {
    1384           0 :                 struct io_wqe *wqe = wq->wqes[node];
    1385             :                 struct io_wqe_acct *acct;
    1386             : 
    1387           0 :                 raw_spin_lock(&wqe->lock);
    1388           0 :                 for (i = 0; i < IO_WQ_ACCT_NR; i++) {
    1389           0 :                         acct = &wqe->acct[i];
    1390           0 :                         if (first_node)
    1391           0 :                                 prev[i] = max_t(int, acct->max_workers, prev[i]);
    1392           0 :                         if (new_count[i])
    1393           0 :                                 acct->max_workers = new_count[i];
    1394             :                 }
    1395           0 :                 raw_spin_unlock(&wqe->lock);
    1396           0 :                 first_node = false;
    1397             :         }
    1398             :         rcu_read_unlock();
    1399             : 
    1400           0 :         for (i = 0; i < IO_WQ_ACCT_NR; i++)
    1401           0 :                 new_count[i] = prev[i];
    1402             : 
    1403           0 :         return 0;
    1404             : }
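
A usage sketch: zero entries act as a pure query, so the previous limits
always come back through the same array:

    int counts[IO_WQ_ACCT_NR] = { 0, 0 };

    io_wq_max_workers(wq, counts);    /* query only: counts[] <- old limits */

    counts[IO_WQ_ACCT_BOUND]   = 0;   /* leave the bounded limit untouched */
    counts[IO_WQ_ACCT_UNBOUND] = 64;  /* cap unbounded workers at 64 */
    io_wq_max_workers(wq, counts);    /* counts[] again returns old limits */
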
    1405             : 
    1406           1 : static __init int io_wq_init(void)
    1407             : {
    1408             :         int ret;
    1409             : 
    1410           1 :         ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "io-wq/online",
    1411             :                                         io_wq_cpu_online, io_wq_cpu_offline);
    1412           1 :         if (ret < 0)
    1413             :                 return ret;
    1414           1 :         io_wq_online = ret;
    1415           1 :         return 0;
    1416             : }
    1417             : subsys_initcall(io_wq_init);
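
Note: the dynamic hotplug state id returned by cpuhp_setup_state_multi() is
stored in io_wq_online, and each workqueue attaches to it in io_wq_create()
via cpuhp_state_add_instance_nocalls(); that is how the online/offline
callbacks above get invoked per io_wq instance.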

Generated by: LCOV version 1.14