// SPDX-License-Identifier: GPL-2.0
/*
 * Block device elevator/IO-scheduler.
 *
 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 * when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>

#include <trace/events/block.h>

#include "elevator.h"
#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-wbt.h"
#include "blk-cgroup.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash: indexes pending requests by their end sector for back-merge
 * lookups.
 */
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))
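/*
 * Example: a request covering sectors [100, 108) hashes under key 108, so a
 * bio whose first sector is 108 finds it as a back-merge candidate via
 * elv_rqhash_find(q, bio->bi_iter.bi_sector).
 */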

/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static bool elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.allow_merge)
		return e->type->ops.allow_merge(q, rq, bio);

	return true;
}

/*
 * can we safely merge with this request?
 */
bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
{
	if (!blk_rq_merge_ok(rq, bio))
		return false;

	if (!elv_iosched_allow_bio_merge(rq, bio))
		return false;

	return true;
}
EXPORT_SYMBOL(elv_bio_merge_ok);

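/*
 * An elevator is usable on @q only if it provides every feature bit the
 * queue requires, i.e. q->required_elevator_features must be a subset of
 * e->elevator_features.
 */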
static inline bool elv_support_features(struct request_queue *q,
		const struct elevator_type *e)
{
	return (q->required_elevator_features & e->elevator_features) ==
		q->required_elevator_features;
}

/**
 * elevator_match - Check whether @e's name or alias matches @name
 * @e: Scheduler to test
 * @name: Elevator name to test
 *
 * Return true if the elevator @e's name or alias matches @name.
 */
static bool elevator_match(const struct elevator_type *e, const char *name)
{
	return !strcmp(e->elevator_name, name) ||
		(e->elevator_alias && !strcmp(e->elevator_alias, name));
}

static struct elevator_type *__elevator_find(const char *name)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list)
		if (elevator_match(e, name))
			return e;
	return NULL;
}

static struct elevator_type *elevator_find_get(struct request_queue *q,
		const char *name)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);
	e = __elevator_find(name);
	if (e && (!elv_support_features(q, e) || !elevator_tryget(e)))
		e = NULL;
	spin_unlock(&elv_list_lock);
	return e;
}

static const struct kobj_type elv_ktype;

struct elevator_queue *elevator_alloc(struct request_queue *q,
				      struct elevator_type *e)
{
	struct elevator_queue *eq;

	eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
	if (unlikely(!eq))
		return NULL;

	__elevator_get(e);
	eq->type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);
	hash_init(eq->hash);

	return eq;
}
EXPORT_SYMBOL(elevator_alloc);

static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->type);
	kfree(e);
}

void elevator_exit(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;

	ioc_clear_queue(q);
	blk_mq_sched_free_rqs(q);

	mutex_lock(&e->sysfs_lock);
	blk_mq_exit_sched(q, e);
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}

static inline void __elv_rqhash_del(struct request *rq)
{
	hash_del(&rq->hash);
	rq->rq_flags &= ~RQF_HASHED;
}

void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}
EXPORT_SYMBOL_GPL(elv_rqhash_del);

void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
	rq->rq_flags |= RQF_HASHED;
}
EXPORT_SYMBOL_GPL(elv_rqhash_add);

void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

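/*
 * Look up a back-merge candidate whose end sector equals @offset. Entries
 * that have become non-mergeable are pruned lazily here rather than at the
 * point where they changed state.
 */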
struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_node *next;
	struct request *rq;

	hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
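/*
 * Note: elv_rb_add() sends equal keys to the right, so requests that start
 * at the same sector appear in insertion order during an in-order walk.
 */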
void elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
			p = &(*p)->rb_left;
		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
			p = &(*p)->rb_right;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < blk_rq_pos(rq))
			n = n->rb_left;
		else if (sector > blk_rq_pos(rq))
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);

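/*
 * Decide whether and how @bio may be merged into an existing request.
 * Returns ELEVATOR_NO_MERGE, or a merge type (back, front or discard) with
 * *req set to the request the bio should be merged into. Front merges are
 * only found via the scheduler's ->request_merge() hook.
 */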
enum elv_merge elv_merge(struct request_queue *q, struct request **req,
		struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;

	/*
	 * Levels of merges:
	 *	nomerges:  No merges at all attempted
	 *	noxmerges: Only simple one-hit cache try
	 *	merges:	   All merge tries attempted
	 */
	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
		return ELEVATOR_NO_MERGE;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
		enum elv_merge ret = blk_try_merge(q->last_merge, bio);

		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_noxmerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
	if (__rq && elv_bio_merge_ok(__rq, bio)) {
		*req = __rq;

		if (blk_discard_mergable(__rq))
			return ELEVATOR_DISCARD_MERGE;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->type->ops.request_merge)
		return e->type->ops.request_merge(q, req, bio);

	return ELEVATOR_NO_MERGE;
}

/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise. 'free' will contain all
 * requests that need to be freed.
 */
bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq,
			      struct list_head *free)
{
	struct request *__rq;
	bool ret;

	if (blk_queue_nomerges(q))
		return false;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq)) {
		list_add(&rq->queuelist, free);
		return true;
	}

	if (blk_queue_noxmerges(q))
		return false;

	ret = false;
	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	while (1) {
		__rq = elv_rqhash_find(q, blk_rq_pos(rq));
		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
			break;

		list_add(&rq->queuelist, free);
		/* The merged request could be merged with others, try again */
		ret = true;
		rq = __rq;
	}

	return ret;
}

void elv_merged_request(struct request_queue *q, struct request *rq,
		enum elv_merge type)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.request_merged)
		e->type->ops.request_merged(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
			struct request *next)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.requests_merged)
		e->type->ops.requests_merged(q, rq, next);

	elv_rqhash_reposition(q, rq);
	q->last_merge = rq;
}

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.next_request)
		return e->type->ops.next_request(q, rq);

	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.former_request)
		return e->type->ops.former_request(q, rq);

	return NULL;
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

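/*
 * sysfs show/store handlers: recover the elv_fs_entry wrapping the generic
 * attribute via to_elv() and dispatch under sysfs_lock, returning -ENOENT
 * if the elevator has already been torn down.
 */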
static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->show)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->store)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static const struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};

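/*
 * Expose the elevator under /sys/block/<disk>/queue/iosched. Attribute
 * creation stops at the first failure, so the directory may end up only
 * partially populated; registration itself still succeeds in that case.
 */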
int elv_register_queue(struct request_queue *q, bool uevent)
{
	struct elevator_queue *e = q->elevator;
	int error;

	lockdep_assert_held(&q->sysfs_lock);

	error = kobject_add(&e->kobj, &q->disk->queue_kobj, "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		if (uevent)
			kobject_uevent(&e->kobj, KOBJ_ADD);

		set_bit(ELEVATOR_FLAG_REGISTERED, &e->flags);
	}
	return error;
}

void elv_unregister_queue(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;

	lockdep_assert_held(&q->sysfs_lock);

	if (e && test_and_clear_bit(ELEVATOR_FLAG_REGISTERED, &e->flags)) {
		kobject_uevent(&e->kobj, KOBJ_REMOVE);
		kobject_del(&e->kobj);
	}
}

int elv_register(struct elevator_type *e)
{
	/* insert_requests and dispatch_request are mandatory */
	if (WARN_ON_ONCE(!e->ops.insert_requests || !e->ops.dispatch_request))
		return -EINVAL;

	/* create icq_cache if requested */
	if (e->icq_size) {
		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
			return -EINVAL;

		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
			 "%s_io_cq", e->elevator_name);
		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
						 e->icq_align, 0, NULL);
		if (!e->icq_cache)
			return -ENOMEM;
	}

	/* register, don't allow duplicate names */
	spin_lock(&elv_list_lock);
	if (__elevator_find(e->elevator_name)) {
		spin_unlock(&elv_list_lock);
		kmem_cache_destroy(e->icq_cache);
		return -EBUSY;
	}
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	printk(KERN_INFO "io scheduler %s registered\n", e->elevator_name);

	return 0;
}
EXPORT_SYMBOL_GPL(elv_register);

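/*
 * Typical registration from a scheduler module. This is an illustrative
 * sketch only: the "ex_*" names are hypothetical, and real schedulers such
 * as mq-deadline fill in many more fields (attrs, features, an alias, ...):
 *
 *	static struct elevator_type ex_iosched = {
 *		.ops = {
 *			.insert_requests	= ex_insert_requests,
 *			.dispatch_request	= ex_dispatch_request,
 *		},
 *		.elevator_name	= "example",
 *		.elevator_owner	= THIS_MODULE,
 *	};
 *
 *	static int __init ex_init(void)
 *	{
 *		return elv_register(&ex_iosched);
 *	}
 *
 *	static void __exit ex_exit(void)
 *	{
 *		elv_unregister(&ex_iosched);
 *	}
 *
 *	module_init(ex_init);
 *	module_exit(ex_exit);
 */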
void elv_unregister(struct elevator_type *e)
{
	/* unregister */
	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);

	/*
	 * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
	 * sure all RCU operations are complete before proceeding.
	 */
	if (e->icq_cache) {
		rcu_barrier();
		kmem_cache_destroy(e->icq_cache);
		e->icq_cache = NULL;
	}
}
EXPORT_SYMBOL_GPL(elv_unregister);

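/*
 * I/O schedulers only apply to blk-mq queues whose tag set does not set
 * BLK_MQ_F_NO_SCHED; bio-based queues never get an elevator.
 */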
static inline bool elv_support_iosched(struct request_queue *q)
{
	if (!queue_is_mq(q) ||
	    (q->tag_set && (q->tag_set->flags & BLK_MQ_F_NO_SCHED)))
		return false;
	return true;
}

/*
 * For single queue devices, default to using mq-deadline. If we have multiple
 * queues that do not share tags, or if mq-deadline is not available, default
 * to "none".
 */
static struct elevator_type *elevator_get_default(struct request_queue *q)
{
	if (q->tag_set && q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT)
		return NULL;

	if (q->nr_hw_queues != 1 &&
	    !blk_mq_is_shared_tags(q->tag_set->flags))
		return NULL;

	return elevator_find_get(q, "mq-deadline");
}

/*
 * Get the first elevator providing the features required by the request queue.
 * Default to "none" if no matching elevator is found.
 */
static struct elevator_type *elevator_get_by_features(struct request_queue *q)
{
	struct elevator_type *e, *found = NULL;

	spin_lock(&elv_list_lock);

	list_for_each_entry(e, &elv_list, list) {
		if (elv_support_features(q, e)) {
			found = e;
			break;
		}
	}

	if (found && !elevator_tryget(found))
		found = NULL;

	spin_unlock(&elv_list_lock);
	return found;
}

/*
 * For a device queue that has no required features, use the default elevator
 * settings. Otherwise, use the first available elevator matching the required
 * features. If no suitable elevator is found, or if the chosen elevator's
 * initialization fails, fall back to the "none" elevator (no elevator).
 */
void elevator_init_mq(struct request_queue *q)
{
	struct elevator_type *e;
	int err;

	if (!elv_support_iosched(q))
		return;

	WARN_ON_ONCE(blk_queue_registered(q));

	if (unlikely(q->elevator))
		return;

	if (!q->required_elevator_features)
		e = elevator_get_default(q);
	else
		e = elevator_get_by_features(q);
	if (!e)
		return;

	/*
	 * We are called before adding disk, when there isn't any FS I/O,
	 * so freezing queue plus canceling dispatch work is enough to
	 * drain any dispatch activities originated from passthrough
	 * requests, then no need to quiesce queue which may add long boot
	 * latency, especially when lots of disks are involved.
	 */
	blk_mq_freeze_queue(q);
	blk_mq_cancel_work_sync(q);

	err = blk_mq_init_sched(q, e);

	blk_mq_unfreeze_queue(q);

	if (err) {
		pr_warn("\"%s\" elevator initialization failed, "
			"falling back to \"none\"\n", e->elevator_name);
	}

	elevator_put(e);
}

/*
 * Switch to new_e io scheduler.
 *
 * If switching fails, we are most likely out of memory and unable to restore
 * the old io scheduler, so the io scheduler is left as "none".
 */
int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	int ret;

	lockdep_assert_held(&q->sysfs_lock);

	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	if (q->elevator) {
		elv_unregister_queue(q);
		elevator_exit(q);
	}

	ret = blk_mq_init_sched(q, new_e);
	if (ret)
		goto out_unfreeze;

	ret = elv_register_queue(q, true);
	if (ret) {
		elevator_exit(q);
		goto out_unfreeze;
	}
	blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);

out_unfreeze:
	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	if (ret) {
		pr_warn("elv: switch to \"%s\" failed, falling back to \"none\"\n",
			new_e->elevator_name);
	}

	return ret;
}

void elevator_disable(struct request_queue *q)
{
	lockdep_assert_held(&q->sysfs_lock);

	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	elv_unregister_queue(q);
	elevator_exit(q);
	blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
	q->elevator = NULL;
	q->nr_requests = q->tag_set->queue_depth;
	blk_add_trace_msg(q, "elv switch: none");

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);
}

/*
 * Switch this queue to the given IO scheduler.
 */
static int elevator_change(struct request_queue *q, const char *elevator_name)
{
	struct elevator_type *e;
	int ret;

	/* Make sure queue is not in the middle of being removed */
	if (!blk_queue_registered(q))
		return -ENOENT;

	if (!strncmp(elevator_name, "none", 4)) {
		if (q->elevator)
			elevator_disable(q);
		return 0;
	}

	if (q->elevator && elevator_match(q->elevator->type, elevator_name))
		return 0;

	e = elevator_find_get(q, elevator_name);
	if (!e) {
		request_module("%s-iosched", elevator_name);
		e = elevator_find_get(q, elevator_name);
		if (!e)
			return -EINVAL;
	}
	ret = elevator_switch(q, e);
	elevator_put(e);
	return ret;
}

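/*
 * Handler for writes to /sys/block/<disk>/queue/scheduler, e.g.:
 *
 *	echo mq-deadline > /sys/block/sda/queue/scheduler
 *
 * The name is copied and stripped of surrounding whitespace so the trailing
 * newline from echo does not break the match.
 */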
ssize_t elv_iosched_store(struct request_queue *q, const char *buf,
			  size_t count)
{
	char elevator_name[ELV_NAME_MAX];
	int ret;

	if (!elv_support_iosched(q))
		return count;

	strscpy(elevator_name, buf, sizeof(elevator_name));
	ret = elevator_change(q, strstrip(elevator_name));
	if (!ret)
		return count;
	return ret;
}

ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	struct elevator_queue *eq = q->elevator;
	struct elevator_type *cur = NULL, *e;
	int len = 0;

	if (!elv_support_iosched(q))
		return sprintf(name, "none\n");

	if (!q->elevator) {
		len += sprintf(name+len, "[none] ");
	} else {
		len += sprintf(name+len, "none ");
		cur = eq->type;
	}

	spin_lock(&elv_list_lock);
	list_for_each_entry(e, &elv_list, list) {
		if (e == cur)
			len += sprintf(name+len, "[%s] ", e->elevator_name);
		else if (elv_support_features(q, e))
			len += sprintf(name+len, "%s ", e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	len += sprintf(name+len, "\n");
	return len;
}

struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);

static int __init elevator_setup(char *str)
{
	pr_warn("Kernel parameter elevator= does not have any effect anymore.\n"
		"Please use sysfs to set IO scheduler for individual devices.\n");
	return 1;
}

__setup("elevator=", elevator_setup);