// SPDX-License-Identifier: GPL-2.0
/*
 * MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
 * for the blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/sbitmap.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/*
 * See Documentation/block/deadline-iosched.rst
 */
static const int read_expire = HZ / 2;	/* max time before a read is submitted. */
static const int write_expire = 5 * HZ;	/* ditto for writes, these limits are SOFT! */
/*
 * Time after which to dispatch lower priority requests even if higher
 * priority requests are pending.
 */
static const int prio_aging_expire = 10 * HZ;
static const int writes_starved = 2;	/* max times reads can starve a write */
static const int fifo_batch = 16;	/* # of sequential requests treated as one
					   by the above parameters. For throughput. */

enum dd_data_dir {
	DD_READ		= READ,
	DD_WRITE	= WRITE,
};

enum { DD_DIR_COUNT = 2 };

enum dd_prio {
	DD_RT_PRIO	= 0,
	DD_BE_PRIO	= 1,
	DD_IDLE_PRIO	= 2,
	DD_PRIO_MAX	= 2,
};

enum { DD_PRIO_COUNT = 3 };

/*
 * I/O statistics per I/O priority. It is fine if these counters overflow.
 * What matters is that these counters are at least as wide as
 * log2(max_outstanding_requests).
 */
struct io_stats_per_prio {
	uint32_t inserted;
	uint32_t merged;
	uint32_t dispatched;
	atomic_t completed;
};
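
/*
 * Note that only 'completed' is an atomic_t: it is incremented from
 * dd_finish_request() without dd->lock held, whereas 'inserted', 'merged'
 * and 'dispatched' are only modified with dd->lock held.
 */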

/*
 * Deadline scheduler data per I/O priority (enum dd_prio). Requests are
 * present on both sort_list[] and fifo_list[].
 */
struct dd_per_prio {
	struct list_head dispatch;
	struct rb_root sort_list[DD_DIR_COUNT];
	struct list_head fifo_list[DD_DIR_COUNT];
	/* Next request in FIFO order. Read, write or both are NULL. */
	struct request *next_rq[DD_DIR_COUNT];
	struct io_stats_per_prio stats;
};

struct deadline_data {
	/*
	 * run time data
	 */

	struct dd_per_prio per_prio[DD_PRIO_COUNT];

	/* Data direction of latest dispatched request. */
	enum dd_data_dir last_dir;
	unsigned int batching;		/* number of sequential requests made */
	unsigned int starved;		/* times reads have starved writes */

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	int fifo_expire[DD_DIR_COUNT];
	int fifo_batch;
	int writes_starved;
	int front_merges;
	u32 async_depth;
	int prio_aging_expire;

	spinlock_t lock;
	spinlock_t zone_lock;
};

/* Maps an I/O priority class to a deadline scheduler priority. */
static const enum dd_prio ioprio_class_to_prio[] = {
	[IOPRIO_CLASS_NONE]	= DD_BE_PRIO,
	[IOPRIO_CLASS_RT]	= DD_RT_PRIO,
	[IOPRIO_CLASS_BE]	= DD_BE_PRIO,
	[IOPRIO_CLASS_IDLE]	= DD_IDLE_PRIO,
};
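
/*
 * For example, a request submitted by a task running under "ionice -c 1"
 * (IOPRIO_CLASS_RT) lands in the DD_RT_PRIO queues and is considered for
 * dispatch before best-effort and idle requests.
 */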

static inline struct rb_root *
deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq)
{
	return &per_prio->sort_list[rq_data_dir(rq)];
}

/*
 * Returns the I/O priority class (IOPRIO_CLASS_*) that has been assigned to a
 * request.
 */
static u8 dd_rq_ioclass(struct request *rq)
{
	return IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
}

/*
 * get the request before `rq' in sector-sorted order
 */
static inline struct request *
deadline_earlier_request(struct request *rq)
{
	struct rb_node *node = rb_prev(&rq->rb_node);

	if (node)
		return rb_entry_rq(node);

	return NULL;
}

/*
 * get the request after `rq' in sector-sorted order
 */
static inline struct request *
deadline_latter_request(struct request *rq)
{
	struct rb_node *node = rb_next(&rq->rb_node);

	if (node)
		return rb_entry_rq(node);

	return NULL;
}

static void
deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
{
	struct rb_root *root = deadline_rb_root(per_prio, rq);

	elv_rb_add(root, rq);
}

static inline void
deadline_del_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
{
	const enum dd_data_dir data_dir = rq_data_dir(rq);

	if (per_prio->next_rq[data_dir] == rq)
		per_prio->next_rq[data_dir] = deadline_latter_request(rq);

	elv_rb_del(deadline_rb_root(per_prio, rq), rq);
}

/*
 * remove rq from rbtree and fifo.
 */
static void deadline_remove_request(struct request_queue *q,
				    struct dd_per_prio *per_prio,
				    struct request *rq)
{
	list_del_init(&rq->queuelist);

	/*
	 * We might not be on the rbtree, if we are doing an insert merge
	 */
	if (!RB_EMPTY_NODE(&rq->rb_node))
		deadline_del_rq_rb(per_prio, rq);

	elv_rqhash_del(q, rq);
	if (q->last_merge == rq)
		q->last_merge = NULL;
}

static void dd_request_merged(struct request_queue *q, struct request *req,
			      enum elv_merge type)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = dd_rq_ioclass(req);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
	struct dd_per_prio *per_prio = &dd->per_prio[prio];

	/*
	 * if the merge was a front merge, we need to reposition request
	 */
	if (type == ELEVATOR_FRONT_MERGE) {
		elv_rb_del(deadline_rb_root(per_prio, req), req);
		deadline_add_rq_rb(per_prio, req);
	}
}

/*
 * Callback function that is invoked after @next has been merged into @req.
 */
static void dd_merged_requests(struct request_queue *q, struct request *req,
			       struct request *next)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = dd_rq_ioclass(next);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];

	lockdep_assert_held(&dd->lock);

	dd->per_prio[prio].stats.merged++;

	/*
	 * if next expires before rq, assign its expire time to rq
	 * and move into next position (next will be deleted) in fifo
	 */
	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
		if (time_before((unsigned long)next->fifo_time,
				(unsigned long)req->fifo_time)) {
			list_move(&req->queuelist, &next->queuelist);
			req->fifo_time = next->fifo_time;
		}
	}

	/*
	 * kill knowledge of next, this one is a goner
	 */
	deadline_remove_request(q, &dd->per_prio[prio], next);
}

/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
		      struct request *rq)
{
	const enum dd_data_dir data_dir = rq_data_dir(rq);

	per_prio->next_rq[data_dir] = deadline_latter_request(rq);

	/*
	 * take it off the sort and fifo list
	 */
	deadline_remove_request(rq->q, per_prio, rq);
}

/* Number of requests queued for a given priority level. */
static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
{
	const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats;

	lockdep_assert_held(&dd->lock);

	return stats->inserted - atomic_read(&stats->completed);
}
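
/*
 * The unsigned subtraction above stays correct across 32-bit wraparound:
 * e.g. if 'inserted' has wrapped around to 3 while 'completed' is
 * 0xfffffffe, 3 - 0xfffffffe yields 5 modulo 2^32, the true number of
 * queued requests.
 */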

/*
 * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
 * 1 otherwise. Requires !list_empty(&per_prio->fifo_list[data_dir])
 */
static inline int deadline_check_fifo(struct dd_per_prio *per_prio,
				      enum dd_data_dir data_dir)
{
	struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);

	/*
	 * rq is expired!
	 */
	if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
		return 1;

	return 0;
}

/*
 * Check if rq has a sequential request preceding it.
 */
static bool deadline_is_seq_write(struct deadline_data *dd, struct request *rq)
{
	struct request *prev = deadline_earlier_request(rq);

	if (!prev)
		return false;

	return blk_rq_pos(prev) + blk_rq_sectors(prev) == blk_rq_pos(rq);
}
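
/*
 * Example: if 'prev' covers sectors 2048..2055 (blk_rq_pos() == 2048,
 * blk_rq_sectors() == 8) and 'rq' starts at sector 2056, the two requests
 * form one sequential stream and this helper returns true.
 */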

/*
 * Skip all write requests that are sequential from @rq, even if we cross
 * a zone boundary.
 */
static struct request *deadline_skip_seq_writes(struct deadline_data *dd,
						struct request *rq)
{
	sector_t pos = blk_rq_pos(rq);
	sector_t skipped_sectors = 0;

	while (rq) {
		if (blk_rq_pos(rq) != pos + skipped_sectors)
			break;
		skipped_sectors += blk_rq_sectors(rq);
		rq = deadline_latter_request(rq);
	}

	return rq;
}
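
/*
 * Example: with queued writes at sectors 0..7, 8..15 and 32..39, calling
 * this helper on the request at sector 0 skips the contiguous run 0..15
 * and returns the request at sector 32, the first non-sequential one.
 */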

/*
 * For the specified data direction, return the next request to
 * dispatch using arrival ordered lists.
 */
static struct request *
deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
		      enum dd_data_dir data_dir)
{
	struct request *rq;
	unsigned long flags;

	if (list_empty(&per_prio->fifo_list[data_dir]))
		return NULL;

	rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
	if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
		return rq;

	/*
	 * Look for a write request that can be dispatched, that is one with
	 * an unlocked target zone. For some HDDs, breaking a sequential
	 * write stream can lead to lower throughput, so make sure to preserve
	 * sequential write streams, even if that stream crosses into the next
	 * zones and these zones are unlocked.
	 */
	spin_lock_irqsave(&dd->zone_lock, flags);
	list_for_each_entry(rq, &per_prio->fifo_list[DD_WRITE], queuelist) {
		if (blk_req_can_dispatch_to_zone(rq) &&
		    (blk_queue_nonrot(rq->q) ||
		     !deadline_is_seq_write(dd, rq)))
			goto out;
	}
	rq = NULL;
out:
	spin_unlock_irqrestore(&dd->zone_lock, flags);

	return rq;
}

/*
 * For the specified data direction, return the next request to
 * dispatch using sector position sorted lists.
 */
static struct request *
deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
		      enum dd_data_dir data_dir)
{
	struct request *rq;
	unsigned long flags;

	rq = per_prio->next_rq[data_dir];
	if (!rq)
		return NULL;

	if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
		return rq;

	/*
	 * Look for a write request that can be dispatched, that is one with
	 * an unlocked target zone. For some HDDs, breaking a sequential
	 * write stream can lead to lower throughput, so make sure to preserve
	 * sequential write streams, even if that stream crosses into the next
	 * zones and these zones are unlocked.
	 */
	spin_lock_irqsave(&dd->zone_lock, flags);
	while (rq) {
		if (blk_req_can_dispatch_to_zone(rq))
			break;
		if (blk_queue_nonrot(rq->q))
			rq = deadline_latter_request(rq);
		else
			rq = deadline_skip_seq_writes(dd, rq);
	}
	spin_unlock_irqrestore(&dd->zone_lock, flags);

	return rq;
}

/*
 * Returns true if and only if @rq started after @latest_start where
 * @latest_start is in jiffies.
 */
static bool started_after(struct deadline_data *dd, struct request *rq,
			  unsigned long latest_start)
{
	unsigned long start_time = (unsigned long)rq->fifo_time;

	start_time -= dd->fifo_expire[rq_data_dir(rq)];

	return time_after(start_time, latest_start);
}
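
/*
 * dd_insert_request() sets rq->fifo_time to jiffies + fifo_expire, so
 * subtracting fifo_expire recovers the insertion time. Example (assuming
 * HZ == 1000): a read inserted at jiffies == 1000 carries fifo_time == 1500
 * (read_expire == HZ / 2); started_after(dd, rq, 900) is then true because
 * the request started at 1000, which is after 900.
 */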

/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc and with a start time <= @latest_start.
 */
static struct request *__dd_dispatch_request(struct deadline_data *dd,
					     struct dd_per_prio *per_prio,
					     unsigned long latest_start)
{
	struct request *rq, *next_rq;
	enum dd_data_dir data_dir;
	enum dd_prio prio;
	u8 ioprio_class;

	lockdep_assert_held(&dd->lock);

	if (!list_empty(&per_prio->dispatch)) {
		rq = list_first_entry(&per_prio->dispatch, struct request,
				      queuelist);
		if (started_after(dd, rq, latest_start))
			return NULL;
		list_del_init(&rq->queuelist);
		goto done;
	}

	/*
	 * batches are currently reads XOR writes
	 */
	rq = deadline_next_request(dd, per_prio, dd->last_dir);
	if (rq && dd->batching < dd->fifo_batch)
		/* we have a next request and are still entitled to batch */
		goto dispatch_request;

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */

	if (!list_empty(&per_prio->fifo_list[DD_READ])) {
		BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_READ]));

		if (deadline_fifo_request(dd, per_prio, DD_WRITE) &&
		    (dd->starved++ >= dd->writes_starved))
			goto dispatch_writes;

		data_dir = DD_READ;

		goto dispatch_find_request;
	}

	/*
	 * there are either no reads or writes have been starved
	 */

	if (!list_empty(&per_prio->fifo_list[DD_WRITE])) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_WRITE]));

		dd->starved = 0;

		data_dir = DD_WRITE;

		goto dispatch_find_request;
	}

	return NULL;

dispatch_find_request:
	/*
	 * we are not running a batch, find best request for selected data_dir
	 */
	next_rq = deadline_next_request(dd, per_prio, data_dir);
	if (deadline_check_fifo(per_prio, data_dir) || !next_rq) {
		/*
		 * A deadline has expired, the last request was in the other
		 * direction, or we have run out of higher-sectored requests.
		 * Start again from the request with the earliest expiry time.
		 */
		rq = deadline_fifo_request(dd, per_prio, data_dir);
	} else {
		/*
		 * The last req was the same dir and we have a next request in
		 * sort order. No expired requests so continue on from here.
		 */
		rq = next_rq;
	}

	/*
	 * For a zoned block device, if we only have writes queued and none of
	 * them can be dispatched, rq will be NULL.
	 */
	if (!rq)
		return NULL;

	dd->last_dir = data_dir;
	dd->batching = 0;

dispatch_request:
	if (started_after(dd, rq, latest_start))
		return NULL;

	/*
	 * rq is the selected appropriate request.
	 */
	dd->batching++;
	deadline_move_request(dd, per_prio, rq);
done:
	ioprio_class = dd_rq_ioclass(rq);
	prio = ioprio_class_to_prio[ioprio_class];
	dd->per_prio[prio].stats.dispatched++;
	/*
	 * If the request needs its target zone locked, do it.
	 */
	blk_req_zone_write_lock(rq);
	rq->rq_flags |= RQF_STARTED;
	return rq;
}

/*
 * Check whether there are any requests with priority other than DD_RT_PRIO
 * that were inserted more than prio_aging_expire jiffies ago.
 */
static struct request *dd_dispatch_prio_aged_requests(struct deadline_data *dd,
						      unsigned long now)
{
	struct request *rq;
	enum dd_prio prio;
	int prio_cnt;

	lockdep_assert_held(&dd->lock);

	prio_cnt = !!dd_queued(dd, DD_RT_PRIO) + !!dd_queued(dd, DD_BE_PRIO) +
		   !!dd_queued(dd, DD_IDLE_PRIO);
	if (prio_cnt < 2)
		return NULL;

	for (prio = DD_BE_PRIO; prio <= DD_PRIO_MAX; prio++) {
		rq = __dd_dispatch_request(dd, &dd->per_prio[prio],
					   now - dd->prio_aging_expire);
		if (rq)
			return rq;
	}

	return NULL;
}
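
/*
 * The prio_cnt < 2 check above means aging only kicks in while requests
 * are queued at two or more priority levels; with a single active level
 * nothing can be starved by a higher one, so normal dispatch order is fine.
 */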

/*
 * Called from blk_mq_run_hw_queue() -> __blk_mq_sched_dispatch_requests().
 *
 * One confusing aspect here is that we get called for a specific
 * hardware queue, but we may return a request that is for a
 * different hardware queue. This is because mq-deadline has shared
 * state for all hardware queues, in terms of sorting, FIFOs, etc.
 */
static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
	const unsigned long now = jiffies;
	struct request *rq;
	enum dd_prio prio;

	spin_lock(&dd->lock);
	rq = dd_dispatch_prio_aged_requests(dd, now);
	if (rq)
		goto unlock;

	/*
	 * Next, dispatch requests in priority order. Ignore lower priority
	 * requests if any higher priority requests are pending.
	 */
	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
		rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now);
		if (rq || dd_queued(dd, prio))
			break;
	}

unlock:
	spin_unlock(&dd->lock);

	return rq;
}

/*
 * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
 * function is used by __blk_mq_get_tag().
 */
static void dd_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
{
	struct deadline_data *dd = data->q->elevator->elevator_data;

	/* Do not throttle synchronous reads. */
	if (op_is_sync(op) && !op_is_write(op))
		return;

	/*
	 * Throttle asynchronous requests and writes such that these requests
	 * do not block the allocation of synchronous requests.
	 */
	data->shallow_depth = dd->async_depth;
}

/* Called by blk_mq_update_nr_requests(). */
static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	struct blk_mq_tags *tags = hctx->sched_tags;
	unsigned int shift = tags->bitmap_tags->sb.shift;

	dd->async_depth = max(1U, 3 * (1U << shift) / 4);

	sbitmap_queue_min_shallow_depth(tags->bitmap_tags, dd->async_depth);
}
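
/*
 * Example: with a sbitmap word shift of 6 (64 bits per word), async_depth
 * becomes max(1, 3 * 64 / 4) == 48, i.e. asynchronous requests may consume
 * at most 48 tags per sbitmap word, leaving headroom for synchronous I/O.
 */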

/* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	dd_depth_updated(hctx);
	return 0;
}

static void dd_exit_sched(struct elevator_queue *e)
{
	struct deadline_data *dd = e->elevator_data;
	enum dd_prio prio;

	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
		struct dd_per_prio *per_prio = &dd->per_prio[prio];
		const struct io_stats_per_prio *stats = &per_prio->stats;
		uint32_t queued;

		WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ]));
		WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE]));

		spin_lock(&dd->lock);
		queued = dd_queued(dd, prio);
		spin_unlock(&dd->lock);

		WARN_ONCE(queued != 0,
			  "statistics for priority %d: i %u m %u d %u c %u\n",
			  prio, stats->inserted, stats->merged,
			  stats->dispatched, atomic_read(&stats->completed));
	}

	kfree(dd);
}

/*
 * initialize elevator private data (deadline_data).
 */
static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct deadline_data *dd;
	struct elevator_queue *eq;
	enum dd_prio prio;
	int ret = -ENOMEM;

	eq = elevator_alloc(q, e);
	if (!eq)
		return ret;

	dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
	if (!dd)
		goto put_eq;

	eq->elevator_data = dd;

	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
		struct dd_per_prio *per_prio = &dd->per_prio[prio];

		INIT_LIST_HEAD(&per_prio->dispatch);
		INIT_LIST_HEAD(&per_prio->fifo_list[DD_READ]);
		INIT_LIST_HEAD(&per_prio->fifo_list[DD_WRITE]);
		per_prio->sort_list[DD_READ] = RB_ROOT;
		per_prio->sort_list[DD_WRITE] = RB_ROOT;
	}
	dd->fifo_expire[DD_READ] = read_expire;
	dd->fifo_expire[DD_WRITE] = write_expire;
	dd->writes_starved = writes_starved;
	dd->front_merges = 1;
	dd->last_dir = DD_WRITE;
	dd->fifo_batch = fifo_batch;
	dd->prio_aging_expire = prio_aging_expire;
	spin_lock_init(&dd->lock);
	spin_lock_init(&dd->zone_lock);

	q->elevator = eq;
	return 0;

put_eq:
	kobject_put(&eq->kobj);
	return ret;
}

/*
 * Try to merge @bio into an existing request. If @bio has been merged into
 * an existing request, store the pointer to that request into *@rq.
 */
static int dd_request_merge(struct request_queue *q, struct request **rq,
			    struct bio *bio)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = IOPRIO_PRIO_CLASS(bio->bi_ioprio);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
	struct dd_per_prio *per_prio = &dd->per_prio[prio];
	sector_t sector = bio_end_sector(bio);
	struct request *__rq;

	if (!dd->front_merges)
		return ELEVATOR_NO_MERGE;

	__rq = elv_rb_find(&per_prio->sort_list[bio_data_dir(bio)], sector);
	if (__rq) {
		BUG_ON(sector != blk_rq_pos(__rq));

		if (elv_bio_merge_ok(__rq, bio)) {
			*rq = __rq;
			if (blk_discard_mergable(__rq))
				return ELEVATOR_DISCARD_MERGE;
			return ELEVATOR_FRONT_MERGE;
		}
	}

	return ELEVATOR_NO_MERGE;
}

/*
 * Attempt to merge a bio into an existing request. This function is called
 * before @bio is associated with a request.
 */
static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
			 unsigned int nr_segs)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct request *free = NULL;
	bool ret;

	spin_lock(&dd->lock);
	ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
	spin_unlock(&dd->lock);

	if (free)
		blk_mq_free_request(free);

	return ret;
}

/*
 * add rq to rbtree and fifo
 */
static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			      bool at_head)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	const enum dd_data_dir data_dir = rq_data_dir(rq);
	u16 ioprio = req_get_ioprio(rq);
	u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio);
	struct dd_per_prio *per_prio;
	enum dd_prio prio;
	LIST_HEAD(free);

	lockdep_assert_held(&dd->lock);

	/*
	 * This may be a requeue of a write request that has locked its
	 * target zone. If it is the case, this releases the zone lock.
	 */
	blk_req_zone_write_unlock(rq);

	prio = ioprio_class_to_prio[ioprio_class];
	per_prio = &dd->per_prio[prio];
	if (!rq->elv.priv[0]) {
		per_prio->stats.inserted++;
		rq->elv.priv[0] = (void *)(uintptr_t)1;
	}

	if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
		blk_mq_free_requests(&free);
		return;
	}

	trace_block_rq_insert(rq);

	if (at_head) {
		list_add(&rq->queuelist, &per_prio->dispatch);
		rq->fifo_time = jiffies;
	} else {
		deadline_add_rq_rb(per_prio, rq);

		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * set expire time and add to fifo list
		 */
		rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
		list_add_tail(&rq->queuelist, &per_prio->fifo_list[data_dir]);
	}
}
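
/*
 * Note that the at_head path above still initializes rq->fifo_time even
 * though the request bypasses the FIFO: __dd_dispatch_request() passes
 * dispatch-list requests to started_after(), which reads fifo_time, so
 * leaving it stale could make a requeued request appear to have started
 * in the future and delay its dispatch.
 */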

/*
 * Called from blk_mq_sched_insert_request() or blk_mq_sched_insert_requests().
 */
static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
			       struct list_head *list, bool at_head)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;

	spin_lock(&dd->lock);
	while (!list_empty(list)) {
		struct request *rq;

		rq = list_first_entry(list, struct request, queuelist);
		list_del_init(&rq->queuelist);
		dd_insert_request(hctx, rq, at_head);
	}
	spin_unlock(&dd->lock);
}

/* Callback from inside blk_mq_rq_ctx_init(). */
static void dd_prepare_request(struct request *rq)
{
	rq->elv.priv[0] = NULL;
}

static bool dd_has_write_work(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
	enum dd_prio p;

	for (p = 0; p <= DD_PRIO_MAX; p++)
		if (!list_empty_careful(&dd->per_prio[p].fifo_list[DD_WRITE]))
			return true;

	return false;
}

/*
 * Callback from inside blk_mq_free_request().
 *
 * For zoned block devices, write unlock the target zone of
 * completed write requests. Do this while holding the zone lock
 * spinlock so that the zone is never unlocked while deadline_fifo_request()
 * or deadline_next_request() are executing. This function is called for
 * all requests, whether or not these requests complete successfully.
 *
 * For a zoned block device, __dd_dispatch_request() may have stopped
 * dispatching requests if all the queued requests are write requests directed
 * at zones that are already locked due to on-going write requests. To ensure
 * write request dispatch progress in this case, mark the queue as needing a
 * restart to ensure that the queue is run again after completion of the
 * request and zones being unlocked.
 */
static void dd_finish_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = dd_rq_ioclass(rq);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
	struct dd_per_prio *per_prio = &dd->per_prio[prio];

	/*
	 * The block layer core may call dd_finish_request() without having
	 * called dd_insert_requests(). Skip requests that bypassed I/O
	 * scheduling. See also blk_mq_request_bypass_insert().
	 */
	if (!rq->elv.priv[0])
		return;

	atomic_inc(&per_prio->stats.completed);

	if (blk_queue_is_zoned(q)) {
		unsigned long flags;

		spin_lock_irqsave(&dd->zone_lock, flags);
		blk_req_zone_write_unlock(rq);
		spin_unlock_irqrestore(&dd->zone_lock, flags);

		if (dd_has_write_work(rq->mq_hctx))
			blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
	}
}

static bool dd_has_work_for_prio(struct dd_per_prio *per_prio)
{
	return !list_empty_careful(&per_prio->dispatch) ||
		!list_empty_careful(&per_prio->fifo_list[DD_READ]) ||
		!list_empty_careful(&per_prio->fifo_list[DD_WRITE]);
}

static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
	enum dd_prio prio;

	for (prio = 0; prio <= DD_PRIO_MAX; prio++)
		if (dd_has_work_for_prio(&dd->per_prio[prio]))
			return true;

	return false;
}

/*
 * sysfs parts below
 */
#define SHOW_INT(__FUNC, __VAR)						\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct deadline_data *dd = e->elevator_data;			\
									\
	return sysfs_emit(page, "%d\n", __VAR);				\
}
#define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR))
SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]);
SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
SHOW_JIFFIES(deadline_prio_aging_expire_show, dd->prio_aging_expire);
SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
SHOW_INT(deadline_front_merges_show, dd->front_merges);
SHOW_INT(deadline_async_depth_show, dd->async_depth);
SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
#undef SHOW_INT
#undef SHOW_JIFFIES

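/*
 * For illustration, SHOW_JIFFIES(deadline_read_expire_show, ...) above
 * expands to roughly:
 *
 *	static ssize_t deadline_read_expire_show(struct elevator_queue *e,
 *						 char *page)
 *	{
 *		struct deadline_data *dd = e->elevator_data;
 *
 *		return sysfs_emit(page, "%d\n",
 *				  jiffies_to_msecs(dd->fifo_expire[DD_READ]));
 *	}
 *
 * i.e. the expiry attributes are reported in milliseconds, not jiffies.
 */
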
| 946 | #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ |
| 947 | static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \ |
| 948 | { \ |
| 949 | struct deadline_data *dd = e->elevator_data; \ |
Bart Van Assche | d6d7f01 | 2021-06-17 17:44:50 -0700 | [diff] [blame] | 950 | int __data, __ret; \ |
| 951 | \ |
| 952 | __ret = kstrtoint(page, 0, &__data); \ |
| 953 | if (__ret < 0) \ |
| 954 | return __ret; \ |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 955 | if (__data < (MIN)) \ |
| 956 | __data = (MIN); \ |
| 957 | else if (__data > (MAX)) \ |
| 958 | __data = (MAX); \ |
Bart Van Assche | d6d7f01 | 2021-06-17 17:44:50 -0700 | [diff] [blame] | 959 | *(__PTR) = __CONV(__data); \ |
weiping zhang | 235f8da | 2017-08-25 01:11:33 +0800 | [diff] [blame] | 960 | return count; \ |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 961 | } |
Bart Van Assche | d6d7f01 | 2021-06-17 17:44:50 -0700 | [diff] [blame] | 962 | #define STORE_INT(__FUNC, __PTR, MIN, MAX) \ |
| 963 | STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, ) |
| 964 | #define STORE_JIFFIES(__FUNC, __PTR, MIN, MAX) \ |
| 965 | STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies) |
| 966 | STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX); |
| 967 | STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX); |
Bart Van Assche | fcc3046 | 2021-09-27 15:03:28 -0700 | [diff] [blame] | 968 | STORE_JIFFIES(deadline_prio_aging_expire_store, &dd->prio_aging_expire, 0, INT_MAX); |
Bart Van Assche | d6d7f01 | 2021-06-17 17:44:50 -0700 | [diff] [blame] | 969 | STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX); |
| 970 | STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1); |
Jens Axboe | 269fbc2 | 2022-01-20 10:28:13 -0700 | [diff] [blame] | 971 | STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX); |
Bart Van Assche | d6d7f01 | 2021-06-17 17:44:50 -0700 | [diff] [blame] | 972 | STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX); |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 973 | #undef STORE_FUNCTION |
Bart Van Assche | d6d7f01 | 2021-06-17 17:44:50 -0700 | [diff] [blame] | 974 | #undef STORE_INT |
| 975 | #undef STORE_JIFFIES |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 976 | |
| 977 | #define DD_ATTR(name) \ |
Joe Perches | 5657a81 | 2018-05-24 13:38:59 -0600 | [diff] [blame] | 978 | __ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store) |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 979 | |
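/*
 * Each DD_ATTR entry becomes a 0644 file named after the tunable in
 * the queue's iosched/ sysfs directory; __ATTR_NULL terminates the
 * table for the elevator core.
 */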
| 980 | static struct elv_fs_entry deadline_attrs[] = { |
| 981 | DD_ATTR(read_expire), |
| 982 | DD_ATTR(write_expire), |
| 983 | DD_ATTR(writes_starved), |
| 984 | DD_ATTR(front_merges), |
Bart Van Assche | 0775758 | 2021-06-17 17:44:51 -0700 | [diff] [blame] | 985 | DD_ATTR(async_depth), |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 986 | DD_ATTR(fifo_batch), |
Bart Van Assche | fcc3046 | 2021-09-27 15:03:28 -0700 | [diff] [blame] | 987 | DD_ATTR(prio_aging_expire), |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 988 | __ATTR_NULL |
| 989 | }; |
| 990 | |
Omar Sandoval | daaadb3 | 2017-05-04 00:31:34 -0700 | [diff] [blame] | 991 | #ifdef CONFIG_BLK_DEBUG_FS |
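/*
 * The macro below stamps out, per (priority, direction) pair, the
 * seq_file iterators that dump one FIFO list under debugfs plus a
 * one-shot show function for the cached next_rq pointer.  dd->lock is
 * held from ->start() to ->stop() so the list cannot change while it
 * is being walked.
 */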
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 992 | #define DEADLINE_DEBUGFS_DDIR_ATTRS(prio, data_dir, name) \ |
Omar Sandoval | daaadb3 | 2017-05-04 00:31:34 -0700 | [diff] [blame] | 993 | static void *deadline_##name##_fifo_start(struct seq_file *m, \ |
| 994 | loff_t *pos) \ |
| 995 | __acquires(&dd->lock) \ |
| 996 | { \ |
| 997 | struct request_queue *q = m->private; \ |
| 998 | struct deadline_data *dd = q->elevator->elevator_data; \ |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 999 | struct dd_per_prio *per_prio = &dd->per_prio[prio]; \ |
Omar Sandoval | daaadb3 | 2017-05-04 00:31:34 -0700 | [diff] [blame] | 1000 | \ |
| 1001 | spin_lock(&dd->lock); \ |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 1002 | return seq_list_start(&per_prio->fifo_list[data_dir], *pos); \ |
Omar Sandoval | daaadb3 | 2017-05-04 00:31:34 -0700 | [diff] [blame] | 1003 | } \ |
| 1004 | \ |
| 1005 | static void *deadline_##name##_fifo_next(struct seq_file *m, void *v, \ |
| 1006 | loff_t *pos) \ |
| 1007 | { \ |
| 1008 | struct request_queue *q = m->private; \ |
| 1009 | struct deadline_data *dd = q->elevator->elevator_data; \ |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 1010 | struct dd_per_prio *per_prio = &dd->per_prio[prio]; \ |
Omar Sandoval | daaadb3 | 2017-05-04 00:31:34 -0700 | [diff] [blame] | 1011 | \ |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 1012 | return seq_list_next(v, &per_prio->fifo_list[data_dir], pos); \ |
Omar Sandoval | daaadb3 | 2017-05-04 00:31:34 -0700 | [diff] [blame] | 1013 | } \ |
| 1014 | \ |
| 1015 | static void deadline_##name##_fifo_stop(struct seq_file *m, void *v) \ |
| 1016 | __releases(&dd->lock) \ |
| 1017 | { \ |
| 1018 | struct request_queue *q = m->private; \ |
| 1019 | struct deadline_data *dd = q->elevator->elevator_data; \ |
| 1020 | \ |
| 1021 | spin_unlock(&dd->lock); \ |
| 1022 | } \ |
| 1023 | \ |
| 1024 | static const struct seq_operations deadline_##name##_fifo_seq_ops = { \ |
| 1025 | .start = deadline_##name##_fifo_start, \ |
| 1026 | .next = deadline_##name##_fifo_next, \ |
| 1027 | .stop = deadline_##name##_fifo_stop, \ |
| 1028 | .show = blk_mq_debugfs_rq_show, \ |
| 1029 | }; \ |
| 1030 | \ |
| 1031 | static int deadline_##name##_next_rq_show(void *data, \ |
| 1032 | struct seq_file *m) \ |
| 1033 | { \ |
| 1034 | struct request_queue *q = data; \ |
| 1035 | struct deadline_data *dd = q->elevator->elevator_data; \ |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 1036 | struct dd_per_prio *per_prio = &dd->per_prio[prio]; \ |
| 1037 | struct request *rq = per_prio->next_rq[data_dir]; \ |
Omar Sandoval | daaadb3 | 2017-05-04 00:31:34 -0700 | [diff] [blame] | 1038 | \ |
| 1039 | if (rq) \ |
| 1040 | __blk_mq_debugfs_rq_show(m, rq); \ |
| 1041 | return 0; \ |
| 1042 | } |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 1043 | |
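/*
 * The numeric suffix encodes the I/O priority class, matching
 * enum dd_prio: 0 = RT, 1 = BE, 2 = IDLE.
 */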
| 1044 | DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_READ, read0); |
| 1045 | DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_WRITE, write0); |
| 1046 | DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_READ, read1); |
| 1047 | DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_WRITE, write1); |
| 1048 | DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_READ, read2); |
| 1049 | DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_WRITE, write2); |
Omar Sandoval | daaadb3 | 2017-05-04 00:31:34 -0700 | [diff] [blame] | 1050 | #undef DEADLINE_DEBUGFS_DDIR_ATTRS |
| 1051 | |
| 1052 | static int deadline_batching_show(void *data, struct seq_file *m) |
| 1053 | { |
| 1054 | struct request_queue *q = data; |
| 1055 | struct deadline_data *dd = q->elevator->elevator_data; |
| 1056 | |
| 1057 | seq_printf(m, "%u\n", dd->batching); |
| 1058 | return 0; |
| 1059 | } |
| 1060 | |
| 1061 | static int deadline_starved_show(void *data, struct seq_file *m) |
| 1062 | { |
| 1063 | struct request_queue *q = data; |
| 1064 | struct deadline_data *dd = q->elevator->elevator_data; |
| 1065 | |
| 1066 | seq_printf(m, "%u\n", dd->starved); |
| 1067 | return 0; |
| 1068 | } |
| 1069 | |
Bart Van Assche | 0775758 | 2021-06-17 17:44:51 -0700 | [diff] [blame] | 1070 | static int dd_async_depth_show(void *data, struct seq_file *m) |
| 1071 | { |
| 1072 | struct request_queue *q = data; |
| 1073 | struct deadline_data *dd = q->elevator->elevator_data; |
| 1074 | |
| 1075 | seq_printf(m, "%u\n", dd->async_depth); |
| 1076 | return 0; |
| 1077 | } |
| 1078 | |
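/*
 * Report the number of requests queued in the scheduler (inserted but
 * not yet completed) for each priority class.  Sampling all three
 * counts under dd->lock yields a consistent snapshot.
 */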
Bart Van Assche | 38ba64d | 2021-06-17 17:44:54 -0700 | [diff] [blame] | 1079 | static int dd_queued_show(void *data, struct seq_file *m) |
| 1080 | { |
| 1081 | struct request_queue *q = data; |
| 1082 | struct deadline_data *dd = q->elevator->elevator_data; |
Bart Van Assche | 8ecb51c | 2021-09-27 15:03:27 -0700 | [diff] [blame] | 1083 | u32 rt, be, idle; |
Bart Van Assche | 38ba64d | 2021-06-17 17:44:54 -0700 | [diff] [blame] | 1084 | |
Bart Van Assche | 8ecb51c | 2021-09-27 15:03:27 -0700 | [diff] [blame] | 1085 | spin_lock(&dd->lock); |
| 1086 | rt = dd_queued(dd, DD_RT_PRIO); |
| 1087 | be = dd_queued(dd, DD_BE_PRIO); |
| 1088 | idle = dd_queued(dd, DD_IDLE_PRIO); |
| 1089 | spin_unlock(&dd->lock); |
| 1090 | |
| 1091 | seq_printf(m, "%u %u %u\n", rt, be, idle); |
| 1092 | |
Bart Van Assche | 38ba64d | 2021-06-17 17:44:54 -0700 | [diff] [blame] | 1093 | return 0; |
| 1094 | } |
| 1095 | |
| 1096 | /* Number of requests owned by the block driver for a given priority. */ |
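/*
 * A request is owned by the driver from dispatch until completion.
 * Requests merged into another request never pass through dispatch
 * but still see ->finish_request(), so "merged" must be added for the
 * subtraction of "completed" to balance.
 */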
| 1097 | static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio) |
| 1098 | { |
Bart Van Assche | 8ecb51c | 2021-09-27 15:03:27 -0700 | [diff] [blame] | 1099 | const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats; |
| 1100 | |
| 1101 | lockdep_assert_held(&dd->lock); |
| 1102 | |
| 1103 | return stats->dispatched + stats->merged - |
| 1104 | atomic_read(&stats->completed); |
Bart Van Assche | 38ba64d | 2021-06-17 17:44:54 -0700 | [diff] [blame] | 1105 | } |
| 1106 | |
| 1107 | static int dd_owned_by_driver_show(void *data, struct seq_file *m) |
| 1108 | { |
| 1109 | struct request_queue *q = data; |
| 1110 | struct deadline_data *dd = q->elevator->elevator_data; |
Bart Van Assche | 8ecb51c | 2021-09-27 15:03:27 -0700 | [diff] [blame] | 1111 | u32 rt, be, idle; |
Bart Van Assche | 38ba64d | 2021-06-17 17:44:54 -0700 | [diff] [blame] | 1112 | |
Bart Van Assche | 8ecb51c | 2021-09-27 15:03:27 -0700 | [diff] [blame] | 1113 | spin_lock(&dd->lock); |
| 1114 | rt = dd_owned_by_driver(dd, DD_RT_PRIO); |
| 1115 | be = dd_owned_by_driver(dd, DD_BE_PRIO); |
| 1116 | idle = dd_owned_by_driver(dd, DD_IDLE_PRIO); |
| 1117 | spin_unlock(&dd->lock); |
| 1118 | |
| 1119 | seq_printf(m, "%u %u %u\n", rt, be, idle); |
| 1120 | |
Bart Van Assche | 38ba64d | 2021-06-17 17:44:54 -0700 | [diff] [blame] | 1121 | return 0; |
| 1122 | } |
| 1123 | |
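/*
 * Like the FIFO iterators above, but walking the per-priority
 * dispatch lists; the same dd->lock start/stop protocol applies.
 */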
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 1124 | #define DEADLINE_DISPATCH_ATTR(prio) \ |
| 1125 | static void *deadline_dispatch##prio##_start(struct seq_file *m, \ |
| 1126 | loff_t *pos) \ |
| 1127 | __acquires(&dd->lock) \ |
| 1128 | { \ |
| 1129 | struct request_queue *q = m->private; \ |
| 1130 | struct deadline_data *dd = q->elevator->elevator_data; \ |
| 1131 | struct dd_per_prio *per_prio = &dd->per_prio[prio]; \ |
| 1132 | \ |
| 1133 | spin_lock(&dd->lock); \ |
| 1134 | return seq_list_start(&per_prio->dispatch, *pos); \ |
| 1135 | } \ |
| 1136 | \ |
| 1137 | static void *deadline_dispatch##prio##_next(struct seq_file *m, \ |
| 1138 | void *v, loff_t *pos) \ |
| 1139 | { \ |
| 1140 | struct request_queue *q = m->private; \ |
| 1141 | struct deadline_data *dd = q->elevator->elevator_data; \ |
| 1142 | struct dd_per_prio *per_prio = &dd->per_prio[prio]; \ |
| 1143 | \ |
| 1144 | return seq_list_next(v, &per_prio->dispatch, pos); \ |
| 1145 | } \ |
| 1146 | \ |
| 1147 | static void deadline_dispatch##prio##_stop(struct seq_file *m, void *v) \ |
| 1148 | __releases(&dd->lock) \ |
| 1149 | { \ |
| 1150 | struct request_queue *q = m->private; \ |
| 1151 | struct deadline_data *dd = q->elevator->elevator_data; \ |
| 1152 | \ |
| 1153 | spin_unlock(&dd->lock); \ |
| 1154 | } \ |
| 1155 | \ |
| 1156 | static const struct seq_operations deadline_dispatch##prio##_seq_ops = { \ |
| 1157 | .start = deadline_dispatch##prio##_start, \ |
| 1158 | .next = deadline_dispatch##prio##_next, \ |
| 1159 | .stop = deadline_dispatch##prio##_stop, \ |
| 1160 | .show = blk_mq_debugfs_rq_show, \ |
Omar Sandoval | daaadb3 | 2017-05-04 00:31:34 -0700 | [diff] [blame] | 1161 | } |
| 1162 | |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 1163 | DEADLINE_DISPATCH_ATTR(0); |
| 1164 | DEADLINE_DISPATCH_ATTR(1); |
| 1165 | DEADLINE_DISPATCH_ATTR(2); |
| 1166 | #undef DEADLINE_DISPATCH_ATTR |
Omar Sandoval | daaadb3 | 2017-05-04 00:31:34 -0700 | [diff] [blame] | 1167 | |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 1168 | #define DEADLINE_QUEUE_DDIR_ATTRS(name) \ |
| 1169 | {#name "_fifo_list", 0400, \ |
| 1170 | .seq_ops = &deadline_##name##_fifo_seq_ops} |
| 1171 | #define DEADLINE_NEXT_RQ_ATTR(name) \ |
Omar Sandoval | daaadb3 | 2017-05-04 00:31:34 -0700 | [diff] [blame] | 1172 | {#name "_next_rq", 0400, deadline_##name##_next_rq_show} |
| 1173 | static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = { |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 1174 | DEADLINE_QUEUE_DDIR_ATTRS(read0), |
| 1175 | DEADLINE_QUEUE_DDIR_ATTRS(write0), |
| 1176 | DEADLINE_QUEUE_DDIR_ATTRS(read1), |
| 1177 | DEADLINE_QUEUE_DDIR_ATTRS(write1), |
| 1178 | DEADLINE_QUEUE_DDIR_ATTRS(read2), |
| 1179 | DEADLINE_QUEUE_DDIR_ATTRS(write2), |
| 1180 | DEADLINE_NEXT_RQ_ATTR(read0), |
| 1181 | DEADLINE_NEXT_RQ_ATTR(write0), |
| 1182 | DEADLINE_NEXT_RQ_ATTR(read1), |
| 1183 | DEADLINE_NEXT_RQ_ATTR(write1), |
| 1184 | DEADLINE_NEXT_RQ_ATTR(read2), |
| 1185 | DEADLINE_NEXT_RQ_ATTR(write2), |
Omar Sandoval | daaadb3 | 2017-05-04 00:31:34 -0700 | [diff] [blame] | 1186 | {"batching", 0400, deadline_batching_show}, |
| 1187 | {"starved", 0400, deadline_starved_show}, |
Bart Van Assche | 0775758 | 2021-06-17 17:44:51 -0700 | [diff] [blame] | 1188 | {"async_depth", 0400, dd_async_depth_show}, |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 1189 | {"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops}, |
| 1190 | {"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops}, |
| 1191 | {"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops}, |
Bart Van Assche | 38ba64d | 2021-06-17 17:44:54 -0700 | [diff] [blame] | 1192 | {"owned_by_driver", 0400, dd_owned_by_driver_show}, |
| 1193 | {"queued", 0400, dd_queued_show}, |
Omar Sandoval | daaadb3 | 2017-05-04 00:31:34 -0700 | [diff] [blame] | 1194 | {}, |
| 1195 | }; |
| 1196 | #undef DEADLINE_QUEUE_DDIR_ATTRS |
| 1197 | #endif |
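/*
 * With CONFIG_BLK_DEBUG_FS enabled the attributes above surface as
 * read-only files such as read0_fifo_list and dispatch0 under the
 * queue's sched/ debugfs directory (typically
 * /sys/kernel/debug/block/<disk>/sched/).
 */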
| 1198 | |
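/*
 * Glue between the blk-mq scheduling framework and the functions
 * defined above.  The rbtree helpers elv_rb_latter_request() and
 * elv_rb_former_request() come straight from the shared elevator
 * code, since the per-direction sort lists are plain rbtrees.
 */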
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 1199 | static struct elevator_type mq_deadline = { |
Jens Axboe | f9cd4bf | 2018-11-01 16:41:41 -0600 | [diff] [blame] | 1200 | .ops = { |
Bart Van Assche | 0775758 | 2021-06-17 17:44:51 -0700 | [diff] [blame] | 1201 | .depth_updated = dd_depth_updated, |
| 1202 | .limit_depth = dd_limit_depth, |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 1203 | .insert_requests = dd_insert_requests, |
Jens Axboe | c13660a | 2017-01-26 12:40:07 -0700 | [diff] [blame] | 1204 | .dispatch_request = dd_dispatch_request, |
Damien Le Moal | f3bc78d | 2018-02-28 09:35:29 -0800 | [diff] [blame] | 1205 | .prepare_request = dd_prepare_request, |
| 1206 | .finish_request = dd_finish_request, |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 1207 | .next_request = elv_rb_latter_request, |
| 1208 | .former_request = elv_rb_former_request, |
| 1209 | .bio_merge = dd_bio_merge, |
| 1210 | .request_merge = dd_request_merge, |
| 1211 | .requests_merged = dd_merged_requests, |
| 1212 | .request_merged = dd_request_merged, |
| 1213 | .has_work = dd_has_work, |
Bart Van Assche | 3e9a99e | 2021-06-17 17:44:48 -0700 | [diff] [blame] | 1214 | .init_sched = dd_init_sched, |
| 1215 | .exit_sched = dd_exit_sched, |
Bart Van Assche | 0775758 | 2021-06-17 17:44:51 -0700 | [diff] [blame] | 1216 | .init_hctx = dd_init_hctx, |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 1217 | }, |
| 1218 | |
Omar Sandoval | daaadb3 | 2017-05-04 00:31:34 -0700 | [diff] [blame] | 1219 | #ifdef CONFIG_BLK_DEBUG_FS |
| 1220 | .queue_debugfs_attrs = deadline_queue_debugfs_attrs, |
| 1221 | #endif |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 1222 | .elevator_attrs = deadline_attrs, |
| 1223 | .elevator_name = "mq-deadline", |
Jens Axboe | 4d740bc | 2017-10-25 09:47:20 -0600 | [diff] [blame] | 1224 | .elevator_alias = "deadline", |
Damien Le Moal | 68c43f1 | 2019-09-05 18:51:31 +0900 | [diff] [blame] | 1225 | .elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE, |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 1226 | .elevator_owner = THIS_MODULE, |
| 1227 | }; |
Ben Hutchings | 7de967e | 2017-08-13 18:03:15 +0100 | [diff] [blame] | 1228 | MODULE_ALIAS("mq-deadline-iosched"); |
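/*
 * The alias allows autoloading: the elevator core appends "-iosched"
 * to the requested scheduler name when it calls request_module().
 */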
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 1229 | |
| 1230 | static int __init deadline_init(void) |
| 1231 | { |
Tejun Heo | 0f78399 | 2021-08-11 07:41:45 -1000 | [diff] [blame] | 1232 | return elv_register(&mq_deadline); |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 1233 | } |
| 1234 | |
| 1235 | static void __exit deadline_exit(void) |
| 1236 | { |
| 1237 | elv_unregister(&mq_deadline); |
| 1238 | } |
| 1239 | |
| 1240 | module_init(deadline_init); |
| 1241 | module_exit(deadline_exit); |
| 1242 | |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 1243 | MODULE_AUTHOR("Jens Axboe, Damien Le Moal and Bart Van Assche"); |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 1244 | MODULE_LICENSE("GPL"); |
| 1245 | MODULE_DESCRIPTION("MQ deadline IO scheduler"); |