blk-mq: Relocate hctx_may_queue()
blk-mq.h and blk-mq-tag.h include each other, which is less than ideal.
Relocate hctx_may_queue() to blk-mq.h, as it is not really tag-specific
code. In this way, we can drop the blk-mq-tag.h include of blk-mq.h.
Signed-off-by: John Garry <[email protected]>
Tested-by: Douglas Gilbert <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 3acdb56..56dc37c 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -259,4 +259,36 @@ static inline struct blk_plug *blk_mq_plug(struct request_queue *q,
return NULL;
}
 
+/*
+ * For shared tag users, we track the number of currently active users
+ * and attempt to provide a fair share of the tag depth for each of them.
+ */
+static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
+ struct sbitmap_queue *bt)
+{
+ unsigned int depth, users;
+
+ if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
+ return true;
+ if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+ return true;
+
+	/*
+	 * Don't try dividing an ant: a single tag cannot be
+	 * subdivided, so there is nothing to carve up fairly
+	 */
+ if (bt->sb.depth == 1)
+ return true;
+
+ users = atomic_read(&hctx->tags->active_queues);
+ if (!users)
+ return true;
+
+ /*
+ * Allow at least some tags
+ */
+ depth = max((bt->sb.depth + users - 1) / users, 4U);
+ return atomic_read(&hctx->nr_active) < depth;
+}
+
#endif
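
For illustration, below is a standalone sketch of the fair-share
calculation that hctx_may_queue() performs. Only the arithmetic mirrors
the kernel code above; fair_share_depth(), the demo harness and the
256-tag figure are hypothetical scaffolding:

/*
 * Userspace demo of the per-queue depth limit computed in
 * hctx_may_queue(). The harness is illustrative only.
 */
#include <stdio.h>

static unsigned int fair_share_depth(unsigned int total_depth,
				     unsigned int users)
{
	/* Ceiling division, as in (bt->sb.depth + users - 1) / users */
	unsigned int depth = (total_depth + users - 1) / users;

	/* "Allow at least some tags": never throttle below 4 */
	return depth > 4U ? depth : 4U;
}

int main(void)
{
	unsigned int users;

	/* Assume a hypothetical shared set of 256 tags */
	for (users = 1; users <= 64; users *= 4)
		printf("active users=%2u -> per-queue depth=%u\n",
		       users, fair_share_depth(256, users));
	return 0;
}

The ceiling division guarantees that the per-user shares sum to at
least the full depth, and the floor of 4 keeps a queue from being
starved when many users share a small tag set.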