blk-mq: bypass IO scheduler's limit_depth for passthrough request
Commit 01e99aeca3 ("blk-mq: insert passthrough request into
hctx->dispatch directly") gives high priority to passthrough requests and
bypasses the underlying IO scheduler. But when we allocate a tag for such a
request, the IO scheduler's limit_depth callback still runs, while what we
really want is to give the full sbitmap depth to such a request when it
acquires an available tag.
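
For context, a limit_depth callback shrinks data->shallow_depth before the
tag is allocated. A simplified sketch follows (example_limit_depth is a
made-up name; the hook signature and the shallow_depth field are the real
ones, and the depth of 8 matches what bfq reports in the trace below):

static void example_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
{
	/*
	 * An IO scheduler caps how much of the tag space a request may use
	 * by lowering data->shallow_depth before the tag is allocated.
	 * bfq, for instance, leaves non-sync requests -- which is what
	 * these passthrough PC commands look like -- only a small slice
	 * of the sbitmap:
	 */
	if (!op_is_sync(op))
		data->shallow_depth = 8;	/* not the full sbitmap depth */
}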
blktrace shows PC requests (dmraid -s -c -i) hitting bfq's limit_depth:
8,0 2 0 0.000000000 39952 1,0 m N bfq [bfq_limit_depth] wr_busy 0 sync 0 depth 8
8,0 2 1 0.000008134 39952 D R 4 [dmraid]
8,0 2 2 0.000021538 24 C R [0]
8,0 2 0 0.000035442 39952 1,0 m N bfq [bfq_limit_depth] wr_busy 0 sync 0 depth 8
8,0 2 3 0.000038813 39952 D R 24 [dmraid]
8,0 2 4 0.000044356 24 C R [0]
This patch introduces a new wrapper, blk_op_is_passthrough(), to keep the check readable.
Signed-off-by: Lin Feng <linf@wangsu.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20210415033920.213963-1-linf@wangsu.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
@@ -361,11 +361,12 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
 
 	if (e) {
 		/*
-		 * Flush requests are special and go directly to the
+		 * Flush/passthrough requests are special and go directly to the
 		 * dispatch list. Don't include reserved tags in the
 		 * limiting, as it isn't useful.
 		 */
 		if (!op_is_flush(data->cmd_flags) &&
+		    !blk_op_is_passthrough(data->cmd_flags) &&
 		    e->type->ops.limit_depth &&
 		    !(data->flags & BLK_MQ_REQ_RESERVED))
 			e->type->ops.limit_depth(data->cmd_flags, data);
@@ -274,6 +274,12 @@ static inline bool bio_is_passthrough(struct bio *bio)
 	return blk_op_is_scsi(op) || blk_op_is_private(op);
 }
 
+static inline bool blk_op_is_passthrough(unsigned int op)
+{
+	return (blk_op_is_scsi(op & REQ_OP_MASK) ||
+		blk_op_is_private(op & REQ_OP_MASK));
+}
+
 static inline unsigned short req_get_ioprio(struct request *req)
 {
 	return req->ioprio;
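
For reference, the path this affects, roughly (REQ_OP_SCSI_IN,
blk_get_request() and the call chain below are mainline symbols of this
era; the snippet itself is only an illustration, not part of the patch):

	/* A SCSI passthrough (PC) request, e.g. one generated by dmraid's
	 * SG_IO ioctls, is allocated along this path: */
	struct request *rq = blk_get_request(q, REQ_OP_SCSI_IN, 0);
	/* -> blk_mq_alloc_request() -> __blk_mq_alloc_request(), where
	 * blk_op_is_passthrough(REQ_OP_SCSI_IN) is now true, so the
	 * elevator's ->limit_depth() is skipped and the tag is taken from
	 * the full sbitmap depth instead of bfq's shallow depth of 8. */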