blk-mq: factor out a blk_mq_alloc_sq_tag_set helper
Factor out a helper to initialize a simple single hw queue tag_set from blk_mq_init_sq_queue. This will allow phasing out blk_mq_init_sq_queue in favor of a more symmetric and general API. Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com> Link: https://lore.kernel.org/r/20210602065345.355274-2-hch@lst.de Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
committed by
Jens Axboe
parent
a624eb5203
commit
cdb14e0f77
@@ -3152,26 +3152,14 @@ struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
|
|||||||
struct request_queue *q;
|
struct request_queue *q;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
memset(set, 0, sizeof(*set));
|
ret = blk_mq_alloc_sq_tag_set(set, ops, queue_depth, set_flags);
|
||||||
set->ops = ops;
|
|
||||||
set->nr_hw_queues = 1;
|
|
||||||
set->nr_maps = 1;
|
|
||||||
set->queue_depth = queue_depth;
|
|
||||||
set->numa_node = NUMA_NO_NODE;
|
|
||||||
set->flags = set_flags;
|
|
||||||
|
|
||||||
ret = blk_mq_alloc_tag_set(set);
|
|
||||||
if (ret)
|
if (ret)
|
||||||
return ERR_PTR(ret);
|
return ERR_PTR(ret);
|
||||||
|
|
||||||
q = blk_mq_init_queue(set);
|
q = blk_mq_init_queue(set);
|
||||||
if (IS_ERR(q)) {
|
if (IS_ERR(q))
|
||||||
blk_mq_free_tag_set(set);
|
blk_mq_free_tag_set(set);
|
||||||
return q;
|
return q;
|
||||||
}
|
}
|
||||||
|
|
||||||
return q;
|
|
||||||
}
|
|
||||||
EXPORT_SYMBOL(blk_mq_init_sq_queue);
|
EXPORT_SYMBOL(blk_mq_init_sq_queue);
|
||||||
|
|
||||||
static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
|
static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
|
||||||
@@ -3589,6 +3577,22 @@ out_free_mq_map:
|
|||||||
}
|
}
|
||||||
EXPORT_SYMBOL(blk_mq_alloc_tag_set);
|
EXPORT_SYMBOL(blk_mq_alloc_tag_set);
|
||||||
|
|
||||||
|
/* allocate and initialize a tagset for a simple single-queue device */
|
||||||
|
int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
|
||||||
|
const struct blk_mq_ops *ops, unsigned int queue_depth,
|
||||||
|
unsigned int set_flags)
|
||||||
|
{
|
||||||
|
memset(set, 0, sizeof(*set));
|
||||||
|
set->ops = ops;
|
||||||
|
set->nr_hw_queues = 1;
|
||||||
|
set->nr_maps = 1;
|
||||||
|
set->queue_depth = queue_depth;
|
||||||
|
set->numa_node = NUMA_NO_NODE;
|
||||||
|
set->flags = set_flags;
|
||||||
|
return blk_mq_alloc_tag_set(set);
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(blk_mq_alloc_sq_tag_set);
|
||||||
|
|
||||||
void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
|
void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
|
||||||
{
|
{
|
||||||
int i, j;
|
int i, j;
|
||||||
|
|||||||
@@ -439,6 +439,9 @@ struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
|
|||||||
void blk_mq_unregister_dev(struct device *, struct request_queue *);
|
void blk_mq_unregister_dev(struct device *, struct request_queue *);
|
||||||
|
|
||||||
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
|
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
|
||||||
|
int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
|
||||||
|
const struct blk_mq_ops *ops, unsigned int queue_depth,
|
||||||
|
unsigned int set_flags);
|
||||||
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
|
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
|
||||||
|
|
||||||
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
|
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
|
||||||
|
|||||||
Reference in New Issue
Block a user