Commit 0b89bf94 authored by Ming Lei, committed by Yang Yingliang

blk-mq: fix is_flush_rq

mainline inclusion
from mainline-v5.14-rc7
commit a9ed27a764156929efe714033edb3e9023c5f321
category: bugfix
bugzilla: 34280, https://gitee.com/openeuler/kernel/issues/I4AKY4


CVE: NA

-----------------------------------------------

is_flush_rq() is called from bt_iter()/bt_tags_iter(), and runs the
following check:

	hctx->fq->flush_rq == req

but the passed hctx from bt_iter()/bt_tags_iter() may be NULL because:

1) memory re-order in blk_mq_rq_ctx_init():

	rq->mq_hctx = data->hctx;
	...
	refcount_set(&rq->ref, 1);

OR

2) tag re-use and ->rqs[] isn't updated with new request.
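
In either case the iterator can observe rq->ref before it observes the
hctx publication, so the old check dereferences a stale or NULL hctx.
Roughly (illustrative sketch, not verbatim kernel code):

	/* tag iteration side, e.g. bt_tags_iter() */
	rq = tags->rqs[bitnr];
	if (rq && refcount_inc_not_zero(&rq->ref)) {
		/*
		 * The reference is visible, but the hctx derived from rq
		 * (rq->mq_hctx upstream, rq->mq_ctx in this backport) may
		 * still be unpublished or belong to a reused tag.
		 */
		if (is_flush_rq(rq, hctx))	/* old check: hctx->fq->flush_rq == rq */
			...
	}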

Fix the issue by re-writing is_flush_rq() as:

	return rq->end_io == flush_end_io;

which turns out to be simpler to follow and immune to the data race, since
the WRITE of rq->end_io is ordered before refcount_set(&rq->ref, 1).
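
The pairing can be pictured as follows (barrier placement is assumed from
the companion flush-request fix and shown only for illustration):

	/* writer: flush request setup */
	flush_rq->end_io = flush_end_io;
	smp_wmb();				/* order WRITE ->end_io before WRITE ->ref */
	refcount_set(&flush_rq->ref, 1);

	/* reader: tag iteration */
	if (refcount_inc_not_zero(&rq->ref) &&	/* pairs with the barrier above */
	    rq->end_io == flush_end_io)		/* new is_flush_rq(): ->end_io store is visible */
		...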

Fixes: 2e315dc07df0 ("blk-mq: grab rq->refcount before calling ->fn in blk_mq_tagset_busy_iter")
Cc: "Blank-Burian, Markus, Dr." <blankburian@uni-muenster.de>
Cc: Yufen Yu <yuyufen@huawei.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20210818010925.607383-1-ming.lei@redhat.com


Signed-off-by: Jens Axboe <axboe@kernel.dk>

conflicts:
 - rq->mq_hctx does not exist; however, the problem still exists
   because we use rq->mq_ctx to get the hctx.
 - the second parameter 'hctx' of blk_mq_put_rq_ref() can be removed.
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
parent 22d1f86b
block/blk-flush.c
@@ -295,6 +295,11 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
 	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
 }
 
+bool is_flush_rq(struct request *rq)
+{
+	return rq->end_io == flush_end_io;
+}
+
 /**
  * blk_kick_flush - consider issuing flush request
  * @q: request_queue being kicked
block/blk-mq-tag.c
@@ -255,7 +255,7 @@ static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
 
 	if (rq->q == hctx->queue)
 		iter_data->fn(hctx, rq, iter_data->data, reserved);
-	blk_mq_put_rq_ref(rq, hctx);
+	blk_mq_put_rq_ref(rq);
 	return true;
 }
 
@@ -299,7 +299,7 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
 
 	if (blk_mq_request_started(rq))
 		iter_data->fn(rq, iter_data->data, reserved);
-	blk_mq_put_rq_ref(rq, blk_mq_map_queue(rq->q, rq->mq_ctx->cpu));
+	blk_mq_put_rq_ref(rq);
 	return true;
 }
 
block/blk-mq.c
@@ -854,9 +854,9 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
 	return false;
 }
 
-void blk_mq_put_rq_ref(struct request *rq, struct blk_mq_hw_ctx *hctx)
+void blk_mq_put_rq_ref(struct request *rq)
 {
-	if (is_flush_rq(rq, hctx))
+	if (is_flush_rq(rq))
 		rq->end_io(rq, 0);
 	else if (refcount_dec_and_test(&rq->ref))
 		__blk_mq_free_request(rq);
block/blk-mq.h
@@ -45,7 +45,7 @@ void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
 bool blk_mq_get_driver_tag(struct request *rq);
 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
 					struct blk_mq_ctx *start);
-void blk_mq_put_rq_ref(struct request *rq, struct blk_mq_hw_ctx *hctx);
+void blk_mq_put_rq_ref(struct request *rq);
 
 /*
  * Internal helpers for allocating/freeing the request map
block/blk.h
@@ -140,11 +140,7 @@ static inline void __blk_get_queue(struct request_queue *q)
 	kobject_get(&q->kobj);
 }
 
-static inline bool
-is_flush_rq(struct request *req, struct blk_mq_hw_ctx *hctx)
-{
-	return hctx->fq->flush_rq == req;
-}
+bool is_flush_rq(struct request *req);
 
 struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
 		int node, int cmd_size, gfp_t flags);