diff --git a/block/blk-flush.c b/block/blk-flush.c
index c87a0ffba9c74b9c6fe7e135b90919b9370c5594..022a2bbb012d7cfa53fff946734776838303abf6 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -295,6 +295,15 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
 		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
 }
 
+/*
+ * NOTE(review): identifies the flush request purely by its ->end_io
+ * handler; this assumes flush_end_io is only ever installed on
+ * fq->flush_rq — confirm no other request can carry this handler.
+ */
+bool is_flush_rq(struct request *rq)
+{
+	return rq->end_io == flush_end_io;
+}
+
 /**
  * blk_kick_flush - consider issuing flush request
  * @q: request_queue being kicked
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 04986f7d874060d58988c5dc7d2f28d6e33fa5f8..5c740716c5e5a491f86378c903c350cc934945b4 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -255,7 +255,7 @@ static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
 
 	if (rq->q == hctx->queue)
 		iter_data->fn(hctx, rq, iter_data->data, reserved);
-	blk_mq_put_rq_ref(rq, hctx);
+	blk_mq_put_rq_ref(rq);
 	return true;
 }
 
@@ -299,7 +299,7 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
 
 	if (blk_mq_request_started(rq))
 		iter_data->fn(rq, iter_data->data, reserved);
-	blk_mq_put_rq_ref(rq, blk_mq_map_queue(rq->q, rq->mq_ctx->cpu));
+	blk_mq_put_rq_ref(rq);
 
 	return true;
 }
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ed1d9bd917c23fee045aa64ba7fd97c5e5e68062..6da37cbb975c955ff4dd1266838e8cca33eda6f8 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -854,9 +854,9 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
 	return false;
 }
 
-void blk_mq_put_rq_ref(struct request *rq, struct blk_mq_hw_ctx *hctx)
+void blk_mq_put_rq_ref(struct request *rq)
 {
-	if (is_flush_rq(rq, hctx))
+	if (is_flush_rq(rq))
 		rq->end_io(rq, 0);
 	else if (refcount_dec_and_test(&rq->ref))
 		__blk_mq_free_request(rq);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 1fc3debe69f94758240146ade5d926b509b481b8..bbb0c1d8849b4e5a7068ed82896611408c02d865 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -45,7 +45,7 @@ void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
 bool blk_mq_get_driver_tag(struct request *rq);
 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
 					struct blk_mq_ctx *start);
-void blk_mq_put_rq_ref(struct request *rq, struct blk_mq_hw_ctx *hctx);
+void blk_mq_put_rq_ref(struct request *rq);
 
 /*
  * Internal helpers for allocating/freeing the request map
diff --git a/block/blk.h b/block/blk.h
index 62464a303b3b0f7b6f4313696e57bdc9e1baa937..c2fa239ca78f27d7734a04338379c046673e9d6e 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -140,11 +140,7 @@ static inline void __blk_get_queue(struct request_queue *q)
 	kobject_get(&q->kobj);
 }
 
-static inline bool
-is_flush_rq(struct request *req, struct blk_mq_hw_ctx *hctx)
-{
-	return hctx->fq->flush_rq == req;
-}
+bool is_flush_rq(struct request *req);
 
 struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
 		int node, int cmd_size, gfp_t flags);