Commit b6a187ae authored by Yu Kuai, committed by Yongqiang Liu

block: fix kabi broken in request_queue

hulk inclusion
category: performance
bugzilla: 187597, https://gitee.com/openeuler/kernel/issues/I5QK5M


CVE: NA

--------------------------------

request_queue_wrapper is not accessible from drivers, so introduce a new
exported helper to initialize async dispatch. This lets the async-dispatch
fields move from struct request_queue into the wrapper, which fixes the
KABI breakage.

Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Jason Yan <yanaijie@huawei.com>
Signed-off-by: Yongqiang Liu <liuyongqiang13@huawei.com>
parent 8934afb9
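Before the hunks, a note on the approach: this is the standard KABI-preservation pattern, where the public struct keeps its layout, the new per-queue state lives in a block-internal wrapper that embeds it, and drivers reach the feature only through an exported helper. The sketch below is a minimal, self-contained userspace model of that pattern; every name in it is an illustrative stand-in, and the conversion macro is assumed to be offsetof()/container_of()-style, since the real queue_to_wrapper() definition is not shown in this commit.

#include <stddef.h>
#include <stdio.h>

/* Illustrative model only -- none of these names come from the kernel tree. */
struct pub_queue {			/* stands in for struct request_queue */
	int public_field;		/* existing, KABI-visible layout */
};

struct pub_queue_wrapper {		/* stands in for request_queue_wrapper */
	struct pub_queue q;		/* embeds the public struct */
	int last_dispatch_cpu;		/* new state stays out of the KABI */
};

/* Assumed conversion: recover the wrapper from the embedded public struct. */
#define pubq_to_wrapper(qp) \
	((struct pub_queue_wrapper *)((char *)(qp) - offsetof(struct pub_queue_wrapper, q)))

/* Exported-style helper: callers (drivers) only ever see struct pub_queue. */
static void pubq_init_dispatch_state(struct pub_queue *q)
{
	pubq_to_wrapper(q)->last_dispatch_cpu = -1;
}

int main(void)
{
	struct pub_queue_wrapper w = { .q = { .public_field = 1 } };

	pubq_init_dispatch_state(&w.q);
	printf("hidden state: %d\n", w.last_dispatch_cpu);	/* prints -1 */
	return 0;
}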
@@ -35,6 +35,7 @@
 #include <linux/blk-cgroup.h>
 #include <linux/debugfs.h>
 #include <linux/bpf.h>
+#include <linux/arch_topology.h>
 #define CREATE_TRACE_POINTS
 #include <trace/events/block.h>
@@ -86,6 +87,8 @@ struct kmem_cache *blk_requestq_cachep;
 static struct workqueue_struct *kblockd_workqueue;
 #define BIO_DISPATCH_MAX_LOOP 16
+/* the minimum of cpus that dispatch async can be enabled */
+#define MIN_DISPATCH_ASYNC_CPUS 16
 /* prevent false sharing */
 #define BIO_ASYNC_LIST_SHIFT 2
@@ -112,15 +115,16 @@ static struct bio_dispatch_async_ctl __percpu **bio_dispatch_async_ctl;
 static int blk_alloc_queue_dispatch_async(struct request_queue *q)
 {
+	struct request_queue_wrapper *q_wrapper = queue_to_wrapper(q);
 	int cpu;
-	q->last_dispatch_cpu = alloc_percpu(int);
-	if (!q->last_dispatch_cpu)
+	q_wrapper->last_dispatch_cpu = alloc_percpu(int);
+	if (!q_wrapper->last_dispatch_cpu)
 		return -ENOMEM;
-	cpumask_setall(&q->dispatch_async_cpus);
+	cpumask_setall(&q_wrapper->dispatch_async_cpus);
 	for_each_possible_cpu(cpu) {
-		*per_cpu_ptr(q->last_dispatch_cpu, cpu) = cpu;
+		*per_cpu_ptr(q_wrapper->last_dispatch_cpu, cpu) = cpu;
 	}
 	return 0;
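For readers less familiar with the percpu API used here and in blk_free_queue_dispatch_async() just below, a condensed sketch of the allocate/initialize/free lifecycle follows. It is illustrative kernel-style code, not part of the patch; the struct and function names are invented.

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/errno.h>

/* Illustrative only: one int per possible CPU, seeded and later freed. */
struct demo_dispatch_state {
	int __percpu *last_cpu;
};

static int demo_dispatch_state_init(struct demo_dispatch_state *s)
{
	int cpu;

	s->last_cpu = alloc_percpu(int);	/* NULL on allocation failure */
	if (!s->last_cpu)
		return -ENOMEM;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(s->last_cpu, cpu) = cpu;	/* each CPU starts from itself */

	return 0;
}

static void demo_dispatch_state_exit(struct demo_dispatch_state *s)
{
	free_percpu(s->last_cpu);
}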
@@ -128,7 +132,7 @@ static int blk_alloc_queue_dispatch_async(struct request_queue *q)
 void blk_free_queue_dispatch_async(struct request_queue *q)
 {
-	free_percpu(q->last_dispatch_cpu);
+	free_percpu(queue_to_wrapper(q)->last_dispatch_cpu);
 }
 static int collect_bio(struct bio_dispatch_async_ctl *ctl,
@@ -202,11 +206,14 @@ static int bio_dispatch_work(void *data)
 static int get_dispatch_cpu(struct request_queue *q, int cpu)
 {
-	int *last_dispatch_cpu = per_cpu_ptr(q->last_dispatch_cpu, cpu);
+	int *last_dispatch_cpu =
+		per_cpu_ptr(queue_to_wrapper(q)->last_dispatch_cpu, cpu);
+	struct cpumask *dispatch_async_cpus =
+		&queue_to_wrapper(q)->dispatch_async_cpus;
-	cpu = cpumask_next(*last_dispatch_cpu, &q->dispatch_async_cpus);
+	cpu = cpumask_next(*last_dispatch_cpu, dispatch_async_cpus);
 	if (cpu >= nr_cpu_ids)
-		cpu = cpumask_first(&q->dispatch_async_cpus);
+		cpu = cpumask_first(dispatch_async_cpus);
 	*last_dispatch_cpu = cpu;
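get_dispatch_cpu() is a per-CPU round robin over the allowed dispatch CPUs: take the next allowed CPU after the one this CPU used last time, wrapping back to the first allowed CPU at the end of the mask. Below is a small, runnable userspace model of just that wrap-around behaviour; a bool array stands in for the cpumask, and the mask contents are arbitrary.

#include <stdio.h>
#include <stdbool.h>

#define NR_CPUS 8

/* Return the first allowed CPU strictly after "after", or NR_CPUS if none
 * (mimicking "cpu >= nr_cpu_ids" in the kernel code).
 */
static int next_allowed(const bool *mask, int after)
{
	for (int cpu = after + 1; cpu < NR_CPUS; cpu++)
		if (mask[cpu])
			return cpu;
	return NR_CPUS;
}

static int first_allowed(const bool *mask)
{
	return next_allowed(mask, -1);
}

/* Same shape as get_dispatch_cpu(): advance, wrap, remember. */
static int pick_dispatch_cpu(const bool *mask, int *last)
{
	int cpu = next_allowed(mask, *last);

	if (cpu >= NR_CPUS)
		cpu = first_allowed(mask);
	*last = cpu;
	return cpu;
}

int main(void)
{
	bool allowed[NR_CPUS] = { [2] = true, [3] = true, [6] = true };
	int last = 0;

	for (int i = 0; i < 6; i++)
		printf("%d ", pick_dispatch_cpu(allowed, &last));
	printf("\n");	/* prints: 2 3 6 2 3 6 */
	return 0;
}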
@@ -243,7 +250,7 @@ static blk_qc_t blk_queue_do_make_request(struct bio *bio)
 	 * 4) TODO: return value of submit_bio() will be used in io polling.
 	 */
 	if (!test_bit(QUEUE_FLAG_DISPATCH_ASYNC, &q->queue_flags) ||
-	    cpumask_test_cpu(cpu, &q->dispatch_async_cpus) ||
+	    cpumask_test_cpu(cpu, &queue_to_wrapper(q)->dispatch_async_cpus) ||
 	    bio->bi_opf & REQ_NOWAIT)
 		return q->make_request_fn(q, bio);
@@ -290,6 +297,19 @@ static void init_blk_queue_async_dispatch(void)
 	}
 }
+void queue_init_dispatch_async_cpus(struct request_queue *q, int node)
+{
+	struct cpumask *dispatch_async_cpus =
+		&queue_to_wrapper(q)->dispatch_async_cpus;
+	arch_get_preferred_sibling_cpumask(node, dispatch_async_cpus);
+	if (cpumask_weight(dispatch_async_cpus) >= MIN_DISPATCH_ASYNC_CPUS)
+		blk_queue_flag_set(QUEUE_FLAG_DISPATCH_ASYNC, q);
+	else
+		cpumask_setall(dispatch_async_cpus);
+}
+EXPORT_SYMBOL_GPL(queue_init_dispatch_async_cpus);
 /**
  * blk_queue_flag_set - atomically set a queue flag
  * @flag: flag to be set
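With queue_init_dispatch_async_cpus() now exported, a driver that wants async dispatch needs only the declaration added to blkdev.h further below; it never dereferences request_queue_wrapper. The call site sketched here is hypothetical (the function name and where the node comes from are assumptions, since no caller is changed by this commit):

#include <linux/blkdev.h>

/* Hypothetical driver init path (not from this patch): let the block
 * layer pick the preferred sibling CPUs of "node" and decide whether
 * QUEUE_FLAG_DISPATCH_ASYNC gets set for this queue.
 */
static void mydrv_setup_dispatch(struct request_queue *q, int node)
{
	queue_init_dispatch_async_cpus(q, node);
}

Because the exported helper keeps the exact name and prototype of the static copy that drivers/md/md.c used to carry (removed in the md.c hunk below), the existing md.c caller keeps compiling and now simply resolves to this block-layer symbol, which is why the driver-side definition can be deleted.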
@@ -705,7 +705,7 @@ static ssize_t queue_dispatch_async_cpus_show(struct request_queue *q,
 	if (!test_bit(QUEUE_FLAG_DISPATCH_ASYNC, &q->queue_flags))
 		return -EOPNOTSUPP;
-	for_each_cpu(cpu, &q->dispatch_async_cpus) {
+	for_each_cpu(cpu, &queue_to_wrapper(q)->dispatch_async_cpus) {
 		ret += sprintf(page + ret, "%d ", cpu);
 	}
@@ -48,6 +48,10 @@ struct request_queue_wrapper {
 	 */
 	struct mutex mq_freeze_lock;
 	int mq_freeze_depth;
+
+	/* used when QUEUE_FLAG_DISPATCH_ASYNC is set */
+	struct cpumask dispatch_async_cpus;
+	int __percpu *last_dispatch_cpu;
 };
 #define queue_to_wrapper(q) \
@@ -66,7 +66,6 @@
 #include <linux/raid/md_u.h>
 #include <linux/slab.h>
 #include <linux/percpu-refcount.h>
-#include <linux/arch_topology.h>
 #include <trace/events/block.h>
 #include "md.h"
@@ -5544,16 +5543,6 @@ static void md_safemode_timeout(struct timer_list *t)
 static int start_dirty_degraded;
-#define MIN_DISPATCH_ASYNC_CPUS 16
-static void queue_init_dispatch_async_cpus(struct request_queue *q, int node)
-{
-	arch_get_preferred_sibling_cpumask(node, &q->dispatch_async_cpus);
-	if (cpumask_weight(&q->dispatch_async_cpus) >= MIN_DISPATCH_ASYNC_CPUS)
-		blk_queue_flag_set(QUEUE_FLAG_DISPATCH_ASYNC, q);
-	else
-		cpumask_setall(&q->dispatch_async_cpus);
-}
 int md_run(struct mddev *mddev)
 {
 	int err;
@@ -701,10 +701,6 @@ struct request_queue {
 	struct work_struct release_work;
-
-	/* used when QUEUE_FLAG_DISPATCH_ASYNC is set */
-	struct cpumask dispatch_async_cpus;
-	int __percpu *last_dispatch_cpu;
 #define BLK_MAX_WRITE_HINTS 5
 	u64 write_hints[BLK_MAX_WRITE_HINTS];
 };
@@ -789,6 +785,7 @@ bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
 extern void blk_set_pm_only(struct request_queue *q);
 extern void blk_clear_pm_only(struct request_queue *q);
+extern void queue_init_dispatch_async_cpus(struct request_queue *q, int node);
 static inline int queue_in_flight(struct request_queue *q)
 {