Commit f82f6d68 authored by Yu Kuai (余快), committed by Yongqiang Liu

md/raid10: convert resync_lock to use seqlock

mainline inclusion
from md-next
commit ddc489e066cd267b383c0eed4f576f6bdb154588
category: performance
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I5PRMO
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/song/md.git/commit/?h=md-next&id=ddc489e066cd267b383c0eed4f576f6bdb154588



---------------------

Currently, wait_barrier() holds 'resync_lock' to read 'conf->barrier', and
IO can't be dispatched until the 'barrier' is dropped.

Since holding the 'barrier' is not common, convert 'resync_lock' to a
seqlock so that taking the lock can be avoided in the fast path.

Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-and-tested-by: Logan Gunthorpe <logang@deltatee.com>
Signed-off-by: Song Liu <song@kernel.org>
Reviewed-by: Jason Yan <yanaijie@huawei.com>
Signed-off-by: Yongqiang Liu <liuyongqiang13@huawei.com>
parent 1668533d
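For context, the change below relies on the standard seqlock read-side pattern: a thread submitting IO samples the sequence counter, checks 'barrier' without taking the lock, optimistically bumps 'nr_pending', and falls back to the write-locked slow path only if the sequence changed (i.e. raise_barrier()/lower_barrier() ran concurrently). Below is a minimal, self-contained sketch of that pattern; the struct and function names are illustrative, not the driver's own, and the undo path is simplified (the patch itself also wakes waiters via atomic_dec_and_test() and wake_up_barrier()).

/*
 * Illustrative sketch only (not the raid10 code): lockless fast path for
 * entering a barrier-protected region, guarded by a seqlock.
 */
#include <linux/seqlock.h>
#include <linux/atomic.h>
#include <linux/compiler.h>

struct barrier_ctx {
	seqlock_t	lock;		/* taken only by the barrier raise/lower paths */
	int		barrier;	/* non-zero while the barrier is held */
	atomic_t	nr_pending;	/* in-flight IO counted by the fast path */
};

/* Return true if we entered without taking the lock; false means slow path. */
static bool enter_fast_path(struct barrier_ctx *ctx)
{
	unsigned int seq = read_seqbegin(&ctx->lock);

	if (READ_ONCE(ctx->barrier))
		return false;			/* barrier is up: take the slow path */

	atomic_inc(&ctx->nr_pending);		/* optimistically count ourselves in */
	if (!read_seqretry(&ctx->lock, seq))
		return true;			/* no writer raced: fast path succeeded */

	atomic_dec(&ctx->nr_pending);		/* a writer raced: undo and retry under the lock */
	return false;
}

The ordering matters: 'nr_pending' is incremented before the sequence recheck, so a concurrent raise_barrier() either sees the increment when it waits for pending IO to drain, or forces this path to back out and go through the locked slow path.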
@@ -112,6 +112,21 @@ static void end_reshape(struct r10conf *conf);
 #include "raid1-10.c"
 
+#define NULL_CMD
+#define cmd_before(conf, cmd) \
+	do { \
+		write_sequnlock_irq(&(conf)->resync_lock); \
+		cmd; \
+	} while (0)
+#define cmd_after(conf) write_seqlock_irq(&(conf)->resync_lock)
+
+#define wait_event_barrier_cmd(conf, cond, cmd) \
+	wait_event_cmd((conf)->wait_barrier, cond, cmd_before(conf, cmd), \
+		       cmd_after(conf))
+
+#define wait_event_barrier(conf, cond) \
+	wait_event_barrier_cmd(conf, cond, NULL_CMD)
+
 /*
  * for resync bio, r10bio pointer can be retrieved from the per-bio
  * 'struct resync_pages'.
@@ -971,35 +986,54 @@ static void flush_pending_writes(struct r10conf *conf)
 static void raise_barrier(struct r10conf *conf, int force)
 {
 	BUG_ON(force && !conf->barrier);
-	spin_lock_irq(&conf->resync_lock);
+	write_seqlock_irq(&conf->resync_lock);
 
 	/* Wait until no block IO is waiting (unless 'force') */
-	wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
-			    conf->resync_lock);
+	wait_event_barrier(conf, force || !conf->nr_waiting);
 
 	/* block any new IO from starting */
-	conf->barrier++;
+	WRITE_ONCE(conf->barrier, conf->barrier + 1);
 
 	/* Now wait for all pending IO to complete */
-	wait_event_lock_irq(conf->wait_barrier,
-			    !atomic_read(&conf->nr_pending) && conf->barrier < RESYNC_DEPTH,
-			    conf->resync_lock);
+	wait_event_barrier(conf, !atomic_read(&conf->nr_pending) &&
+				 conf->barrier < RESYNC_DEPTH);
 
-	spin_unlock_irq(&conf->resync_lock);
+	write_sequnlock_irq(&conf->resync_lock);
 }
 
 static void lower_barrier(struct r10conf *conf)
 {
 	unsigned long flags;
-	spin_lock_irqsave(&conf->resync_lock, flags);
-	conf->barrier--;
-	spin_unlock_irqrestore(&conf->resync_lock, flags);
+
+	write_seqlock_irqsave(&conf->resync_lock, flags);
+	WRITE_ONCE(conf->barrier, conf->barrier - 1);
+	write_sequnlock_irqrestore(&conf->resync_lock, flags);
 	wake_up(&conf->wait_barrier);
 }
 
+static bool wait_barrier_nolock(struct r10conf *conf)
+{
+	unsigned int seq = read_seqbegin(&conf->resync_lock);
+
+	if (READ_ONCE(conf->barrier))
+		return false;
+
+	atomic_inc(&conf->nr_pending);
+	if (!read_seqretry(&conf->resync_lock, seq))
+		return true;
+
+	if (atomic_dec_and_test(&conf->nr_pending))
+		wake_up_barrier(conf);
+
+	return false;
+}
+
 static void wait_barrier(struct r10conf *conf)
 {
-	spin_lock_irq(&conf->resync_lock);
+	if (wait_barrier_nolock(conf))
+		return;
+
+	write_seqlock_irq(&conf->resync_lock);
 	if (conf->barrier) {
 		conf->nr_waiting++;
 		/* Wait for the barrier to drop.
@@ -1012,19 +1046,18 @@ static void wait_barrier(struct r10conf *conf)
 		 * count down.
 		 */
 		raid10_log(conf->mddev, "wait barrier");
-		wait_event_lock_irq(conf->wait_barrier,
+		wait_event_barrier(conf,
 				    !conf->barrier ||
 				    (atomic_read(&conf->nr_pending) &&
 				     current->bio_list &&
 				     (!bio_list_empty(&current->bio_list[0]) ||
-				      !bio_list_empty(&current->bio_list[1]))),
-				    conf->resync_lock);
+				      !bio_list_empty(&current->bio_list[1]))));
 		conf->nr_waiting--;
 		if (!conf->nr_waiting)
 			wake_up(&conf->wait_barrier);
 	}
 	atomic_inc(&conf->nr_pending);
-	spin_unlock_irq(&conf->resync_lock);
+	write_sequnlock_irq(&conf->resync_lock);
 }
 
 static void allow_barrier(struct r10conf *conf)
@@ -1048,27 +1081,26 @@ static void freeze_array(struct r10conf *conf, int extra)
 	 * must match the number of pending IOs (nr_pending) before
 	 * we continue.
 	 */
-	spin_lock_irq(&conf->resync_lock);
+	write_seqlock_irq(&conf->resync_lock);
 	conf->array_freeze_pending++;
-	conf->barrier++;
+	WRITE_ONCE(conf->barrier, conf->barrier + 1);
 	conf->nr_waiting++;
-	wait_event_lock_irq_cmd(conf->wait_barrier,
-				atomic_read(&conf->nr_pending) == conf->nr_queued+extra,
-				conf->resync_lock,
-				flush_pending_writes(conf));
+	wait_event_barrier_cmd(conf,
+			atomic_read(&conf->nr_pending) == conf->nr_queued+extra,
+			flush_pending_writes(conf));
 	conf->array_freeze_pending--;
-	spin_unlock_irq(&conf->resync_lock);
+	write_sequnlock_irq(&conf->resync_lock);
 }
 
 static void unfreeze_array(struct r10conf *conf)
 {
 	/* reverse the effect of the freeze */
-	spin_lock_irq(&conf->resync_lock);
-	conf->barrier--;
+	write_seqlock_irq(&conf->resync_lock);
+	WRITE_ONCE(conf->barrier, conf->barrier - 1);
 	conf->nr_waiting--;
 	wake_up(&conf->wait_barrier);
-	spin_unlock_irq(&conf->resync_lock);
+	write_sequnlock_irq(&conf->resync_lock);
 }
 
 static sector_t choose_data_offset(struct r10bio *r10_bio,
@@ -3740,7 +3772,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
 	INIT_LIST_HEAD(&conf->retry_list);
 	INIT_LIST_HEAD(&conf->bio_end_io_list);
 
-	spin_lock_init(&conf->resync_lock);
+	seqlock_init(&conf->resync_lock);
 	init_waitqueue_head(&conf->wait_barrier);
 	atomic_set(&conf->nr_pending, 0);
@@ -4080,7 +4112,7 @@ static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs)
 			rdev->new_raid_disk = rdev->raid_disk * 2;
 			rdev->sectors = size;
 		}
-		conf->barrier = 1;
+		WRITE_ONCE(conf->barrier, 1);
 	}
 
 	return conf;
@@ -77,7 +77,7 @@ struct r10conf {
 	struct bio_list		pending_bio_list;
 	int			pending_count;
 
-	spinlock_t		resync_lock;
+	seqlock_t		resync_lock;
 	atomic_t		nr_pending;
 	int			nr_waiting;
 	int			nr_queued;