Skip to content
Snippets Groups Projects
Commit ebca52ab authored by Hui Tang, committed by Yongqiang Liu
Browse files

sched: Add statistics for scheduler dynamic affinity

hulk inclusion
category: feature
bugzilla: 187173, https://gitee.com/openeuler/kernel/issues/I5G4IH


CVE: NA

--------------------------------

Signed-off-by: Hui Tang <tanghui20@huawei.com>
Reviewed-by: Chen Hui <judy.chenhui@huawei.com>
Reviewed-by: Zhang Qiao <zhangqiao22@huawei.com>
Signed-off-by: Yongqiang Liu <liuyongqiang13@huawei.com>
parent 2af15a46
No related branches found
No related tags found
No related merge requests found
......@@ -444,6 +444,15 @@ struct sched_statistics {
#endif
};
#ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
struct dyn_affinity_stats {
#ifdef CONFIG_SCHEDSTATS
u64 nr_wakeups_preferred_cpus;
u64 nr_wakeups_force_preferred_cpus;
#endif
};
#endif
struct sched_entity {
/* For load-balancing: */
struct load_weight load;
......@@ -480,7 +489,15 @@ struct sched_entity {
struct sched_avg avg;
#endif
#if !defined(__GENKSYMS__)
#if defined(CONFIG_QOS_SCHED_DYNAMIC_AFFINITY)
struct dyn_affinity_stats *dyn_affi_stats;
#else
KABI_RESERVE(1)
#endif
#else
KABI_RESERVE(1)
#endif
KABI_RESERVE(2)
KABI_RESERVE(3)
KABI_RESERVE(4)
......
......@@ -7203,12 +7203,20 @@ int sched_prefer_cpus_fork(struct task_struct *p, struct task_struct *orig)
else
cpumask_clear(p->prefer_cpus);
p->se.dyn_affi_stats = kzalloc(sizeof(struct dyn_affinity_stats),
GFP_KERNEL);
if (!p->se.dyn_affi_stats) {
kfree(p->prefer_cpus);
p->prefer_cpus = NULL;
return -ENOMEM;
}
return 0;
}
void sched_prefer_cpus_free(struct task_struct *p)
{
kfree(p->prefer_cpus);
kfree(p->se.dyn_affi_stats);
}
static void do_set_prefer_cpus(struct task_struct *p,
......
......@@ -925,6 +925,9 @@ static void sched_show_numa(struct task_struct *p, struct seq_file *m)
void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
struct seq_file *m)
{
#ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
struct dyn_affinity_stats *dyn_affi = p->se.dyn_affi_stats;
#endif
unsigned long nr_switches;
SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
......@@ -983,6 +986,10 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
P_SCHEDSTAT(se.statistics.nr_wakeups_idle);
#ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
__P(dyn_affi->nr_wakeups_preferred_cpus);
__P(dyn_affi->nr_wakeups_force_preferred_cpus);
#endif
avg_atom = p->se.sum_exec_runtime;
if (nr_switches)
......
......@@ -6714,6 +6714,8 @@ static void set_task_select_cpus(struct task_struct *p, int *idlest_cpu,
if (available_idle_cpu(cpu)) {
rcu_read_unlock();
p->select_cpus = p->prefer_cpus;
if (sd_flag & SD_BALANCE_WAKE)
schedstat_inc(p->se.dyn_affi_stats->nr_wakeups_preferred_cpus);
return;
}
......@@ -6725,6 +6727,8 @@ static void set_task_select_cpus(struct task_struct *p, int *idlest_cpu,
if (tg_capacity > cpumask_weight(p->prefer_cpus) &&
util_avg_sum * 100 <= tg_capacity * sysctl_sched_util_low_pct) {
p->select_cpus = p->prefer_cpus;
if (sd_flag & SD_BALANCE_WAKE)
schedstat_inc(p->se.dyn_affi_stats->nr_wakeups_preferred_cpus);
}
}
#endif
......@@ -6814,8 +6818,10 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
rcu_read_unlock();
#ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
if (!cpumask_test_cpu(new_cpu, p->select_cpus))
if (!cpumask_test_cpu(new_cpu, p->select_cpus)) {
new_cpu = idlest_cpu;
schedstat_inc(p->se.dyn_affi_stats->nr_wakeups_force_preferred_cpus);
}
#endif
schedstat_end_time(cpu_rq(cpu), time);
......
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment