diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1dbf4e1f1c0fb84dbc4fad81b87d99338282d824..327fc4adf139d8fd91b8c0912f8227afc06e61b1 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5120,6 +5120,11 @@ static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
 
 	lockdep_assert_held(&rq->lock);
 
+#ifdef CONFIG_QOS_SCHED
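+	/* Release QoS-throttled cfs_rqs on this CPU first. */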
+	unthrottle_qos_cfs_rqs(cpu_of(rq));
+#endif
+
 	rcu_read_lock();
 	list_for_each_entry_rcu(tg, &task_groups, list) {
 		struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
@@ -5142,10 +5147,6 @@ static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
 			unthrottle_cfs_rq(cfs_rq);
 	}
 	rcu_read_unlock();
-
-#ifdef CONFIG_QOS_SCHED
-	unthrottle_qos_cfs_rqs(cpu_of(rq));
-#endif
 }
 
 #else /* CONFIG_CFS_BANDWIDTH */
@@ -6890,6 +6891,29 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 
 #ifdef CONFIG_QOS_SCHED
 static void start_qos_hrtimer(int cpu);
+
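+/* QoS variant of tg_unthrottle_up(): only decrement the throttle count. */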
+static int qos_tg_unthrottle_up(struct task_group *tg, void *data)
+{
+	struct rq *rq = data;
+	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
+
+	cfs_rq->throttle_count--;
+
+	return 0;
+}
+
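+/* QoS variant of tg_throttle_down(): only increment the throttle count. */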
+static int qos_tg_throttle_down(struct task_group *tg, void *data)
+{
+	struct rq *rq = data;
+	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
+
+	cfs_rq->throttle_count++;
+
+	return 0;
+}
+
 static void throttle_qos_cfs_rq(struct cfs_rq *cfs_rq)
 {
 	struct rq *rq = rq_of(cfs_rq);
@@ -6900,7 +6924,7 @@ static void throttle_qos_cfs_rq(struct cfs_rq *cfs_rq)
 
 	/* freeze hierarchy runnable averages while throttled */
 	rcu_read_lock();
-	walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
+	walk_tg_tree_from(cfs_rq->tg, qos_tg_throttle_down, tg_nop, (void *)rq);
 	rcu_read_unlock();
 
 	task_delta = cfs_rq->h_nr_running;
@@ -6928,7 +6952,6 @@ static void throttle_qos_cfs_rq(struct cfs_rq *cfs_rq)
 		start_qos_hrtimer(cpu_of(rq));
 
 	cfs_rq->throttled = 1;
-	cfs_rq->throttled_clock = rq_clock(rq);
 
 	list_add(&cfs_rq->qos_throttled_list,
 		 &per_cpu(qos_throttled_cfs_rq, cpu_of(rq)));
@@ -6937,7 +6960,6 @@
 static void unthrottle_qos_cfs_rq(struct cfs_rq *cfs_rq)
 {
 	struct rq *rq = rq_of(cfs_rq);
-	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 	struct sched_entity *se;
 	int enqueue = 1;
 	long task_delta, idle_task_delta;
@@ -6947,12 +6969,13 @@ static void unthrottle_qos_cfs_rq(struct cfs_rq *cfs_rq)
 	cfs_rq->throttled = 0;
 
 	update_rq_clock(rq);
-
-	cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
 	list_del_init(&cfs_rq->qos_throttled_list);
 
 	/* update hierarchical throttle state */
-	walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
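+	/* Walk under the RCU read lock, matching the QoS throttle path. */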
+	rcu_read_lock();
+	walk_tg_tree_from(cfs_rq->tg, tg_nop, qos_tg_unthrottle_up, (void *)rq);
+	rcu_read_unlock();
 
 	if (!cfs_rq->load.weight)
 		return;