BACKPORT: sched: Fix stop_one_cpu_nowait() vs hotplug

[ Upstream commit f0498d2a54e7966ce23cd7c7ff42c64fa0059b07 ]

Kuyo reported sporadic failures on a sched_setaffinity() vs CPU
hotplug stress-test -- notably affine_move_task() remains stuck in
wait_for_completion(), leading to a hung-task detector warning.

Specifically, it was reported that stop_one_cpu_nowait(.fn =
migration_cpu_stop) returns false -- this stopper is responsible for
the matching complete().

The race scenario is:

    CPU0                                        CPU1

                                                // doing _cpu_down()

    __set_cpus_allowed_ptr()
      task_rq_lock();
                                                takedown_cpu()
                                                  stop_machine_cpuslocked(take_cpu_down..)

                                                <PREEMPT: cpu_stopper_thread()
                                                  MULTI_STOP_PREPARE
                                                  ...
      __set_cpus_allowed_ptr_locked()
        affine_move_task()
          task_rq_unlock();
          <PREEMPT: cpu_stopper_thread()\>
            ack_state()
                                                  MULTI_STOP_RUN
                                                    take_cpu_down()
                                                      __cpu_disable();
                                                    stop_machine_park();
                                                      stopper->enabled = false;
                                                 />
           />

          stop_one_cpu_nowait(.fn = migration_cpu_stop);
            if (stopper->enabled) // false!!!

That is, by doing stop_one_cpu_nowait() after dropping rq-lock, the
stopper thread gets a chance to preempt and allows the cpu-down for
the target CPU to complete.

OTOH, since stop_one_cpu_nowait() / cpu_stop_queue_work() needs to
issue a wakeup, it must not be run under the scheduler locks.

Solve this apparent contradiction by keeping preemption disabled over
the unlock + queue_stopper combination:

	preempt_disable();
	task_rq_unlock(...);
	if (!stop_pending)
		stop_one_cpu_nowait(...)
	preempt_enable();

This respects the lock ordering constraints while still avoiding the
above race. That is, if we find the CPU is online under rq-lock, the
targeted stop_one_cpu_nowait() must succeed.

Apply this pattern to all similar stop_one_cpu_nowait() invocations.
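
For illustration only, here is a minimal sketch of the pattern as it
ends up looking in the migration paths. The helper name is made up for
this example; the individual calls (preempt_disable(), task_rq_unlock(),
stop_one_cpu_nowait(), migration_cpu_stop) are the real kernel APIs used
by the hunks below:

	/*
	 * Sketch: queue migration_cpu_stop() without racing CPU hotplug.
	 * With preemption disabled, the local stopper thread cannot run
	 * in the window between the unlock and the queueing, so the
	 * multi-stop machinery cannot advance to the point where it
	 * disables the target CPU's stopper.
	 */
	static void queue_migration_stop(struct rq *rq, struct task_struct *p,
					 struct rq_flags *rf,
					 struct migration_arg *arg,
					 struct cpu_stop_work *work)
	{
		preempt_disable();
		task_rq_unlock(rq, p, rf);	/* drops rq->lock and p->pi_lock */
		stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop, arg, work);
		preempt_enable();
	}

Hence, if the CPU was observed online while rq-lock was held, its
stopper is still enabled when cpu_stop_queue_work() runs, and the
queued migration_cpu_stop() will issue the matching complete().
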
Bug: 317318329
Fixes: 6d337eab041d ("sched: Fix migrate_disable() vs set_cpus_allowed_ptr()")
Reported-by: "Kuyo Chang (張建文)" <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Tested-by: "Kuyo Chang (張建文)" <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Sasha Levin <[email protected]>
Signed-off-by: Dylan Chang <[email protected]>
Change-Id: Ib2cc52566f43c3c10f694ce9c1c6a6569b4e2687

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index bb9dfd7..29d4338 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2441,9 +2441,11 @@ static int migration_cpu_stop(void *data)
 		 * it.
 		 */
 		WARN_ON_ONCE(!pending->stop_pending);
+		preempt_disable();
 		task_rq_unlock(rq, p, &rf);
 		stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
 				    &pending->arg, &pending->stop_work);
+		preempt_enable();
 		return 0;
 	}
 out:
@@ -2725,12 +2727,13 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
 			complete = true;
 		}
 
+		preempt_disable();
 		task_rq_unlock(rq, p, rf);
-
 		if (push_task) {
 			stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
 					    p, &rq->push_work);
 		}
+		preempt_enable();
 
 		if (complete)
 			complete_all(&pending->done);
@@ -2796,12 +2799,13 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
 		if (flags & SCA_MIGRATE_ENABLE)
 			p->migration_flags &= ~MDF_PUSH;
 
+		preempt_disable();
 		task_rq_unlock(rq, p, rf);
-
 		if (!stop_pending) {
 			stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
 					    &pending->arg, &pending->stop_work);
 		}
+		preempt_enable();
 
 		if (flags & SCA_MIGRATE_ENABLE)
 			return 0;
@@ -9130,9 +9134,11 @@ static void balance_push(struct rq *rq)
 	 * Temporarily drop rq->lock such that we can wake-up the stop task.
 	 * Both preemption and IRQs are still disabled.
 	 */
+	preempt_disable();
 	raw_spin_rq_unlock(rq);
 	stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
 			    this_cpu_ptr(&push_work));
+	preempt_enable();
 	/*
 	 * At this point need_resched() is true and we'll take the loop in
 	 * schedule(). The next pick is obviously going to be the stop task
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 9372659..fff2650 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2314,9 +2314,11 @@ static void pull_dl_task(struct rq *this_rq)
 		double_unlock_balance(this_rq, src_rq);
 
 		if (push_task) {
+			preempt_disable();
 			raw_spin_rq_unlock(this_rq);
 			stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
 					    push_task, &src_rq->push_work);
+			preempt_enable();
 			raw_spin_rq_lock(this_rq);
 		}
 	}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 71bce8f..3c9d8eb 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10276,13 +10276,15 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 				busiest->push_cpu = this_cpu;
 				active_balance = 1;
 			}
-			raw_spin_rq_unlock_irqrestore(busiest, flags);
 
+			preempt_disable();
+			raw_spin_rq_unlock_irqrestore(busiest, flags);
 			if (active_balance) {
 				stop_one_cpu_nowait(cpu_of(busiest),
 					active_load_balance_cpu_stop, busiest,
 					&busiest->active_balance_work);
 			}
+			preempt_enable();
 		}
 	} else {
 		sd->nr_balance_failed = 0;
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 61e8ab4..e3c83c20 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2045,9 +2045,11 @@ static int push_rt_task(struct rq *rq, bool pull)
 		 */
 		push_task = get_push_task(rq);
 		if (push_task) {
+			preempt_disable();
 			raw_spin_rq_unlock(rq);
 			stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
 					    push_task, &rq->push_work);
+			preempt_enable();
 			raw_spin_rq_lock(rq);
 		}
 
@@ -2387,9 +2389,11 @@ static void pull_rt_task(struct rq *this_rq)
 		double_unlock_balance(this_rq, src_rq);
 
 		if (push_task) {
+			preempt_disable();
 			raw_spin_rq_unlock(this_rq);
 			stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
 					    push_task, &src_rq->push_work);
+			preempt_enable();
 			raw_spin_rq_lock(this_rq);
 		}
 	}