sched/fair: Fix incorrect usage of RCU in CPU select path
The RCU read lock needs to be acquired before accessing schedtune data; otherwise
it is possible we access stale or corrupted data. Move the rcu_read_lock() up to
fix the issue.
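
The rule the fix enforces is the standard RCU read-side pattern: every
rcu_dereference() of the schedtune/energy data has to sit between
rcu_read_lock() and rcu_read_unlock(), otherwise a lockdep-enabled build flags
the dereference as suspicious (the splat below). A minimal sketch of that
pattern, not the actual scheduler code; the struct and variable names are made
up for illustration:

#include <linux/rcupdate.h>

/* Hypothetical boost data, standing in for the schedtune/energy structures. */
struct my_boost_data {
        int boost;
};

static struct my_boost_data __rcu *my_boost_ptr;

static int read_boost(void)
{
        struct my_boost_data *d;
        int boost = 0;

        rcu_read_lock();                    /* enter the read-side critical section */
        d = rcu_dereference(my_boost_ptr);  /* legal only while the read lock is held */
        if (d)
                boost = d->boost;
        rcu_read_unlock();                  /* 'd' must not be used past this point */

        return boost;
}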

Issue found with lockdep, which fires a warning on boot:
[    0.132198] <kernel_src>include/linux/cgroup.h:455 suspicious rcu_dereference_check() usage!
[    0.132203] other info that might help us debug this:
[    0.132211] rcu_scheduler_active = 1, debug_locks = 0
[    0.132219] 3 locks held by swapper/0/1:
[    0.132225] #0:  (cpu_hotplug.dep_map){.+.+.+}, at: [<ffffff80080a7ee4>]
[    0.132261] #1:  (smpboot_threads_lock){+.+.+.}, at: [<ffffff80080d5f04>]
[    0.132282] #2:  (&p->pi_lock){......}, at: [<ffffff80080e05fc>] try_to_wake_up+0x4c/0x598
[    0.132298] stack backtrace:
[    0.132307] CPU: 0 PID: 1 Comm: swapper/0 Not tainted 4.9.84-g6cedde4aedec
[    0.132314] Hardware name: MSM sdm845 C1 EVT v1.0 (DT)
[    0.132322] Call trace:
[    0.132337] [<ffffff8008089678>] dump_backtrace+0x0/0x3c0
[    0.132344] [<ffffff800808966c>] show_stack+0x20/0x2c
[    0.132357] [<ffffff800847fbf0>] dump_stack+0xdc/0x12c
[    0.132367] [<ffffff800812261c>] lockdep_rcu_suspicious+0x13c/0x154
[    0.132377] [<ffffff80081148e4>] task_sched_boost+0x88/0xa0
[    0.132386] [<ffffff80080f738c>] select_energy_cpu_brute+0x284/0x23a4
[    0.132390] [<ffffff80080f4bdc>] select_task_rq_fair+0x390/0x1434
[    0.132395] [<ffffff80080e089c>] try_to_wake_up+0x2ec/0x598
[    0.132403] [<ffffff80080de1fc>] wake_up_process+0x24/0x30
[    0.132412] [<ffffff80080d0434>] __kthread_create_on_node+0xbc/0x1a8
[    0.132418] [<ffffff80080d0348>] kthread_create_on_node+0xa4/0xd4
[    0.132424] [<ffffff80080d066c>] kthread_create_on_cpu+0x40/0xe4
[    0.132432] [<ffffff80080d5cf0>] __smpboot_create_thread+0x8c/0x10c
[    0.132437] [<ffffff80080d5f50>] smpboot_register_percpu_thread_cpumask+0x90/0x14c
[    0.132450] [<ffffff8009a09744>] spawn_ksoftirqd+0x40/0x5c
[    0.132456] [<ffffff8008083900>] do_one_initcall+0x118/0x1d8
[    0.132466] [<ffffff8009a00ef0>] kernel_init_freeable+0x12c/0x290
[    0.132479] [<ffffff8009157b80>] kernel_init+0x14/0x220

Bug: 110360156
Change-Id: Ie908090225e491fde6e0c0c836e3e986b262ca85
Signed-off-by: Joel Fernandes (Google) <[email protected]>
[[email protected]: Moved the RCU read lock up to 'select_task_rq_fair' to
 protect the entire function.]
Signed-off-by: Lingutla Chandrasekhar <[email protected]>
Signed-off-by: mydongistiny <[email protected]>
Signed-off-by: DennySPB <[email protected]>
joelagnel authored and DennySPB committed Dec 12, 2018
1 parent db469d3 commit ce79495
1 changed file: kernel/sched/fair.c (12 additions, 12 deletions)
@@ -6472,12 +6472,10 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
         prefer_idle = 0;
 #endif
 
-        rcu_read_lock();
-
         sd = rcu_dereference(per_cpu(sd_ea, prev_cpu));
         if (!sd) {
                 target_cpu = prev_cpu;
-                goto unlock;
+                goto out;
         }
 
         sync_entity_load_avg(&p->se);
@@ -6486,22 +6484,22 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
         next_cpu = find_best_target(p, &backup_cpu, boosted, prefer_idle);
         if (next_cpu == -1) {
                 target_cpu = prev_cpu;
-                goto unlock;
+                goto out;
         }
 
         /* Unconditionally prefer IDLE CPUs for boosted/prefer_idle tasks */
         if ((boosted || prefer_idle) && idle_cpu(next_cpu)) {
                 schedstat_inc(p, se.statistics.nr_wakeups_secb_idle_bt);
                 schedstat_inc(this_rq(), eas_stats.secb_idle_bt);
                 target_cpu = next_cpu;
-                goto unlock;
+                goto out;
         }
 
         target_cpu = prev_cpu;
         if (next_cpu == prev_cpu) {
                 schedstat_inc(p, se.statistics.nr_wakeups_secb_count);
                 schedstat_inc(this_rq(), eas_stats.secb_count);
-                goto unlock;
+                goto out;
         }
 
         eenv = rcu_dereference(per_cpu(energy_env, cpu));
@@ -6518,7 +6516,7 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
                 schedstat_inc(p, se.statistics.nr_wakeups_secb_insuff_cap);
                 schedstat_inc(this_rq(), eas_stats.secb_insuff_cap);
                 target_cpu = next_cpu;
-                goto unlock;
+                goto out;
         }
 
         cpumask_clear(&eenv->cpus_mask);
@@ -6545,9 +6543,7 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
 
         target_cpu = eenv->next_cpu;
 
-unlock:
-        rcu_read_unlock();
-
+out:
         return target_cpu;
 }
 
@@ -6590,8 +6586,12 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
                         cpumask_test_cpu(cpu, &p->cpus_allowed);
         }
 
-        if (energy_aware() && !(cpu_rq(prev_cpu)->rd->overutilized))
-                return select_energy_cpu_brute(p, prev_cpu, sync);
+        if (energy_aware() && !(cpu_rq(prev_cpu)->rd->overutilized)) {
+                rcu_read_lock();
+                new_cpu = select_energy_cpu_brute(p, prev_cpu, sync);
+                rcu_read_unlock();
+                return new_cpu;
+        }
 
         rcu_read_lock();
         for_each_domain(cpu, tmp) {
