Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
sched: Fix lockup by limiting load-balance retries on lock-break
sched: Fix CONFIG_CGROUP_SCHED dependency
sched: Remove empty #ifdefs
diff --git a/init/Kconfig b/init/Kconfig
index a075765..018d206 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -713,7 +713,6 @@
menuconfig CGROUP_SCHED
bool "Group CPU scheduler"
- depends on EXPERIMENTAL
default n
help
This feature lets CPU scheduler recognize task groups and control CPU
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index cecbb64be..fd7b25e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7134,10 +7134,6 @@
#endif
-#ifdef CONFIG_RT_GROUP_SCHED
-#else /* !CONFIG_RT_GROUP_SCHED */
-#endif /* CONFIG_RT_GROUP_SCHED */
-
#ifdef CONFIG_CGROUP_SCHED
/* task_group_lock serializes the addition/removal of task groups */
static DEFINE_SPINLOCK(task_group_lock);
@@ -7246,9 +7242,6 @@
}
#endif /* CONFIG_CGROUP_SCHED */
-#ifdef CONFIG_FAIR_GROUP_SCHED
-#endif
-
#if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_CFS_BANDWIDTH)
static unsigned long to_ratio(u64 period, u64 runtime)
{
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 8e42de9..84adb2d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3130,8 +3130,10 @@
}
#define LBF_ALL_PINNED 0x01
-#define LBF_NEED_BREAK 0x02
-#define LBF_ABORT 0x04
+#define LBF_NEED_BREAK 0x02 /* clears into HAD_BREAK */
+#define LBF_HAD_BREAK 0x04
+#define LBF_HAD_BREAKS 0x0C /* count HAD_BREAKs overflows into ABORT */
+#define LBF_ABORT 0x10
/*
* can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
@@ -4508,7 +4510,9 @@
goto out_balanced;
if (lb_flags & LBF_NEED_BREAK) {
- lb_flags &= ~LBF_NEED_BREAK;
+ lb_flags += LBF_HAD_BREAK - LBF_NEED_BREAK;
+ if (lb_flags & LBF_ABORT)
+ goto out_balanced;
goto redo;
}