author     Tejun Heo <tj@kernel.org>    2015-09-18 11:56:28 -0400
committer  Tejun Heo <tj@kernel.org>    2015-09-18 11:56:28 -0400
commit     9e10a130d9b62af976d17d120c95f3650769312c (patch)
tree       2a580955a56434f6e7c455a8f0ebf6eec71a9194 /kernel/cpuset.c
parent     fc5ed1e95410ad73b2ab8f33cd90eb3bcf6c98a1 (diff)
cgroup: replace cgroup_on_dfl() tests in controllers with cgroup_subsys_on_dfl()
cgroup_on_dfl() tests whether the cgroup's root is the default hierarchy; however, an individual controller is only interested in whether the controller is attached to the default hierarchy and never tests a cgroup which doesn't belong to the hierarchy that the controller is attached to.

This patch replaces cgroup_on_dfl() tests in controllers with faster static_key based cgroup_subsys_on_dfl(). This leaves cgroup core as the only user of cgroup_on_dfl() and the function is moved from the header file to cgroup.c.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Zefan Li <lizefan@huawei.com>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
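For context, a minimal sketch of what the two helpers look like (approximate shapes, not verbatim from include/linux/cgroup.h; the per-subsystem key name and exact macro text are assumptions):

/* Sketch only: approximate shapes of the old and new helpers. */

/* Old per-cgroup test: loads cgrp->root and compares it on every call. */
static inline bool cgroup_on_dfl(const struct cgroup *cgrp)
{
	return cgrp->root == &cgrp_dfl_root;
}

/* New per-subsystem test: one static_key (jump label) per controller.
 * Each call site compiles to a patchable branch with no memory loads;
 * the key is flipped only when the controller is bound to or unbound
 * from the default hierarchy, so hot paths such as validate_change()
 * and update_cpumasks_hier() pay essentially nothing.
 * (The _on_dfl_key suffix here is an assumption.) */
#define cgroup_subsys_on_dfl(ss) \
	static_branch_likely(&ss ## _on_dfl_key)

Because a controller never tests cgroups belonging to other hierarchies, the per-subsystem answer is equivalent to the per-cgroup one for controller code, which is what makes the mechanical replacement in the hunks below safe.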
Diffstat (limited to 'kernel/cpuset.c')
-rw-r--r--    kernel/cpuset.c    23
1 file changed, 13 insertions(+), 10 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index f0acff0f66c9..20eedd8098c0 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -473,7 +473,8 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
/* On legacy hiearchy, we must be a subset of our parent cpuset. */
ret = -EACCES;
- if (!cgroup_on_dfl(cur->css.cgroup) && !is_cpuset_subset(trial, par))
+ if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
+ !is_cpuset_subset(trial, par))
goto out;
/*
@@ -879,7 +880,8 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus)
* If it becomes empty, inherit the effective mask of the
* parent, which is guaranteed to have some CPUs.
*/
- if (cgroup_on_dfl(cp->css.cgroup) && cpumask_empty(new_cpus))
+ if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
+ cpumask_empty(new_cpus))
cpumask_copy(new_cpus, parent->effective_cpus);
/* Skip the whole subtree if the cpumask remains the same. */
@@ -896,7 +898,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus)
cpumask_copy(cp->effective_cpus, new_cpus);
spin_unlock_irq(&callback_lock);
- WARN_ON(!cgroup_on_dfl(cp->css.cgroup) &&
+ WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
update_tasks_cpumask(cp);
@@ -1135,7 +1137,8 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
* If it becomes empty, inherit the effective mask of the
* parent, which is guaranteed to have some MEMs.
*/
- if (cgroup_on_dfl(cp->css.cgroup) && nodes_empty(*new_mems))
+ if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
+ nodes_empty(*new_mems))
*new_mems = parent->effective_mems;
/* Skip the whole subtree if the nodemask remains the same. */
@@ -1152,7 +1155,7 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
cp->effective_mems = *new_mems;
spin_unlock_irq(&callback_lock);
- WARN_ON(!cgroup_on_dfl(cp->css.cgroup) &&
+ WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
!nodes_equal(cp->mems_allowed, cp->effective_mems));
update_tasks_nodemask(cp);
@@ -1440,7 +1443,7 @@ static int cpuset_can_attach(struct cgroup_subsys_state *css,
/* allow moving tasks into an empty cpuset if on default hierarchy */
ret = -ENOSPC;
- if (!cgroup_on_dfl(css->cgroup) &&
+ if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
(cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
goto out_unlock;
@@ -1952,7 +1955,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
cpuset_inc();
spin_lock_irq(&callback_lock);
- if (cgroup_on_dfl(cs->css.cgroup)) {
+ if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
cpumask_copy(cs->effective_cpus, parent->effective_cpus);
cs->effective_mems = parent->effective_mems;
}
@@ -2029,7 +2032,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
mutex_lock(&cpuset_mutex);
spin_lock_irq(&callback_lock);
- if (cgroup_on_dfl(root_css->cgroup)) {
+ if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
top_cpuset.mems_allowed = node_possible_map;
} else {
@@ -2210,7 +2213,7 @@ retry:
cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
mems_updated = !nodes_equal(new_mems, cs->effective_mems);
- if (cgroup_on_dfl(cs->css.cgroup))
+ if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
hotplug_update_tasks(cs, &new_cpus, &new_mems,
cpus_updated, mems_updated);
else
@@ -2241,7 +2244,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
static cpumask_t new_cpus;
static nodemask_t new_mems;
bool cpus_updated, mems_updated;
- bool on_dfl = cgroup_on_dfl(top_cpuset.css.cgroup);
+ bool on_dfl = cgroup_subsys_on_dfl(cpuset_cgrp_subsys);
mutex_lock(&cpuset_mutex);