From 3de5e88485b22f30403045bd83d4815ae2207b19 Mon Sep 17 00:00:00 2001
From: Lai Jiangshan
Date: Tue, 3 Jun 2014 15:33:27 +0800
Subject: workqueue: clear POOL_DISASSOCIATED in rebind_workers()

a9ab775bcadf ("workqueue: directly restore CPU affinity of workers
from CPU_ONLINE") moved pool locking into rebind_workers() but left
"pool->flags &= ~POOL_DISASSOCIATED" in workqueue_cpu_up_callback().

There is nothing necessarily wrong with it, but there is no benefit
either.  Let's move it into rebind_workers() and achieve the following
benefits:

1) better readability, POOL_DISASSOCIATED is cleared in
   rebind_workers() as expected.

2) we can guarantee that, when POOL_DISASSOCIATED is clear, the
   running workers of the pool are on the local CPU (pool->cpu).

tj: Minor description update.

Signed-off-by: Lai Jiangshan
Signed-off-by: Tejun Heo
---
 kernel/workqueue.c | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8474e5752f1f..68461b8d9b39 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -4535,6 +4535,7 @@ static void rebind_workers(struct worker_pool *pool)
 						  pool->attrs->cpumask) < 0);
 
 	spin_lock_irq(&pool->lock);
+	pool->flags &= ~POOL_DISASSOCIATED;
 
 	for_each_pool_worker(worker, pool) {
 		unsigned int worker_flags = worker->flags;
@@ -4637,10 +4638,6 @@ static int workqueue_cpu_up_callback(struct notifier_block *nfb,
 			mutex_lock(&pool->attach_mutex);
 
 			if (pool->cpu == cpu) {
-				spin_lock_irq(&pool->lock);
-				pool->flags &= ~POOL_DISASSOCIATED;
-				spin_unlock_irq(&pool->lock);
-
 				rebind_workers(pool);
 			} else if (pool->cpu < 0) {
 				restore_unbound_workers_cpumask(pool, cpu);
--
cgit v1.2.3
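
For readers unfamiliar with the pattern, here is a minimal userspace C
sketch of what the patch achieves: the pool-level flag is cleared inside
the same critical section that restores the per-worker flags, so anyone
holding the lock sees either the fully disassociated state or the fully
rebound state, never a mix.  This is an illustration only, not kernel
code; pool_rebind_workers(), the flag values, and the struct layouts are
hypothetical stand-ins for the structures touched by the patch.

#include <pthread.h>
#include <stdio.h>

#define POOL_DISASSOCIATED  0x1   /* pool is not tied to its CPU */
#define WORKER_UNBOUND      0x2   /* worker may run on any CPU */

struct worker {
	unsigned int flags;
};

struct worker_pool {
	pthread_mutex_t lock;
	unsigned int flags;
	struct worker workers[4];
};

/*
 * Rebind all workers and mark the pool associated in one critical
 * section, mirroring how the patch folds the POOL_DISASSOCIATED clear
 * into the locked region of rebind_workers().
 */
static void pool_rebind_workers(struct worker_pool *pool)
{
	pthread_mutex_lock(&pool->lock);

	/* Pool-level flag and per-worker flags change under the same lock. */
	pool->flags &= ~POOL_DISASSOCIATED;

	for (size_t i = 0; i < sizeof(pool->workers) / sizeof(pool->workers[0]); i++)
		pool->workers[i].flags &= ~WORKER_UNBOUND;

	pthread_mutex_unlock(&pool->lock);
}

int main(void)
{
	struct worker_pool pool = {
		.lock    = PTHREAD_MUTEX_INITIALIZER,
		.flags   = POOL_DISASSOCIATED,
		.workers = { { WORKER_UNBOUND }, { WORKER_UNBOUND },
			     { WORKER_UNBOUND }, { WORKER_UNBOUND } },
	};

	pool_rebind_workers(&pool);
	printf("pool flags: %#x, worker0 flags: %#x\n",
	       pool.flags, pool.workers[0].flags);
	return 0;
}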