author		Linus Torvalds <torvalds@linux-foundation.org>	2016-10-09 17:32:20 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-10-09 17:32:20 -0700
commit		24532f768121b07b16178ffb40442ece43365cbd (patch)
tree		ca2eaf2c3ed031dd3aa977af95df77bfa2e18cc6 /block/blk-mq.c
parent		12e3d3cdd975fe986cc5c35f60b1467a8ec20b80 (diff)
parent		97a32864e6de5944c6356049f60569de01e9ba1f (diff)
Merge branch 'for-4.9/block-smp' of git://git.kernel.dk/linux-block
Pull blk-mq CPU hotplug update from Jens Axboe:
 "This is the conversion of blk-mq to the new hotplug state machine."

* 'for-4.9/block-smp' of git://git.kernel.dk/linux-block:
  blk-mq: fixup "Convert to new hotplug state machine"
  blk-mq: Convert to new hotplug state machine
  blk-mq/cpu-notif: Convert to new hotplug state machine
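For reference, the multi-instance pattern this series converts to looks
roughly like the sketch below. This is a minimal illustration, not the
blk-mq code itself: my_ctx, my_ctx_dead, my_ctx_register and my_init are
hypothetical names, and blk-mq's own CPUHP_BLK_MQ_DEAD state is reused
purely for illustration; the cpuhp_* calls are the kernel API actually
used in the diff.

#include <linux/cpuhotplug.h>
#include <linux/list.h>

struct my_ctx {
	/* links this instance into the hotplug state's instance list */
	struct hlist_node cpuhp_dead;
};

/* Invoked once per registered instance after @cpu has gone offline. */
static int my_ctx_dead(unsigned int cpu, struct hlist_node *node)
{
	struct my_ctx *ctx = hlist_entry_safe(node, struct my_ctx, cpuhp_dead);

	if (!ctx)
		return 0;
	/* migrate or drain per-cpu work owned by this instance here */
	return 0;	/* plain 0/-errno return; the NOTIFY_* codes are gone */
}

static int __init my_init(void)
{
	/* Register the callback once; instances attach and detach later. */
	return cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "my_driver:dead",
				       NULL, my_ctx_dead);
}

/* Attach one instance; _nocalls avoids invoking the callback for CPUs
 * that are already online. */
static void my_ctx_register(struct my_ctx *ctx)
{
	cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &ctx->cpuhp_dead);
}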
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--	block/blk-mq.c	123
1 file changed, 56 insertions(+), 67 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b65f572a4faf..ddc2eed64771 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1563,11 +1563,13 @@ fail:
* software queue to the hw queue dispatch list, and ensure that it
* gets run.
*/
-static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
+static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
{
+ struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx;
LIST_HEAD(tmp);
+ hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
ctx = __blk_mq_get_ctx(hctx->queue, cpu);
spin_lock(&ctx->lock);
@@ -1578,30 +1580,20 @@ static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
spin_unlock(&ctx->lock);
if (list_empty(&tmp))
- return NOTIFY_OK;
+ return 0;
spin_lock(&hctx->lock);
list_splice_tail_init(&tmp, &hctx->dispatch);
spin_unlock(&hctx->lock);
blk_mq_run_hw_queue(hctx, true);
- return NOTIFY_OK;
+ return 0;
}
-static int blk_mq_hctx_notify(void *data, unsigned long action,
- unsigned int cpu)
+static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
{
- struct blk_mq_hw_ctx *hctx = data;
-
- if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
- return blk_mq_hctx_cpu_offline(hctx, cpu);
-
- /*
- * In case of CPU online, tags may be reallocated
- * in blk_mq_map_swqueue() after mapping is updated.
- */
-
- return NOTIFY_OK;
+ cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
+ &hctx->cpuhp_dead);
}
/* hctx->ctxs will be freed in queue's release handler */
@@ -1621,7 +1613,7 @@ static void blk_mq_exit_hctx(struct request_queue *q,
if (set->ops->exit_hctx)
set->ops->exit_hctx(hctx, hctx_idx);
- blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
+ blk_mq_remove_cpuhp(hctx);
blk_free_flush_queue(hctx->fq);
sbitmap_free(&hctx->ctx_map);
}
@@ -1668,9 +1660,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
hctx->queue_num = hctx_idx;
hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
- blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
- blk_mq_hctx_notify, hctx);
- blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
+ cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
hctx->tags = set->tags[hctx_idx];
@@ -1715,8 +1705,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
free_ctxs:
kfree(hctx->ctxs);
unregister_cpu_notifier:
- blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
-
+ blk_mq_remove_cpuhp(hctx);
return -1;
}
@@ -2089,50 +2078,18 @@ static void blk_mq_queue_reinit(struct request_queue *q,
blk_mq_sysfs_register(q);
}
-static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
- unsigned long action, void *hcpu)
+/*
+ * New online cpumask which is going to be set in this hotplug event.
+ * Declare this cpumask as global, as cpu-hotplug operations are invoked
+ * one-by-one and dynamically allocating it could result in a failure.
+ */
+static struct cpumask cpuhp_online_new;
+
+static void blk_mq_queue_reinit_work(void)
{
struct request_queue *q;
- int cpu = (unsigned long)hcpu;
- /*
- * New online cpumask which is going to be set in this hotplug event.
- * Declare this cpumasks as global as cpu-hotplug operation is invoked
- * one-by-one and dynamically allocating this could result in a failure.
- */
- static struct cpumask online_new;
-
- /*
- * Before hotadded cpu starts handling requests, new mappings must
- * be established. Otherwise, these requests in hw queue might
- * never be dispatched.
- *
- * For example, there is a single hw queue (hctx) and two CPU queues
- * (ctx0 for CPU0, and ctx1 for CPU1).
- *
- * Now CPU1 is just onlined and a request is inserted into
- * ctx1->rq_list and set bit0 in pending bitmap as ctx1->index_hw is
- * still zero.
- *
- * And then while running hw queue, flush_busy_ctxs() finds bit0 is
- * set in pending bitmap and tries to retrieve requests in
- * hctx->ctxs[0]->rq_list. But htx->ctxs[0] is a pointer to ctx0,
- * so the request in ctx1->rq_list is ignored.
- */
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_DEAD:
- case CPU_UP_CANCELED:
- cpumask_copy(&online_new, cpu_online_mask);
- break;
- case CPU_UP_PREPARE:
- cpumask_copy(&online_new, cpu_online_mask);
- cpumask_set_cpu(cpu, &online_new);
- break;
- default:
- return NOTIFY_OK;
- }
mutex_lock(&all_q_mutex);
-
/*
* We need to freeze and reinit all existing queues. Freezing
* involves synchronous wait for an RCU grace period and doing it
@@ -2153,13 +2110,43 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
}
list_for_each_entry(q, &all_q_list, all_q_node)
- blk_mq_queue_reinit(q, &online_new);
+ blk_mq_queue_reinit(q, &cpuhp_online_new);
list_for_each_entry(q, &all_q_list, all_q_node)
blk_mq_unfreeze_queue(q);
mutex_unlock(&all_q_mutex);
- return NOTIFY_OK;
+}
+
+static int blk_mq_queue_reinit_dead(unsigned int cpu)
+{
+ cpumask_copy(&cpuhp_online_new, cpu_online_mask);
+ blk_mq_queue_reinit_work();
+ return 0;
+}
+
+/*
+ * Before a hot-added CPU starts handling requests, new mappings must be
+ * established. Otherwise, requests in the hw queue might never be
+ * dispatched.
+ *
+ * For example, there is a single hw queue (hctx) and two CPU queues (ctx0
+ * for CPU0, and ctx1 for CPU1).
+ *
+ * Now CPU1 has just been onlined and a request is inserted into
+ * ctx1->rq_list, and bit0 is set in the pending bitmap, as ctx1->index_hw
+ * is still zero.
+ *
+ * And then while running the hw queue, flush_busy_ctxs() finds bit0 set in
+ * the pending bitmap and tries to retrieve requests from
+ * hctx->ctxs[0]->rq_list. But hctx->ctxs[0] is a pointer to ctx0, so the
+ * request in ctx1->rq_list is ignored.
+ */
+static int blk_mq_queue_reinit_prepare(unsigned int cpu)
+{
+ cpumask_copy(&cpuhp_online_new, cpu_online_mask);
+ cpumask_set_cpu(cpu, &cpuhp_online_new);
+ blk_mq_queue_reinit_work();
+ return 0;
}
static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
@@ -2378,10 +2365,12 @@ void blk_mq_enable_hotplug(void)
static int __init blk_mq_init(void)
{
- blk_mq_cpu_init();
-
- hotcpu_notifier(blk_mq_queue_reinit_notify, 0);
+ cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
+ blk_mq_hctx_notify_dead);
+ cpuhp_setup_state_nocalls(CPUHP_BLK_MQ_PREPARE, "block/mq:prepare",
+ blk_mq_queue_reinit_prepare,
+ blk_mq_queue_reinit_dead);
return 0;
}
subsys_initcall(blk_mq_init);
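The PREPARE/DEAD pair registered in blk_mq_init() above uses the plain,
non-instance variant of the API. A minimal sketch of that shape, under the
same caveat (my_prepare, my_dead and my_init are hypothetical stand-ins;
cpuhp_setup_state_nocalls and CPUHP_BLK_MQ_PREPARE are the real API and
state used in the diff):

#include <linux/cpuhotplug.h>

/* Runs on the control CPU before @cpu is brought up. */
static int my_prepare(unsigned int cpu)
{
	return 0;
}

/* Runs on the control CPU after @cpu has gone offline. */
static int my_dead(unsigned int cpu)
{
	return 0;
}

static int __init my_init(void)
{
	/* _nocalls: do not replay the startup callback for already-online CPUs */
	return cpuhp_setup_state_nocalls(CPUHP_BLK_MQ_PREPARE, "my_driver:prepare",
					 my_prepare, my_dead);
}

In both variants the callbacks return 0 or a negative errno and the hotplug
core handles rollback, which is what replaces the old notifier chains'
NOTIFY_OK/NOTIFY_BAD protocol throughout this diff.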