From 7b99b00cf3f07c26ca862ca7defbe370a1b2575b Mon Sep 17 00:00:00 2001
From: Ming Lei
Date: Thu, 16 Nov 2017 08:08:44 +0800
Subject: block: wake up all tasks blocked in get_request()

[ Upstream commit 34d9715ac1edd50285168dd8d80c972739a4f6a4 ]

Once blk_set_queue_dying() is done in blk_cleanup_queue(), we call
blk_freeze_queue() and wait for q->q_usage_counter becoming zero. But
if there are tasks blocked in get_request(), q->q_usage_counter can
never become zero. So we have to wake up all these tasks in
blk_set_queue_dying() first.

Fixes: 3ef28e83ab157997 ("block: generic request_queue reference counting")
Signed-off-by: Ming Lei
Signed-off-by: Jens Axboe
Signed-off-by: Sasha Levin
Signed-off-by: Greg Kroah-Hartman
---
 block/blk-core.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'block')

diff --git a/block/blk-core.c b/block/blk-core.c
index 119658534dfd..f5f1a55703ae 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -526,8 +526,8 @@ void blk_set_queue_dying(struct request_queue *q)
 
 		blk_queue_for_each_rl(rl, q) {
 			if (rl->rq_pool) {
-				wake_up(&rl->wait[BLK_RW_SYNC]);
-				wake_up(&rl->wait[BLK_RW_ASYNC]);
+				wake_up_all(&rl->wait[BLK_RW_SYNC]);
+				wake_up_all(&rl->wait[BLK_RW_ASYNC]);
 			}
 		}
 	}
--
cgit v1.2.3


From 047a7bb12d63c41569ce9c9e88f5b231fd1f6cf5 Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Sat, 23 Sep 2017 15:51:23 -0400
Subject: more bio_map_user_iov() leak fixes

commit 2b04e8f6bbb196cab4b232af0f8d48ff2c7a8058 upstream.

we need to take care of failure exit as well - pages already in bio
should be dropped by analogue of bio_unmap_pages(), since their
refcounts had been bumped only once per reference in bio.

Cc: stable@vger.kernel.org
Signed-off-by: Al Viro
Signed-off-by: Guenter Roeck
Signed-off-by: Greg Kroah-Hartman
---
 block/bio.c | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

(limited to 'block')

diff --git a/block/bio.c b/block/bio.c
index 68bbc835bacc..63363a689922 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1268,6 +1268,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
 	int ret, offset;
 	struct iov_iter i;
 	struct iovec iov;
+	struct bio_vec *bvec;
 
 	iov_for_each(iov, i, *iter) {
 		unsigned long uaddr = (unsigned long) iov.iov_base;
@@ -1312,7 +1313,12 @@ struct bio *bio_map_user_iov(struct request_queue *q,
 		ret = get_user_pages_fast(uaddr, local_nr_pages,
 				(iter->type & WRITE) != WRITE,
 				&pages[cur_page]);
-		if (ret < local_nr_pages) {
+		if (unlikely(ret < local_nr_pages)) {
+			for (j = cur_page; j < page_limit; j++) {
+				if (!pages[j])
+					break;
+				put_page(pages[j]);
+			}
 			ret = -EFAULT;
 			goto out_unmap;
 		}
@@ -1374,10 +1380,8 @@ struct bio *bio_map_user_iov(struct request_queue *q,
 	return bio;
 
  out_unmap:
-	for (j = 0; j < nr_pages; j++) {
-		if (!pages[j])
-			break;
-		page_cache_release(pages[j]);
+	bio_for_each_segment_all(bvec, bio, j) {
+		put_page(bvec->bv_page);
 	}
  out:
 	kfree(pages);
--
cgit v1.2.3
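
A note on why the first patch works: tasks blocked in get_request() sleep as
exclusive waiters on rl->wait[], so a plain wake_up() unblocks at most one of
them per call, while wake_up_all() lets every blocked task observe the dying
queue, fail its allocation, and drop its q->q_usage_counter reference so
blk_freeze_queue() can finish. The following is a minimal userspace sketch of
the same shutdown pattern, using pthreads instead of kernel wait queues; all
names and the scenario are illustrative, not kernel code.

/*
 * Userspace analogue of wake_up() vs wake_up_all(), using pthreads.
 * Several "allocator" threads sleep until the queue is marked dying.
 * Broadcasting (the analogue of wake_up_all()) unblocks every waiter so
 * each can see 'dying' and give up; signalling once (the analogue of a
 * single wake_up() of an exclusive waiter) would leave the rest asleep.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wait_queue = PTHREAD_COND_INITIALIZER;
static int dying;

static void *blocked_in_get_request(void *arg)
{
	pthread_mutex_lock(&lock);
	while (!dying)			/* no request ever becomes free */
		pthread_cond_wait(&wait_queue, &lock);
	pthread_mutex_unlock(&lock);
	printf("waiter %ld saw dying queue, exiting\n", (long)arg);
	return NULL;
}

int main(void)
{
	pthread_t waiters[4];
	long i;

	for (i = 0; i < 4; i++)
		pthread_create(&waiters[i], NULL, blocked_in_get_request,
			       (void *)i);
	sleep(1);			/* let all waiters block */

	pthread_mutex_lock(&lock);
	dying = 1;
	/* wake_up_all() analogue: every waiter must get to see 'dying' */
	pthread_cond_broadcast(&wait_queue);
	pthread_mutex_unlock(&lock);

	for (i = 0; i < 4; i++)
		pthread_join(waiters[i], NULL);
	return 0;
}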
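
The second patch enforces the usual failure-exit rule for reference counts:
get_user_pages_fast() can return having pinned only some of the requested
pages, and those partial references have to be dropped before jumping to the
error label, while the shared out_unmap path now walks the bio's segments
because each page's refcount was bumped exactly once per reference actually
added to the bio. Below is a compact userspace sketch of that cleanup
pattern; grab_page() and drop_page() are made-up stand-ins for
get_user_pages_fast() and put_page(), not real kernel interfaces.

/*
 * Sketch of the failure-exit rule: if only some of the references were
 * taken before an error, the ones already taken must be released on the
 * way out, and nothing else.
 */
#include <stdio.h>

static int refcount[8];

static int grab_page(int idx)
{
	if (idx >= 5)			/* simulate a fault past page 5 */
		return -1;
	refcount[idx]++;		/* one reference per successful grab */
	return 0;
}

static void drop_page(int idx)
{
	refcount[idx]--;
}

static int map_pages(int nr_pages)
{
	int i, got = 0;

	for (i = 0; i < nr_pages; i++) {
		if (grab_page(i) < 0)
			goto out_drop;	/* partial failure */
		got++;
	}
	return 0;

out_drop:
	/* drop only the references we actually took */
	while (got-- > 0)
		drop_page(got);
	return -1;
}

int main(void)
{
	int i;

	if (map_pages(8) < 0)
		printf("mapping failed, partial references released\n");
	for (i = 0; i < 8; i++)
		printf("page %d refcount %d\n", i, refcount[i]);
	return 0;
}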