path: root/fs/btrfs/extent_io.c
Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--   fs/btrfs/extent_io.c   94
1 file changed, 49 insertions(+), 45 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 2f83448d34fe..3cd57825c75f 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -726,14 +726,6 @@ next:
start = last_end + 1;
if (start <= end && state && !need_resched())
goto hit_next;
- goto search_again;
-
-out:
- spin_unlock(&tree->lock);
- if (prealloc)
- free_extent_state(prealloc);
-
- return 0;
search_again:
if (start > end)
@@ -742,6 +734,14 @@ search_again:
if (gfpflags_allow_blocking(mask))
cond_resched();
goto again;
+
+out:
+ spin_unlock(&tree->lock);
+ if (prealloc)
+ free_extent_state(prealloc);
+
+ return 0;
+
}
static void wait_on_state(struct extent_io_tree *tree,
@@ -873,8 +873,14 @@ __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
bits |= EXTENT_FIRST_DELALLOC;
again:
if (!prealloc && gfpflags_allow_blocking(mask)) {
+ /*
+ * Don't care for allocation failure here because we might end
+ * up not needing the pre-allocated extent state at all, which
+ * is the case if every extent state in the tree that overlaps our
+ * input range is fully contained within that range.
+ * If we end up needing a new extent state we allocate it later.
+ */
prealloc = alloc_extent_state(mask);
- BUG_ON(!prealloc);
}
spin_lock(&tree->lock);
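As an aside, the condition described in the new comment above can be illustrated with a tiny standalone C helper (the names below are made up for illustration and are not part of btrfs): the pre-allocated state is only consumed when an existing state overlapping the input range also extends outside of it, because that state must then be split at the boundary.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: does a state [s_start, s_end] that overlaps the
 * target [start, end] extend outside of it, forcing a split? */
static bool needs_split(uint64_t s_start, uint64_t s_end,
			uint64_t start, uint64_t end)
{
	return s_start < start || s_end > end;
}

int main(void)
{
	/* Fully inside the target range: no split, prealloc stays unused. */
	printf("%d\n", needs_split(10, 20, 0, 100));
	/* Straddles the end of the range: split needed, prealloc consumed. */
	printf("%d\n", needs_split(90, 110, 0, 100));
	return 0;
}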
@@ -1037,7 +1043,13 @@ hit_next:
goto out;
}
- goto search_again;
+search_again:
+ if (start > end)
+ goto out;
+ spin_unlock(&tree->lock);
+ if (gfpflags_allow_blocking(mask))
+ cond_resched();
+ goto again;
out:
spin_unlock(&tree->lock);
@@ -1046,13 +1058,6 @@ out:
return err;
-search_again:
- if (start > end)
- goto out;
- spin_unlock(&tree->lock);
- if (gfpflags_allow_blocking(mask))
- cond_resched();
- goto again;
}
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
@@ -1073,17 +1078,18 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
* @bits: the bits to set in this range
* @clear_bits: the bits to clear in this range
* @cached_state: state that we're going to cache
- * @mask: the allocation mask
*
* This will go through and set bits for the given range. If any states exist
* already in this range they are set with the given bit and cleared of the
* clear_bits. This is only meant to be used by things that are mergeable, ie
* converting from say DELALLOC to DIRTY. This is not meant to be used with
* boundary bits like LOCK.
+ *
+ * All allocations are done with GFP_NOFS.
*/
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
unsigned bits, unsigned clear_bits,
- struct extent_state **cached_state, gfp_t mask)
+ struct extent_state **cached_state)
{
struct extent_state *state;
struct extent_state *prealloc = NULL;
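For callers, the visible effect of this signature change is only that the gfp mask argument disappears; a hypothetical call site (the tree, range, bit, and cached_state names are illustrative, not taken from this patch) would be updated like this:

-	ret = convert_extent_bit(tree, start, end, EXTENT_NEED_WAIT,
-				 EXTENT_DIRTY, &cached_state, GFP_NOFS);
+	ret = convert_extent_bit(tree, start, end, EXTENT_NEED_WAIT,
+				 EXTENT_DIRTY, &cached_state);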
@@ -1098,7 +1104,7 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
btrfs_debug_check_extent_io_range(tree, start, end);
again:
- if (!prealloc && gfpflags_allow_blocking(mask)) {
+ if (!prealloc) {
/*
* Best effort, don't worry if extent state allocation fails
* here for the first iteration. We might have a cached state
@@ -1106,7 +1112,7 @@ again:
* that matches exactly the target range, in which case no
* extent state allocations are needed. We'll only know this
* after locking the tree.
*/
- prealloc = alloc_extent_state(mask);
+ prealloc = alloc_extent_state(GFP_NOFS);
if (!prealloc && !first_iteration)
return -ENOMEM;
}
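The retry rule that the surrounding comment and the hunk above encode, namely tolerating a failed pre-allocation on the first pass and returning -ENOMEM only on a later pass when a new state is known to be required, can be sketched as a standalone C program (names and structure are illustrative, not the kernel code):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative only: fail with -ENOMEM only when the allocation keeps
 * failing after the first pass over the range. */
static int convert_range(bool alloc_fails, int passes_needed)
{
	bool first_iteration = true;
	void *prealloc = NULL;
	int pass = 0;

again:
	if (!prealloc) {
		prealloc = alloc_fails ? NULL : malloc(16);
		if (!prealloc && !first_iteration)
			return -ENOMEM;
	}

	/* ... the locked tree manipulation would happen here ... */

	if (++pass < passes_needed) {	/* part of the range remains */
		first_iteration = false;
		goto again;
	}
	free(prealloc);
	return 0;
}

int main(void)
{
	printf("%d\n", convert_range(false, 2));	/* 0 */
	printf("%d\n", convert_range(true, 2));		/* -ENOMEM */
	return 0;
}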
@@ -1263,7 +1269,13 @@ hit_next:
goto out;
}
- goto search_again;
+search_again:
+ if (start > end)
+ goto out;
+ spin_unlock(&tree->lock);
+ cond_resched();
+ first_iteration = false;
+ goto again;
out:
spin_unlock(&tree->lock);
@@ -1271,21 +1283,11 @@ out:
free_extent_state(prealloc);
return err;
-
-search_again:
- if (start > end)
- goto out;
- spin_unlock(&tree->lock);
- if (gfpflags_allow_blocking(mask))
- cond_resched();
- first_iteration = false;
- goto again;
}
/* wrappers around set/clear extent bit */
int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
- unsigned bits, gfp_t mask,
- struct extent_changeset *changeset)
+ unsigned bits, struct extent_changeset *changeset)
{
/*
* We don't support EXTENT_LOCKED yet, as current changeset will
@@ -1295,7 +1297,7 @@ int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
*/
BUG_ON(bits & EXTENT_LOCKED);
- return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, mask,
+ return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS,
changeset);
}
@@ -1308,8 +1310,7 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
}
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
- unsigned bits, gfp_t mask,
- struct extent_changeset *changeset)
+ unsigned bits, struct extent_changeset *changeset)
{
/*
* Don't support EXTENT_LOCKED case, same reason as
@@ -1317,7 +1318,7 @@ int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
*/
BUG_ON(bits & EXTENT_LOCKED);
- return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask,
+ return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, GFP_NOFS,
changeset);
}
@@ -1975,13 +1976,13 @@ int free_io_failure(struct inode *inode, struct io_failure_record *rec)
set_state_failrec(failure_tree, rec->start, NULL);
ret = clear_extent_bits(failure_tree, rec->start,
rec->start + rec->len - 1,
- EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
+ EXTENT_LOCKED | EXTENT_DIRTY);
if (ret)
err = ret;
ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
rec->start + rec->len - 1,
- EXTENT_DAMAGED, GFP_NOFS);
+ EXTENT_DAMAGED);
if (ret && !err)
err = ret;
@@ -2232,13 +2233,12 @@ int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
/* set the bits in the private failure tree */
ret = set_extent_bits(failure_tree, start, end,
- EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
+ EXTENT_LOCKED | EXTENT_DIRTY);
if (ret >= 0)
ret = set_state_failrec(failure_tree, start, failrec);
/* set the bits in the inode's tree */
if (ret >= 0)
- ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED,
- GFP_NOFS);
+ ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED);
if (ret < 0) {
kfree(failrec);
return ret;
@@ -4389,8 +4389,12 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
if (ret < 0) {
btrfs_free_path(path);
return ret;
+ } else {
+ WARN_ON(!ret);
+ if (ret == 1)
+ ret = 0;
}
- WARN_ON(!ret);
+
path->slots[0]--;
btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
found_type = found_key.type;
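The hunk above follows the usual btrfs_search_slot() convention: a negative value is an error, 0 means the exact key was found, and 1 means no exact match but the path is positioned where the key would be inserted, so the caller steps back one slot. A generic, self-contained illustration of handling that convention (not btrfs code; an array-based search stands in for the real tree search):

#include <stdio.h>

/* Illustrative only: 0 = exact match, 1 = not found but *slot points
 * at the insertion position, negative = error. */
static int search_slot(const int *keys, int nr, int key, int *slot)
{
	int i;

	for (i = 0; i < nr; i++) {
		if (keys[i] == key) {
			*slot = i;
			return 0;
		}
		if (keys[i] > key)
			break;
	}
	*slot = i;
	return 1;
}

int main(void)
{
	int keys[] = { 10, 20, 30 };
	int slot;
	int ret = search_slot(keys, 3, 25, &slot);

	if (ret < 0)
		return 1;		/* real error */
	if (ret == 1) {			/* expected: no exact match */
		ret = 0;
		slot--;			/* step back to the preceding item */
	}
	printf("ret=%d slot=%d key=%d\n", ret, slot, keys[slot]);
	return 0;
}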
@@ -4601,7 +4605,7 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb)
if (mapped)
spin_unlock(&page->mapping->private_lock);
- /* One for when we alloced the page */
+ /* One for when we allocated the page */
put_page(page);
} while (index != 0);
}
@@ -5761,7 +5765,7 @@ int try_release_extent_buffer(struct page *page)
struct extent_buffer *eb;
/*
- * We need to make sure noboody is attaching this page to an eb right
+ * We need to make sure nobody is attaching this page to an eb right
* now.
*/
spin_lock(&page->mapping->private_lock);