Diffstat (limited to 'fs/btrfs')
-rw-r--r--  fs/btrfs/delayed-inode.c  | 32
-rw-r--r--  fs/btrfs/extent_io.c      | 31
-rw-r--r--  fs/btrfs/inode.c          | 19
-rw-r--r--  fs/btrfs/ordered-data.c   |  5
-rw-r--r--  fs/btrfs/qgroup.c         | 21
-rw-r--r--  fs/btrfs/super.c          | 12
-rw-r--r--  fs/btrfs/tree-log.c       |  8
7 files changed, 85 insertions(+), 43 deletions(-)
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 3df7b9d7fbe8..59b489d7e4b5 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -152,37 +152,39 @@ again:
return ERR_PTR(-ENOMEM);
btrfs_init_delayed_node(node, root, ino);
+ /* Cached in the inode and can be accessed. */
+ refcount_set(&node->refs, 2);
+ btrfs_delayed_node_ref_tracker_alloc(node, tracker, GFP_NOFS);
+ btrfs_delayed_node_ref_tracker_alloc(node, &node->inode_cache_tracker, GFP_NOFS);
+
/* Allocate and reserve the slot, from now it can return a NULL from xa_load(). */
ret = xa_reserve(&root->delayed_nodes, ino, GFP_NOFS);
- if (ret == -ENOMEM) {
- btrfs_delayed_node_ref_tracker_dir_exit(node);
- kmem_cache_free(delayed_node_cache, node);
- return ERR_PTR(-ENOMEM);
- }
+ if (ret == -ENOMEM)
+ goto cleanup;
+
xa_lock(&root->delayed_nodes);
ptr = xa_load(&root->delayed_nodes, ino);
if (ptr) {
/* Somebody inserted it, go back and read it. */
xa_unlock(&root->delayed_nodes);
- btrfs_delayed_node_ref_tracker_dir_exit(node);
- kmem_cache_free(delayed_node_cache, node);
- node = NULL;
- goto again;
+ goto cleanup;
}
ptr = __xa_store(&root->delayed_nodes, ino, node, GFP_ATOMIC);
ASSERT(xa_err(ptr) != -EINVAL);
ASSERT(xa_err(ptr) != -ENOMEM);
ASSERT(ptr == NULL);
-
- /* Cached in the inode and can be accessed. */
- refcount_set(&node->refs, 2);
- btrfs_delayed_node_ref_tracker_alloc(node, tracker, GFP_ATOMIC);
- btrfs_delayed_node_ref_tracker_alloc(node, &node->inode_cache_tracker, GFP_ATOMIC);
-
btrfs_inode->delayed_node = node;
xa_unlock(&root->delayed_nodes);
return node;
+cleanup:
+ btrfs_delayed_node_ref_tracker_free(node, tracker);
+ btrfs_delayed_node_ref_tracker_free(node, &node->inode_cache_tracker);
+ btrfs_delayed_node_ref_tracker_dir_exit(node);
+ kmem_cache_free(delayed_node_cache, node);
+ if (ret)
+ return ERR_PTR(ret);
+ goto again;
}
/*
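
The delayed-inode.c change moves the refcount and ref-tracker setup ahead of the
XArray insertion and funnels both failure exits (xa_reserve() returning -ENOMEM,
and losing the insertion race) through a single cleanup label. Below is a minimal
userspace sketch of that shape; the slot table, slot_reserve() and get_or_create()
are invented stand-ins for the XArray and the btrfs helpers, and error reporting
is simplified to returning NULL.

    #include <stdlib.h>

    #define NSLOTS 16

    struct node {
        unsigned long ino;
        int refs;
    };

    static struct node *slots[NSLOTS];   /* toy stand-in for root->delayed_nodes */

    /* Pretend reservation step: the real xa_reserve() can fail with -ENOMEM. */
    static int slot_reserve(unsigned long ino)
    {
        (void)ino;
        return 0;
    }

    static struct node *get_or_create(unsigned long ino)
    {
        struct node *node;
        int ret;

    again:
        node = slots[ino % NSLOTS];
        if (node)
            return node;                 /* fast path: already cached */

        node = calloc(1, sizeof(*node));
        if (!node)
            return NULL;
        node->ino = ino;
        node->refs = 2;                  /* take references before publishing */

        ret = slot_reserve(ino);
        if (ret)
            goto cleanup;                /* hard failure: free and bail out */

        if (slots[ino % NSLOTS]) {
            ret = 0;
            goto cleanup;                /* lost the race: free, then retry */
        }
        slots[ino % NSLOTS] = node;      /* publish the initialized node */
        return node;

    cleanup:
        free(node);                      /* single teardown path for all exits */
        if (ret)
            return NULL;
        goto again;                      /* reread the winner's node */
    }

Every failure exit funnels through one label, so the teardown (the tracker frees,
dir_exit and cache free in the real code) lives in exactly one place.
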
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 23273d0e6f22..1a07edaefaa0 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1691,14 +1691,15 @@ static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode,
unsigned long range_bitmap = 0;
bool submitted_io = false;
int found_error = 0;
+ const u64 end = start + len;
const u64 folio_start = folio_pos(folio);
+ const u64 folio_end = folio_start + folio_size(folio);
const unsigned int blocks_per_folio = btrfs_blocks_per_folio(fs_info, folio);
u64 cur;
int bit;
int ret = 0;
- ASSERT(start >= folio_start &&
- start + len <= folio_start + folio_size(folio));
+ ASSERT(start >= folio_start && end <= folio_end);
ret = btrfs_writepage_cow_fixup(folio);
if (ret == -EAGAIN) {
@@ -1714,7 +1715,7 @@ static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode,
return ret;
}
- for (cur = start; cur < start + len; cur += fs_info->sectorsize)
+ for (cur = start; cur < end; cur += fs_info->sectorsize)
set_bit((cur - folio_start) >> fs_info->sectorsize_bits, &range_bitmap);
bitmap_and(&bio_ctrl->submit_bitmap, &bio_ctrl->submit_bitmap, &range_bitmap,
blocks_per_folio);
@@ -1725,8 +1726,25 @@ static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode,
cur = folio_pos(folio) + (bit << fs_info->sectorsize_bits);
if (cur >= i_size) {
+ struct btrfs_ordered_extent *ordered;
+ unsigned long flags;
+
+ ordered = btrfs_lookup_first_ordered_range(inode, cur,
+ fs_info->sectorsize);
+ /*
+ * We have just run delalloc before getting here, so
+ * there must be an ordered extent.
+ */
+ ASSERT(ordered != NULL);
+ spin_lock_irqsave(&inode->ordered_tree_lock, flags);
+ set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
+ ordered->truncated_len = min(ordered->truncated_len,
+ cur - ordered->file_offset);
+ spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
+ btrfs_put_ordered_extent(ordered);
+
btrfs_mark_ordered_io_finished(inode, folio, cur,
- start + len - cur, true);
+ fs_info->sectorsize, true);
/*
* This range is beyond i_size, thus we don't need to
* bother writing back.
@@ -1735,9 +1753,8 @@ static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode,
* writeback the sectors with subpage dirty bits,
* causing writeback without ordered extent.
*/
- btrfs_folio_clear_dirty(fs_info, folio, cur,
- start + len - cur);
- break;
+ btrfs_folio_clear_dirty(fs_info, folio, cur, fs_info->sectorsize);
+ continue;
}
ret = submit_one_sector(inode, folio, cur, bio_ctrl, i_size);
if (unlikely(ret < 0)) {
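
The first extent_io.c hunk hoists end and folio_end into named constants; the
second fills range_bitmap with one bit per block of the folio inside [start, end);
the third switches the beyond-i_size case from truncating the loop (break) to
handling each sector individually (continue), marking the covering ordered extent
truncated first. The bitmap indexing is plain arithmetic and can be checked in
isolation; a standalone sketch with invented sizes (4K sectors, a 64K folio)
standing in for fs_info:

    #include <stdio.h>

    int main(void)
    {
        const unsigned int sectorsize = 4096;
        const unsigned int sectorsize_bits = 12;         /* log2(4096) */
        const unsigned long long folio_start = 0x10000;  /* folio_pos(folio) */
        const unsigned long long start = 0x12000;        /* range start */
        const unsigned long long end = 0x16000;          /* start + len */
        unsigned long range_bitmap = 0;

        /* Same walk as extent_writepage_io(): one bit per block in range. */
        for (unsigned long long cur = start; cur < end; cur += sectorsize)
            range_bitmap |= 1UL << ((cur - folio_start) >> sectorsize_bits);

        printf("range bitmap: 0x%lx\n", range_bitmap);   /* blocks 2..5: 0x3c */
        return 0;
    }
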
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 51401d586a7b..27a562bad6e8 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3976,11 +3976,6 @@ static int btrfs_read_locked_inode(struct btrfs_inode *inode, struct btrfs_path
btrfs_set_inode_mapping_order(inode);
cache_index:
- ret = btrfs_init_file_extent_tree(inode);
- if (ret)
- goto out;
- btrfs_inode_set_file_extent_range(inode, 0,
- round_up(i_size_read(vfs_inode), fs_info->sectorsize));
/*
* If we were modified in the current generation and evicted from memory
* and then re-read we need to do a full sync since we don't have any
@@ -4067,6 +4062,20 @@ cache_acl:
btrfs_ino(inode), btrfs_root_id(root), ret);
}
+ /*
+ * We don't need the path anymore, so release it to avoid holding a read
+ * lock on a leaf while calling btrfs_init_file_extent_tree(), which can
+ * allocate memory that triggers reclaim (GFP_KERNEL) and cause a locking
+ * dependency.
+ */
+ btrfs_release_path(path);
+
+ ret = btrfs_init_file_extent_tree(inode);
+ if (ret)
+ goto out;
+ btrfs_inode_set_file_extent_range(inode, 0,
+ round_up(i_size_read(vfs_inode), fs_info->sectorsize));
+
if (!maybe_acls)
cache_no_acl(vfs_inode);
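
The inode.c movement is a lock-ordering fix: btrfs_release_path() drops the read
lock on the leaf before btrfs_init_file_extent_tree() allocates with GFP_KERNEL,
so reclaim triggered by that allocation can never wait behind the very lock this
task holds. A minimal sketch of the general rule, using a pthread mutex and
invented names (leaf_lock, load_metadata()) rather than the btrfs primitives:

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t leaf_lock = PTHREAD_MUTEX_INITIALIZER;

    struct inode_info {
        long isize;           /* copied out under the lock */
        char *extent_state;   /* allocated only after unlocking */
    };

    static int load_metadata(struct inode_info *info, const long *shared_isize)
    {
        pthread_mutex_lock(&leaf_lock);
        info->isize = *shared_isize;      /* copy what we need out of the leaf */
        pthread_mutex_unlock(&leaf_lock); /* release BEFORE allocating */

        /*
         * If this allocation could block on a path that itself needs
         * leaf_lock (as kernel reclaim can need the btree leaf lock),
         * doing it while still holding the lock would risk deadlock.
         */
        info->extent_state = malloc(4096);
        return info->extent_state ? 0 : -1;
    }

The safe ordering is to copy what you need out of the locked structure, unlock,
and only then perform any allocation that may block or recurse.
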
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 2829f20d7bb5..8a8aa6ed405b 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -1098,8 +1098,9 @@ struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
struct rb_node *prev;
struct rb_node *next;
struct btrfs_ordered_extent *entry = NULL;
+ unsigned long flags;
- spin_lock_irq(&inode->ordered_tree_lock);
+ spin_lock_irqsave(&inode->ordered_tree_lock, flags);
node = inode->ordered_tree.rb_node;
/*
* Here we don't want to use tree_search() which will use tree->last
@@ -1154,7 +1155,7 @@ out:
trace_btrfs_ordered_extent_lookup_first_range(inode, entry);
}
- spin_unlock_irq(&inode->ordered_tree_lock);
+ spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
return entry;
}
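
The ordered-data.c conversion from spin_lock_irq() to spin_lock_irqsave() makes
btrfs_lookup_first_ordered_range() safe to call when interrupts may already be
disabled: the _irq variants unconditionally re-enable IRQs on unlock, while the
irqsave pair restores whatever state the caller had, which keeps the new caller
added in the extent_io.c hunk above safe whatever its IRQ context. A schematic
kernel-style fragment (not a standalone program) contrasting the two:

    /* Only correct when the caller is known to run with IRQs enabled: */
    spin_lock_irq(&inode->ordered_tree_lock);
    /* ... critical section ... */
    spin_unlock_irq(&inode->ordered_tree_lock);      /* IRQs forced back on */

    /* Correct regardless of the caller's IRQ state: */
    unsigned long flags;

    spin_lock_irqsave(&inode->ordered_tree_lock, flags);
    /* ... critical section ... */
    spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);  /* state restored */
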
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 31ad8580322a..febc22d1b648 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -3244,9 +3244,15 @@ static int qgroup_snapshot_quick_inherit(struct btrfs_fs_info *fs_info,
{
struct btrfs_qgroup *src;
struct btrfs_qgroup *parent;
+ struct btrfs_qgroup *qgroup;
struct btrfs_qgroup_list *list;
+ LIST_HEAD(qgroup_list);
+ const u32 nodesize = fs_info->nodesize;
int nr_parents = 0;
+ if (btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_FULL)
+ return 0;
+
src = find_qgroup_rb(fs_info, srcid);
if (!src)
return -ENOENT;
@@ -3281,8 +3287,19 @@ static int qgroup_snapshot_quick_inherit(struct btrfs_fs_info *fs_info,
if (parent->excl != parent->rfer)
return 1;
- parent->excl += fs_info->nodesize;
- parent->rfer += fs_info->nodesize;
+ qgroup_iterator_add(&qgroup_list, parent);
+ list_for_each_entry(qgroup, &qgroup_list, iterator) {
+ qgroup->rfer += nodesize;
+ qgroup->rfer_cmpr += nodesize;
+ qgroup->excl += nodesize;
+ qgroup->excl_cmpr += nodesize;
+ qgroup_dirty(fs_info, qgroup);
+
+ /* Append parent qgroups to @qgroup_list. */
+ list_for_each_entry(list, &qgroup->groups, next_group)
+ qgroup_iterator_add(&qgroup_list, list->group);
+ }
+ qgroup_iterator_clean(&qgroup_list);
return 0;
}
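
The qgroup hunk replaces the direct += on a single parent with the iterator
pattern used elsewhere in qgroup.c: seed a worklist with the first qgroup and,
while walking it, append each entry's own parents, so the whole ancestor chain is
accounted without recursion (appending at the tail is safe during
list_for_each_entry()). A self-contained sketch of that growing-worklist shape;
the qg struct, worklist_add() and account_nodesize() are invented, with a queued
flag standing in for the kernel's list_empty(&qgroup->iterator) membership test:

    #include <stdio.h>

    struct qg {
        const char *name;
        unsigned long long rfer, excl;
        struct qg *parents[4];   /* up edges in the qgroup hierarchy */
        int nparents;
        struct qg *next;         /* worklist link */
        int queued;              /* stands in for list_empty(&iterator) test */
    };

    static void worklist_add(struct qg **tail, struct qg *g)
    {
        if (g->queued)           /* already queued once: skip, like iterator_add */
            return;
        g->queued = 1;
        (*tail)->next = g;
        *tail = g;
    }

    static void account_nodesize(struct qg *start, unsigned long long nodesize)
    {
        struct qg *tail = start;

        start->queued = 1;
        for (struct qg *g = start; g; g = g->next) {
            g->rfer += nodesize;                         /* update this level */
            g->excl += nodesize;
            for (int i = 0; i < g->nparents; i++)
                worklist_add(&tail, g->parents[i]);      /* then queue parents */
        }
    }

    int main(void)
    {
        struct qg root = { .name = "0/5" };
        struct qg mid  = { .name = "1/100", .parents = { &root }, .nparents = 1 };
        struct qg leaf = { .name = "2/200", .parents = { &mid },  .nparents = 1 };

        account_nodesize(&leaf, 16384);
        printf("%s rfer=%llu\n", root.name, root.rfer);  /* whole chain updated */
        return 0;
    }

The real code also walks the list once more at the end (qgroup_iterator_clean())
to reset membership; reusing the sketch would likewise require clearing queued.
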
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 430e7419349c..c40944ca7b94 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -736,14 +736,12 @@ bool btrfs_check_options(const struct btrfs_fs_info *info,
*/
void btrfs_set_free_space_cache_settings(struct btrfs_fs_info *fs_info)
{
- if (fs_info->sectorsize < PAGE_SIZE) {
+ if (fs_info->sectorsize < PAGE_SIZE && btrfs_test_opt(fs_info, SPACE_CACHE)) {
+ btrfs_info(fs_info,
+ "forcing free space tree for sector size %u with page size %lu",
+ fs_info->sectorsize, PAGE_SIZE);
btrfs_clear_opt(fs_info->mount_opt, SPACE_CACHE);
- if (!btrfs_test_opt(fs_info, FREE_SPACE_TREE)) {
- btrfs_info(fs_info,
- "forcing free space tree for sector size %u with page size %lu",
- fs_info->sectorsize, PAGE_SIZE);
- btrfs_set_opt(fs_info->mount_opt, FREE_SPACE_TREE);
- }
+ btrfs_set_opt(fs_info->mount_opt, FREE_SPACE_TREE);
}
/*
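
The super.c rewrite inverts the condition: instead of always clearing SPACE_CACHE
for sub-page sector sizes and only sometimes logging, the block now runs only when
the user explicitly asked for the v1 space cache, replacing it with the free space
tree and saying so exactly once. A compact sketch of that option-override flow,
with invented flag names (OPT_SPACE_CACHE, OPT_FREE_SPACE_TREE) in place of the
btrfs mount-option helpers:

    #include <stdio.h>

    #define OPT_SPACE_CACHE     (1u << 0)
    #define OPT_FREE_SPACE_TREE (1u << 1)

    static void fixup_cache_options(unsigned int *mount_opt,
                                    unsigned int sectorsize,
                                    unsigned long page_size)
    {
        /* The v1 space cache is not supported for sub-page sector sizes. */
        if (sectorsize < page_size && (*mount_opt & OPT_SPACE_CACHE)) {
            printf("forcing free space tree for sector size %u with page size %lu\n",
                   sectorsize, page_size);
            *mount_opt &= ~OPT_SPACE_CACHE;
            *mount_opt |= OPT_FREE_SPACE_TREE;
        }
    }
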
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index e0b0750696a1..1444857de9fe 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -189,7 +189,7 @@ static void do_abort_log_replay(struct walk_control *wc, const char *function,
btrfs_abort_transaction(wc->trans, error);
- if (wc->subvol_path->nodes[0]) {
+ if (wc->subvol_path && wc->subvol_path->nodes[0]) {
btrfs_crit(fs_info,
"subvolume (root %llu) leaf currently being processed:",
btrfs_root_id(wc->root));
@@ -6348,10 +6348,8 @@ again:
* and no keys greater than that, so bail out.
*/
break;
- } else if ((min_key->type == BTRFS_INODE_REF_KEY ||
- min_key->type == BTRFS_INODE_EXTREF_KEY) &&
- (inode->generation == trans->transid ||
- ctx->logging_conflict_inodes)) {
+ } else if (min_key->type == BTRFS_INODE_REF_KEY ||
+ min_key->type == BTRFS_INODE_EXTREF_KEY) {
u64 other_ino = 0;
u64 other_parent = 0;
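
The first tree-log.c hunk makes the abort helper tolerate being called before
wc->subvol_path has been allocated; the second drops the
generation/logging_conflict_inodes condition so inode ref keys are always
examined. The NULL guard is the classic check-the-pointer-before-its-members
pattern; a tiny sketch with invented types:

    #include <stdio.h>

    struct leaf { unsigned long long id; };
    struct path { struct leaf *nodes[8]; };
    struct walk_control { struct path *subvol_path; };

    static void report_leaf(const struct walk_control *wc)
    {
        /*
         * Test the pointer itself before its members: the abort helper
         * can run on error paths where the path was never allocated.
         */
        if (wc->subvol_path && wc->subvol_path->nodes[0])
            printf("leaf %llu currently being processed\n",
                   wc->subvol_path->nodes[0]->id);
    }
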