Diffstat (limited to 'fs')
-rw-r--r--  fs/btrfs/disk-io.c       |   1
-rw-r--r--  fs/btrfs/extent_io.c     |  68
-rw-r--r--  fs/btrfs/fs.h            |   7
-rw-r--r--  fs/btrfs/inode.c         |  29
-rw-r--r--  fs/btrfs/ordered-data.c  |   5
-rw-r--r--  fs/btrfs/qgroup.c        |  21
-rw-r--r--  fs/btrfs/subpage.c       | 129
-rw-r--r--  fs/btrfs/super.c         |  12
-rw-r--r--  fs/btrfs/tree-log.c      |   6
-rw-r--r--  fs/erofs/super.c         |  19
-rw-r--r--  fs/nfs/namespace.c       |   5
-rw-r--r--  fs/nfs/nfs4proc.c        |  13
-rw-r--r--  fs/nfs/nfs4trace.h       |   1
-rw-r--r--  fs/nfs_common/common.c   |   1
-rw-r--r--  fs/nfsd/netns.h          |   2
-rw-r--r--  fs/nfsd/nfs4proc.c       |   2
-rw-r--r--  fs/nfsd/nfs4state.c      |  49
-rw-r--r--  fs/nfsd/nfsctl.c         |  12
-rw-r--r--  fs/nfsd/nfsd.h           |   1
-rw-r--r--  fs/nfsd/nfssvc.c         |  30
-rw-r--r--  fs/nfsd/state.h          |   6
-rw-r--r--  fs/nfsd/vfs.c            |   4
-rw-r--r--  fs/smb/client/nterr.h    |   6
23 files changed, 296 insertions, 133 deletions
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 3a73d218af46..39fe4385ed36 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3320,7 +3320,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
fs_info->nodesize = nodesize;
fs_info->sectorsize = sectorsize;
fs_info->sectorsize_bits = ilog2(sectorsize);
- fs_info->sectors_per_page = (PAGE_SIZE >> fs_info->sectorsize_bits);
fs_info->csums_per_leaf = BTRFS_MAX_ITEM_SIZE(fs_info) / fs_info->csum_size;
fs_info->stripesize = stripesize;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index d8d9f4c95c7a..1e855c5854ce 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1182,7 +1182,7 @@ static bool find_next_delalloc_bitmap(struct folio *folio,
{
struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
const u64 folio_start = folio_pos(folio);
- const unsigned int bitmap_size = fs_info->sectors_per_page;
+ const unsigned int bitmap_size = btrfs_blocks_per_folio(fs_info, folio);
unsigned int start_bit;
unsigned int first_zero;
unsigned int first_set;
@@ -1224,6 +1224,7 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
const bool is_subpage = btrfs_is_subpage(fs_info, folio->mapping);
const u64 page_start = folio_pos(folio);
const u64 page_end = page_start + folio_size(folio) - 1;
+ const unsigned int blocks_per_folio = btrfs_blocks_per_folio(fs_info, folio);
unsigned long delalloc_bitmap = 0;
/*
* Save the last found delalloc end. As the delalloc end can go beyond
@@ -1249,13 +1250,13 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
/* Save the dirty bitmap as our submission bitmap will be a subset of it. */
if (btrfs_is_subpage(fs_info, inode->vfs_inode.i_mapping)) {
- ASSERT(fs_info->sectors_per_page > 1);
+ ASSERT(blocks_per_folio > 1);
btrfs_get_subpage_dirty_bitmap(fs_info, folio, &bio_ctrl->submit_bitmap);
} else {
bio_ctrl->submit_bitmap = 1;
}
- for_each_set_bit(bit, &bio_ctrl->submit_bitmap, fs_info->sectors_per_page) {
+ for_each_set_bit(bit, &bio_ctrl->submit_bitmap, blocks_per_folio) {
u64 start = page_start + (bit << fs_info->sectorsize_bits);
btrfs_folio_set_lock(fs_info, folio, start, fs_info->sectorsize);
@@ -1322,6 +1323,15 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
wbc);
if (ret >= 0)
last_finished_delalloc_end = found_start + found_len;
+ if (unlikely(ret < 0))
+ btrfs_err_rl(fs_info,
+"failed to run delalloc range, root=%lld ino=%llu folio=%llu submit_bitmap=%*pbl start=%llu len=%u: %d",
+ btrfs_root_id(inode->root),
+ btrfs_ino(inode),
+ folio_pos(folio),
+ blocks_per_folio,
+ &bio_ctrl->submit_bitmap,
+ found_start, found_len, ret);
} else {
/*
* We've hit an error during previous delalloc range,
@@ -1364,7 +1374,7 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
unsigned int bitmap_size = min(
(last_finished_delalloc_end - page_start) >>
fs_info->sectorsize_bits,
- fs_info->sectors_per_page);
+ blocks_per_folio);
for_each_set_bit(bit, &bio_ctrl->submit_bitmap, bitmap_size)
btrfs_mark_ordered_io_finished(inode, folio,
@@ -1388,7 +1398,7 @@ out:
* If all ranges are submitted asynchronously, we just need to account
* for them here.
*/
- if (bitmap_empty(&bio_ctrl->submit_bitmap, fs_info->sectors_per_page)) {
+ if (bitmap_empty(&bio_ctrl->submit_bitmap, blocks_per_folio)) {
wbc->nr_to_write -= delalloc_to_write;
return 1;
}
@@ -1488,13 +1498,15 @@ static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode,
unsigned long range_bitmap = 0;
bool submitted_io = false;
int found_error = 0;
+ const u64 end = start + len;
const u64 folio_start = folio_pos(folio);
+ const u64 folio_end = folio_start + folio_size(folio);
+ const unsigned int blocks_per_folio = btrfs_blocks_per_folio(fs_info, folio);
u64 cur;
int bit;
int ret = 0;
- ASSERT(start >= folio_start &&
- start + len <= folio_start + folio_size(folio));
+ ASSERT(start >= folio_start && end <= folio_end);
ret = btrfs_writepage_cow_fixup(folio);
if (ret) {
@@ -1504,19 +1516,36 @@ static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode,
return 1;
}
- for (cur = start; cur < start + len; cur += fs_info->sectorsize)
+ for (cur = start; cur < end; cur += fs_info->sectorsize)
set_bit((cur - folio_start) >> fs_info->sectorsize_bits, &range_bitmap);
bitmap_and(&bio_ctrl->submit_bitmap, &bio_ctrl->submit_bitmap, &range_bitmap,
- fs_info->sectors_per_page);
+ blocks_per_folio);
bio_ctrl->end_io_func = end_bbio_data_write;
- for_each_set_bit(bit, &bio_ctrl->submit_bitmap, fs_info->sectors_per_page) {
+ for_each_set_bit(bit, &bio_ctrl->submit_bitmap, blocks_per_folio) {
cur = folio_pos(folio) + (bit << fs_info->sectorsize_bits);
if (cur >= i_size) {
+ struct btrfs_ordered_extent *ordered;
+ unsigned long flags;
+
+ ordered = btrfs_lookup_first_ordered_range(inode, cur,
+ fs_info->sectorsize);
+ /*
+ * We have just run delalloc before getting here, so
+ * there must be an ordered extent.
+ */
+ ASSERT(ordered != NULL);
+ spin_lock_irqsave(&inode->ordered_tree_lock, flags);
+ set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
+ ordered->truncated_len = min(ordered->truncated_len,
+ cur - ordered->file_offset);
+ spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
+ btrfs_put_ordered_extent(ordered);
+
btrfs_mark_ordered_io_finished(inode, folio, cur,
- start + len - cur, true);
+ fs_info->sectorsize, true);
/*
* This range is beyond i_size, thus we don't need to
* bother writing back.
@@ -1525,9 +1554,8 @@ static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode,
* writeback the sectors with subpage dirty bits,
* causing writeback without ordered extent.
*/
- btrfs_folio_clear_dirty(fs_info, folio, cur,
- start + len - cur);
- break;
+ btrfs_folio_clear_dirty(fs_info, folio, cur, fs_info->sectorsize);
+ continue;
}
ret = submit_one_sector(inode, folio, cur, bio_ctrl, i_size);
if (unlikely(ret < 0)) {
@@ -1586,6 +1614,7 @@ static int extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ctrl
size_t pg_offset;
loff_t i_size = i_size_read(&inode->vfs_inode);
unsigned long end_index = i_size >> PAGE_SHIFT;
+ const unsigned int blocks_per_folio = btrfs_blocks_per_folio(fs_info, folio);
trace_extent_writepage(folio, &inode->vfs_inode, bio_ctrl->wbc);
@@ -1621,6 +1650,12 @@ static int extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ctrl
PAGE_SIZE, bio_ctrl, i_size);
if (ret == 1)
return 0;
+ if (ret < 0)
+ btrfs_err_rl(fs_info,
+"failed to submit blocks, root=%lld inode=%llu folio=%llu submit_bitmap=%*pbl: %d",
+ btrfs_root_id(inode->root), btrfs_ino(inode),
+ folio_pos(folio), blocks_per_folio,
+ &bio_ctrl->submit_bitmap, ret);
bio_ctrl->wbc->nr_to_write--;
@@ -1914,9 +1949,10 @@ static int submit_eb_subpage(struct folio *folio, struct writeback_control *wbc)
u64 folio_start = folio_pos(folio);
int bit_start = 0;
int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits;
+ const unsigned int blocks_per_folio = btrfs_blocks_per_folio(fs_info, folio);
/* Lock and write each dirty extent buffers in the range */
- while (bit_start < fs_info->sectors_per_page) {
+ while (bit_start < blocks_per_folio) {
struct btrfs_subpage *subpage = folio_get_private(folio);
struct extent_buffer *eb;
unsigned long flags;
@@ -1932,7 +1968,7 @@ static int submit_eb_subpage(struct folio *folio, struct writeback_control *wbc)
break;
}
spin_lock_irqsave(&subpage->lock, flags);
- if (!test_bit(bit_start + btrfs_bitmap_nr_dirty * fs_info->sectors_per_page,
+ if (!test_bit(bit_start + btrfs_bitmap_nr_dirty * blocks_per_folio,
subpage->bitmaps)) {
spin_unlock_irqrestore(&subpage->lock, flags);
spin_unlock(&folio->mapping->i_private_lock);
diff --git a/fs/btrfs/fs.h b/fs/btrfs/fs.h
index 374843aca60d..5c8d6149e142 100644
--- a/fs/btrfs/fs.h
+++ b/fs/btrfs/fs.h
@@ -708,7 +708,6 @@ struct btrfs_fs_info {
* running.
*/
refcount_t scrub_workers_refcnt;
- u32 sectors_per_page;
struct workqueue_struct *scrub_workers;
struct btrfs_discard_ctl discard_ctl;
@@ -976,6 +975,12 @@ static inline u32 count_max_extents(const struct btrfs_fs_info *fs_info, u64 siz
return div_u64(size + fs_info->max_extent_size - 1, fs_info->max_extent_size);
}
+static inline unsigned int btrfs_blocks_per_folio(const struct btrfs_fs_info *fs_info,
+ const struct folio *folio)
+{
+ return folio_size(folio) >> fs_info->sectorsize_bits;
+}
+
bool btrfs_exclop_start(struct btrfs_fs_info *fs_info,
enum btrfs_exclusive_operation type);
bool btrfs_exclop_start_try_lock(struct btrfs_fs_info *fs_info,
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index ce13b0ec978e..b1d450459f73 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1159,19 +1159,14 @@ static void submit_uncompressed_range(struct btrfs_inode *inode,
&wbc, false);
wbc_detach_inode(&wbc);
if (ret < 0) {
- btrfs_cleanup_ordered_extents(inode, locked_folio,
- start, end - start + 1);
- if (locked_folio) {
- const u64 page_start = folio_pos(locked_folio);
-
- folio_start_writeback(locked_folio);
- folio_end_writeback(locked_folio);
- btrfs_mark_ordered_io_finished(inode, locked_folio,
- page_start, PAGE_SIZE,
- !ret);
- mapping_set_error(locked_folio->mapping, ret);
- folio_unlock(locked_folio);
- }
+ btrfs_cleanup_ordered_extents(inode, NULL, start, end - start + 1);
+ if (locked_folio)
+ btrfs_folio_end_lock(inode->root->fs_info, locked_folio,
+ start, async_extent->ram_size);
+ btrfs_err_rl(inode->root->fs_info,
+ "%s failed, root=%llu inode=%llu start=%llu len=%llu: %d",
+ __func__, btrfs_root_id(inode->root),
+ btrfs_ino(inode), start, async_extent->ram_size, ret);
}
}
@@ -1632,6 +1627,10 @@ out_unlock:
&cached, clear_bits, page_ops);
btrfs_qgroup_free_data(inode, NULL, start, end - start + 1, NULL);
}
+ btrfs_err_rl(fs_info,
+ "%s failed, root=%llu inode=%llu start=%llu len=%llu: %d",
+ __func__, btrfs_root_id(inode->root),
+ btrfs_ino(inode), orig_start, end + 1 - orig_start, ret);
return ret;
}
@@ -2382,6 +2381,10 @@ error:
btrfs_qgroup_free_data(inode, NULL, cur_offset, end - cur_offset + 1, NULL);
}
btrfs_free_path(path);
+ btrfs_err_rl(fs_info,
+ "%s failed, root=%llu inode=%llu start=%llu len=%llu: %d",
+ __func__, btrfs_root_id(inode->root),
+ btrfs_ino(inode), start, end + 1 - start, ret);
return ret;
}
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 880f9553d79d..6ac254a52907 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -1080,8 +1080,9 @@ struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
struct rb_node *prev;
struct rb_node *next;
struct btrfs_ordered_extent *entry = NULL;
+ unsigned long flags;
- spin_lock_irq(&inode->ordered_tree_lock);
+ spin_lock_irqsave(&inode->ordered_tree_lock, flags);
node = inode->ordered_tree.rb_node;
/*
* Here we don't want to use tree_search() which will use tree->last
@@ -1136,7 +1137,7 @@ out:
trace_btrfs_ordered_extent_lookup_first_range(inode, entry);
}
- spin_unlock_irq(&inode->ordered_tree_lock);
+ spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
return entry;
}
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 3c77f3506faf..029017afaf34 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -3295,9 +3295,15 @@ static int qgroup_snapshot_quick_inherit(struct btrfs_fs_info *fs_info,
{
struct btrfs_qgroup *src;
struct btrfs_qgroup *parent;
+ struct btrfs_qgroup *qgroup;
struct btrfs_qgroup_list *list;
+ LIST_HEAD(qgroup_list);
+ const u32 nodesize = fs_info->nodesize;
int nr_parents = 0;
+ if (btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_FULL)
+ return 0;
+
src = find_qgroup_rb(fs_info, srcid);
if (!src)
return -ENOENT;
@@ -3332,8 +3338,19 @@ static int qgroup_snapshot_quick_inherit(struct btrfs_fs_info *fs_info,
if (parent->excl != parent->rfer)
return 1;
- parent->excl += fs_info->nodesize;
- parent->rfer += fs_info->nodesize;
+ qgroup_iterator_add(&qgroup_list, parent);
+ list_for_each_entry(qgroup, &qgroup_list, iterator) {
+ qgroup->rfer += nodesize;
+ qgroup->rfer_cmpr += nodesize;
+ qgroup->excl += nodesize;
+ qgroup->excl_cmpr += nodesize;
+ qgroup_dirty(fs_info, qgroup);
+
+ /* Append parent qgroups to @qgroup_list. */
+ list_for_each_entry(list, &qgroup->groups, next_group)
+ qgroup_iterator_add(&qgroup_list, list->group);
+ }
+ qgroup_iterator_clean(&qgroup_list);
return 0;
}
diff --git a/fs/btrfs/subpage.c b/fs/btrfs/subpage.c
index 71a56aaac7ad..7e5ecc12b732 100644
--- a/fs/btrfs/subpage.c
+++ b/fs/btrfs/subpage.c
@@ -93,6 +93,9 @@ int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
{
struct btrfs_subpage *subpage;
+ /* For metadata we don't support large folio yet. */
+ ASSERT(!folio_test_large(folio));
+
/*
* We have cases like a dummy extent buffer page, which is not mapped
* and doesn't need to be locked.
@@ -134,7 +137,8 @@ struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
ASSERT(fs_info->sectorsize < PAGE_SIZE);
real_size = struct_size(ret, bitmaps,
- BITS_TO_LONGS(btrfs_bitmap_nr_max * fs_info->sectors_per_page));
+ BITS_TO_LONGS(btrfs_bitmap_nr_max *
+ (PAGE_SIZE >> fs_info->sectorsize_bits)));
ret = kzalloc(real_size, GFP_NOFS);
if (!ret)
return ERR_PTR(-ENOMEM);
@@ -211,11 +215,13 @@ static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
#define subpage_calc_start_bit(fs_info, folio, name, start, len) \
({ \
- unsigned int __start_bit; \
+ unsigned int __start_bit; \
+ const unsigned int blocks_per_folio = \
+ btrfs_blocks_per_folio(fs_info, folio); \
\
btrfs_subpage_assert(fs_info, folio, start, len); \
__start_bit = offset_in_page(start) >> fs_info->sectorsize_bits; \
- __start_bit += fs_info->sectors_per_page * btrfs_bitmap_nr_##name; \
+ __start_bit += blocks_per_folio * btrfs_bitmap_nr_##name; \
__start_bit; \
})
@@ -323,7 +329,8 @@ void btrfs_folio_end_lock_bitmap(const struct btrfs_fs_info *fs_info,
struct folio *folio, unsigned long bitmap)
{
struct btrfs_subpage *subpage = folio_get_private(folio);
- const int start_bit = fs_info->sectors_per_page * btrfs_bitmap_nr_locked;
+ const unsigned int blocks_per_folio = btrfs_blocks_per_folio(fs_info, folio);
+ const int start_bit = blocks_per_folio * btrfs_bitmap_nr_locked;
unsigned long flags;
bool last = false;
int cleared = 0;
@@ -341,7 +348,7 @@ void btrfs_folio_end_lock_bitmap(const struct btrfs_fs_info *fs_info,
}
spin_lock_irqsave(&subpage->lock, flags);
- for_each_set_bit(bit, &bitmap, fs_info->sectors_per_page) {
+ for_each_set_bit(bit, &bitmap, blocks_per_folio) {
if (test_and_clear_bit(bit + start_bit, subpage->bitmaps))
cleared++;
}
@@ -352,15 +359,27 @@ void btrfs_folio_end_lock_bitmap(const struct btrfs_fs_info *fs_info,
folio_unlock(folio);
}
-#define subpage_test_bitmap_all_set(fs_info, subpage, name) \
+#define subpage_test_bitmap_all_set(fs_info, folio, name) \
+({ \
+ struct btrfs_subpage *subpage = folio_get_private(folio); \
+ const unsigned int blocks_per_folio = \
+ btrfs_blocks_per_folio(fs_info, folio); \
+ \
bitmap_test_range_all_set(subpage->bitmaps, \
- fs_info->sectors_per_page * btrfs_bitmap_nr_##name, \
- fs_info->sectors_per_page)
+ blocks_per_folio * btrfs_bitmap_nr_##name, \
+ blocks_per_folio); \
+})
-#define subpage_test_bitmap_all_zero(fs_info, subpage, name) \
+#define subpage_test_bitmap_all_zero(fs_info, folio, name) \
+({ \
+ struct btrfs_subpage *subpage = folio_get_private(folio); \
+ const unsigned int blocks_per_folio = \
+ btrfs_blocks_per_folio(fs_info, folio); \
+ \
bitmap_test_range_all_zero(subpage->bitmaps, \
- fs_info->sectors_per_page * btrfs_bitmap_nr_##name, \
- fs_info->sectors_per_page)
+ blocks_per_folio * btrfs_bitmap_nr_##name, \
+ blocks_per_folio); \
+})
void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info,
struct folio *folio, u64 start, u32 len)
@@ -372,7 +391,7 @@ void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info,
spin_lock_irqsave(&subpage->lock, flags);
bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
- if (subpage_test_bitmap_all_set(fs_info, subpage, uptodate))
+ if (subpage_test_bitmap_all_set(fs_info, folio, uptodate))
folio_mark_uptodate(folio);
spin_unlock_irqrestore(&subpage->lock, flags);
}
@@ -426,7 +445,7 @@ bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
spin_lock_irqsave(&subpage->lock, flags);
bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
- if (subpage_test_bitmap_all_zero(fs_info, subpage, dirty))
+ if (subpage_test_bitmap_all_zero(fs_info, folio, dirty))
last = true;
spin_unlock_irqrestore(&subpage->lock, flags);
return last;
@@ -484,7 +503,7 @@ void btrfs_subpage_clear_writeback(const struct btrfs_fs_info *fs_info,
spin_lock_irqsave(&subpage->lock, flags);
bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
- if (subpage_test_bitmap_all_zero(fs_info, subpage, writeback)) {
+ if (subpage_test_bitmap_all_zero(fs_info, folio, writeback)) {
ASSERT(folio_test_writeback(folio));
folio_end_writeback(folio);
}
@@ -515,7 +534,7 @@ void btrfs_subpage_clear_ordered(const struct btrfs_fs_info *fs_info,
spin_lock_irqsave(&subpage->lock, flags);
bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
- if (subpage_test_bitmap_all_zero(fs_info, subpage, ordered))
+ if (subpage_test_bitmap_all_zero(fs_info, folio, ordered))
folio_clear_ordered(folio);
spin_unlock_irqrestore(&subpage->lock, flags);
}
@@ -530,7 +549,7 @@ void btrfs_subpage_set_checked(const struct btrfs_fs_info *fs_info,
spin_lock_irqsave(&subpage->lock, flags);
bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
- if (subpage_test_bitmap_all_set(fs_info, subpage, checked))
+ if (subpage_test_bitmap_all_set(fs_info, folio, checked))
folio_set_checked(folio);
spin_unlock_irqrestore(&subpage->lock, flags);
}
@@ -652,6 +671,31 @@ IMPLEMENT_BTRFS_PAGE_OPS(ordered, folio_set_ordered, folio_clear_ordered,
IMPLEMENT_BTRFS_PAGE_OPS(checked, folio_set_checked, folio_clear_checked,
folio_test_checked);
+#define GET_SUBPAGE_BITMAP(fs_info, folio, name, dst) \
+{ \
+ const unsigned int blocks_per_folio = \
+ btrfs_blocks_per_folio(fs_info, folio); \
+ const struct btrfs_subpage *subpage = folio_get_private(folio); \
+ \
+ ASSERT(blocks_per_folio < BITS_PER_LONG); \
+ *dst = bitmap_read(subpage->bitmaps, \
+ blocks_per_folio * btrfs_bitmap_nr_##name, \
+ blocks_per_folio); \
+}
+
+#define SUBPAGE_DUMP_BITMAP(fs_info, folio, name, start, len) \
+{ \
+ unsigned long bitmap; \
+ const unsigned int blocks_per_folio = \
+ btrfs_blocks_per_folio(fs_info, folio); \
+ \
+ GET_SUBPAGE_BITMAP(fs_info, folio, name, &bitmap); \
+ btrfs_warn(fs_info, \
+ "dumpping bitmap start=%llu len=%u folio=%llu " #name "_bitmap=%*pbl", \
+ start, len, folio_pos(folio), \
+ blocks_per_folio, &bitmap); \
+}
+
/*
* Make sure not only the page dirty bit is cleared, but also subpage dirty bit
* is cleared.
@@ -677,6 +721,10 @@ void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info,
subpage = folio_get_private(folio);
ASSERT(subpage);
spin_lock_irqsave(&subpage->lock, flags);
+ if (unlikely(!bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits))) {
+ SUBPAGE_DUMP_BITMAP(fs_info, folio, dirty, start, len);
+ ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
+ }
- ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
spin_unlock_irqrestore(&subpage->lock, flags);
}
@@ -706,28 +754,21 @@ void btrfs_folio_set_lock(const struct btrfs_fs_info *fs_info,
nbits = len >> fs_info->sectorsize_bits;
spin_lock_irqsave(&subpage->lock, flags);
/* Target range should not yet be locked. */
- ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
+ if (unlikely(!bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits))) {
+ SUBPAGE_DUMP_BITMAP(fs_info, folio, locked, start, len);
+ ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
+ }
bitmap_set(subpage->bitmaps, start_bit, nbits);
ret = atomic_add_return(nbits, &subpage->nr_locked);
- ASSERT(ret <= fs_info->sectors_per_page);
+ ASSERT(ret <= btrfs_blocks_per_folio(fs_info, folio));
spin_unlock_irqrestore(&subpage->lock, flags);
}
-#define GET_SUBPAGE_BITMAP(subpage, fs_info, name, dst) \
-{ \
- const int sectors_per_page = fs_info->sectors_per_page; \
- \
- ASSERT(sectors_per_page < BITS_PER_LONG); \
- *dst = bitmap_read(subpage->bitmaps, \
- sectors_per_page * btrfs_bitmap_nr_##name, \
- sectors_per_page); \
-}
-
void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
struct folio *folio, u64 start, u32 len)
{
struct btrfs_subpage *subpage;
- const u32 sectors_per_page = fs_info->sectors_per_page;
+ const unsigned int blocks_per_folio = btrfs_blocks_per_folio(fs_info, folio);
unsigned long uptodate_bitmap;
unsigned long dirty_bitmap;
unsigned long writeback_bitmap;
@@ -737,28 +778,28 @@ void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
unsigned long flags;
ASSERT(folio_test_private(folio) && folio_get_private(folio));
- ASSERT(sectors_per_page > 1);
+ ASSERT(blocks_per_folio > 1);
subpage = folio_get_private(folio);
spin_lock_irqsave(&subpage->lock, flags);
- GET_SUBPAGE_BITMAP(subpage, fs_info, uptodate, &uptodate_bitmap);
- GET_SUBPAGE_BITMAP(subpage, fs_info, dirty, &dirty_bitmap);
- GET_SUBPAGE_BITMAP(subpage, fs_info, writeback, &writeback_bitmap);
- GET_SUBPAGE_BITMAP(subpage, fs_info, ordered, &ordered_bitmap);
- GET_SUBPAGE_BITMAP(subpage, fs_info, checked, &checked_bitmap);
- GET_SUBPAGE_BITMAP(subpage, fs_info, locked, &locked_bitmap);
+ GET_SUBPAGE_BITMAP(fs_info, folio, uptodate, &uptodate_bitmap);
+ GET_SUBPAGE_BITMAP(fs_info, folio, dirty, &dirty_bitmap);
+ GET_SUBPAGE_BITMAP(fs_info, folio, writeback, &writeback_bitmap);
+ GET_SUBPAGE_BITMAP(fs_info, folio, ordered, &ordered_bitmap);
+ GET_SUBPAGE_BITMAP(fs_info, folio, checked, &checked_bitmap);
+ GET_SUBPAGE_BITMAP(fs_info, folio, locked, &locked_bitmap);
spin_unlock_irqrestore(&subpage->lock, flags);
dump_page(folio_page(folio, 0), "btrfs subpage dump");
btrfs_warn(fs_info,
"start=%llu len=%u page=%llu, bitmaps uptodate=%*pbl dirty=%*pbl locked=%*pbl writeback=%*pbl ordered=%*pbl checked=%*pbl",
start, len, folio_pos(folio),
- sectors_per_page, &uptodate_bitmap,
- sectors_per_page, &dirty_bitmap,
- sectors_per_page, &locked_bitmap,
- sectors_per_page, &writeback_bitmap,
- sectors_per_page, &ordered_bitmap,
- sectors_per_page, &checked_bitmap);
+ blocks_per_folio, &uptodate_bitmap,
+ blocks_per_folio, &dirty_bitmap,
+ blocks_per_folio, &locked_bitmap,
+ blocks_per_folio, &writeback_bitmap,
+ blocks_per_folio, &ordered_bitmap,
+ blocks_per_folio, &checked_bitmap);
}
void btrfs_get_subpage_dirty_bitmap(struct btrfs_fs_info *fs_info,
@@ -769,10 +810,10 @@ void btrfs_get_subpage_dirty_bitmap(struct btrfs_fs_info *fs_info,
unsigned long flags;
ASSERT(folio_test_private(folio) && folio_get_private(folio));
- ASSERT(fs_info->sectors_per_page > 1);
+ ASSERT(btrfs_blocks_per_folio(fs_info, folio) > 1);
subpage = folio_get_private(folio);
spin_lock_irqsave(&subpage->lock, flags);
- GET_SUBPAGE_BITMAP(subpage, fs_info, dirty, ret_bitmap);
+ GET_SUBPAGE_BITMAP(fs_info, folio, dirty, ret_bitmap);
spin_unlock_irqrestore(&subpage->lock, flags);
}
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index b0d4ad7fbe48..833602511f62 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -722,14 +722,12 @@ bool btrfs_check_options(const struct btrfs_fs_info *info,
*/
void btrfs_set_free_space_cache_settings(struct btrfs_fs_info *fs_info)
{
- if (fs_info->sectorsize < PAGE_SIZE) {
+ if (fs_info->sectorsize < PAGE_SIZE && btrfs_test_opt(fs_info, SPACE_CACHE)) {
+ btrfs_info(fs_info,
+ "forcing free space tree for sector size %u with page size %lu",
+ fs_info->sectorsize, PAGE_SIZE);
btrfs_clear_opt(fs_info->mount_opt, SPACE_CACHE);
- if (!btrfs_test_opt(fs_info, FREE_SPACE_TREE)) {
- btrfs_info(fs_info,
- "forcing free space tree for sector size %u with page size %lu",
- fs_info->sectorsize, PAGE_SIZE);
- btrfs_set_opt(fs_info->mount_opt, FREE_SPACE_TREE);
- }
+ btrfs_set_opt(fs_info->mount_opt, FREE_SPACE_TREE);
}
/*
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 4ed0fcf1fa7d..fa1199fb6b3d 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -6032,10 +6032,8 @@ again:
* and no keys greater than that, so bail out.
*/
break;
- } else if ((min_key->type == BTRFS_INODE_REF_KEY ||
- min_key->type == BTRFS_INODE_EXTREF_KEY) &&
- (inode->generation == trans->transid ||
- ctx->logging_conflict_inodes)) {
+ } else if (min_key->type == BTRFS_INODE_REF_KEY ||
+ min_key->type == BTRFS_INODE_EXTREF_KEY) {
u64 other_ino = 0;
u64 other_parent = 0;
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index 027fd567a4d9..bc968cf812ba 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -641,14 +641,21 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
* fs contexts (including its own) due to self-controlled RO
* accesses/contexts and no side-effect changes that need to
* context save & restore so it can reuse the current thread
- * context. However, it still needs to bump `s_stack_depth` to
- * avoid kernel stack overflow from nested filesystems.
+ * context.
+ * However, we still need to prevent kernel stack overflow due
+ * to filesystem nesting: just ensure that s_stack_depth is 0
+ * to disallow mounting EROFS on stacked filesystems.
+ * Note: s_stack_depth is not incremented here for now, since
+ * EROFS is currently the only fs supporting file-backed mounts.
+ * It MUST change if another fs plans to support them, which
+ * may also require adjusting FILESYSTEM_MAX_STACK_DEPTH.
*/
if (erofs_is_fileio_mode(sbi)) {
- sb->s_stack_depth =
- file_inode(sbi->dif0.file)->i_sb->s_stack_depth + 1;
- if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
- erofs_err(sb, "maximum fs stacking depth exceeded");
+ inode = file_inode(sbi->dif0.file);
+ if ((inode->i_sb->s_op == &erofs_sops &&
+ !inode->i_sb->s_bdev) ||
+ inode->i_sb->s_stack_depth) {
+ erofs_err(sb, "file-backed mounts cannot be applied to stacked fses");
return -ENOTBLK;
}
}
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index 923b5c1eb47e..99ef1146096f 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -170,6 +170,11 @@ struct vfsmount *nfs_d_automount(struct path *path)
if (!ctx->clone_data.fattr)
goto out_fc;
+ if (fc->cred != server->cred) {
+ put_cred(fc->cred);
+ fc->cred = get_cred(server->cred);
+ }
+
if (fc->net_ns != client->cl_net) {
put_net(fc->net_ns);
fc->net_ns = get_net(client->cl_net);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 172ff213b50b..89f779f16f0d 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1753,8 +1753,17 @@ static void nfs_set_open_stateid_locked(struct nfs4_state *state,
if (nfs_stateid_is_sequential(state, stateid))
break;
- if (status)
- break;
+ if (status) {
+ if (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
+ !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
+ trace_nfs4_open_stateid_update_skip(state->inode,
+ stateid, status);
+ return;
+ } else {
+ break;
+ }
+ }
+
/* Rely on seqids for serialisation with NFSv4.0 */
if (!nfs4_has_session(NFS_SERVER(state->inode)->nfs_client))
break;
diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h
index 22c973316f0b..9a38a5d3bf51 100644
--- a/fs/nfs/nfs4trace.h
+++ b/fs/nfs/nfs4trace.h
@@ -1278,6 +1278,7 @@ DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_setattr);
DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_delegreturn);
DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_open_stateid_update);
DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_open_stateid_update_wait);
+DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_open_stateid_update_skip);
DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_close_stateid_update_wait);
DECLARE_EVENT_CLASS(nfs4_getattr_event,
diff --git a/fs/nfs_common/common.c b/fs/nfs_common/common.c
index af09aed09fd2..0778743ae2c2 100644
--- a/fs/nfs_common/common.c
+++ b/fs/nfs_common/common.c
@@ -17,7 +17,6 @@ static const struct {
{ NFSERR_NOENT, -ENOENT },
{ NFSERR_IO, -EIO },
{ NFSERR_NXIO, -ENXIO },
-/* { NFSERR_EAGAIN, -EAGAIN }, */
{ NFSERR_ACCES, -EACCES },
{ NFSERR_EXIST, -EEXIST },
{ NFSERR_XDEV, -EXDEV },
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
index ceab4a3e503f..09a06e46dab9 100644
--- a/fs/nfsd/netns.h
+++ b/fs/nfsd/netns.h
@@ -66,6 +66,8 @@ struct nfsd_net {
struct lock_manager nfsd4_manager;
bool grace_ended;
+ bool grace_end_forced;
+ bool client_tracking_active;
time64_t boot_time;
struct dentry *nfsd_client_dir;
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index bc2bb92a624a..05efa10ed84b 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1359,7 +1359,7 @@ try_again:
(schedule_timeout(20*HZ) == 0)) {
finish_wait(&nn->nfsd_ssc_waitq, &wait);
kfree(work);
- return nfserr_eagain;
+ return nfserr_jukebox;
}
finish_wait(&nn->nfsd_ssc_waitq, &wait);
goto try_again;
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index eeca4329e1d0..1a15e458b178 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -84,7 +84,7 @@ static u64 current_sessionid = 1;
/* forward declarations */
static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
-void nfsd4_end_grace(struct nfsd_net *nn);
+static void nfsd4_end_grace(struct nfsd_net *nn);
static void _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps);
static void nfsd4_file_hash_remove(struct nfs4_file *fi);
static void deleg_reaper(struct nfsd_net *nn);
@@ -1743,7 +1743,7 @@ static struct nfs4_stid *find_one_sb_stid(struct nfs4_client *clp,
/**
* nfsd4_revoke_states - revoke all nfsv4 states associated with given filesystem
- * @net: used to identify instance of nfsd (there is one per net namespace)
+ * @nn: used to identify instance of nfsd (there is one per net namespace)
* @sb: super_block used to identify target filesystem
*
* All nfs4 states (open, lock, delegation, layout) held by the server instance
@@ -1755,16 +1755,15 @@ static struct nfs4_stid *find_one_sb_stid(struct nfs4_client *clp,
* The clients which own the states will subsequently be notified that the
* states have been "admin-revoked".
*/
-void nfsd4_revoke_states(struct net *net, struct super_block *sb)
+void nfsd4_revoke_states(struct nfsd_net *nn, struct super_block *sb)
{
- struct nfsd_net *nn = net_generic(net, nfsd_net_id);
unsigned int idhashval;
unsigned int sc_types;
sc_types = SC_TYPE_OPEN | SC_TYPE_LOCK | SC_TYPE_DELEG | SC_TYPE_LAYOUT;
spin_lock(&nn->client_lock);
- for (idhashval = 0; idhashval < CLIENT_HASH_MASK; idhashval++) {
+ for (idhashval = 0; idhashval < CLIENT_HASH_SIZE; idhashval++) {
struct list_head *head = &nn->conf_id_hashtbl[idhashval];
struct nfs4_client *clp;
retry:
@@ -6294,7 +6293,7 @@ nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
return nfs_ok;
}
-void
+static void
nfsd4_end_grace(struct nfsd_net *nn)
{
/* do nothing if grace period already ended */
@@ -6327,6 +6326,33 @@ nfsd4_end_grace(struct nfsd_net *nn)
*/
}
+/**
+ * nfsd4_force_end_grace - forcibly end the NFSv4 grace period
+ * @nn: network namespace for the server instance to be updated
+ *
+ * Forces bypass of normal grace period completion, then schedules
+ * the laundromat to end the grace period immediately. Does not wait
+ * for the grace period to fully terminate before returning.
+ *
+ * Return values:
+ * %true: Grace period termination was scheduled
+ * %false: No action was taken
+ */
+bool nfsd4_force_end_grace(struct nfsd_net *nn)
+{
+ if (!nn->client_tracking_ops)
+ return false;
+ spin_lock(&nn->client_lock);
+ if (nn->grace_ended || !nn->client_tracking_active) {
+ spin_unlock(&nn->client_lock);
+ return false;
+ }
+ WRITE_ONCE(nn->grace_end_forced, true);
+ mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
+ spin_unlock(&nn->client_lock);
+ return true;
+}
+
/*
* If we've waited a lease period but there are still clients trying to
* reclaim, wait a little longer to give them a chance to finish.
@@ -6336,6 +6362,8 @@ static bool clients_still_reclaiming(struct nfsd_net *nn)
time64_t double_grace_period_end = nn->boot_time +
2 * nn->nfsd4_lease;
+ if (READ_ONCE(nn->grace_end_forced))
+ return false;
if (nn->track_reclaim_completes &&
atomic_read(&nn->nr_reclaim_complete) ==
nn->reclaim_str_hashtbl_size)
@@ -8655,6 +8683,8 @@ static int nfs4_state_create_net(struct net *net)
nn->unconf_name_tree = RB_ROOT;
nn->boot_time = ktime_get_real_seconds();
nn->grace_ended = false;
+ nn->grace_end_forced = false;
+ nn->client_tracking_active = false;
nn->nfsd4_manager.block_opens = true;
INIT_LIST_HEAD(&nn->nfsd4_manager.list);
INIT_LIST_HEAD(&nn->client_lru);
@@ -8735,6 +8765,10 @@ nfs4_state_start_net(struct net *net)
return ret;
locks_start_grace(net, &nn->nfsd4_manager);
nfsd4_client_tracking_init(net);
+ /* safe for laundromat to run now */
+ spin_lock(&nn->client_lock);
+ nn->client_tracking_active = true;
+ spin_unlock(&nn->client_lock);
if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0)
goto skip_grace;
printk(KERN_INFO "NFSD: starting %lld-second grace period (net %x)\n",
@@ -8775,6 +8809,9 @@ nfs4_state_shutdown_net(struct net *net)
shrinker_free(nn->nfsd_client_shrinker);
cancel_work_sync(&nn->nfsd_shrinker_work);
+ spin_lock(&nn->client_lock);
+ nn->client_tracking_active = false;
+ spin_unlock(&nn->client_lock);
cancel_delayed_work_sync(&nn->laundromat_work);
locks_end_grace(&nn->nfsd4_manager);
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index dcaa31706394..eb012f943912 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -262,6 +262,7 @@ static ssize_t write_unlock_fs(struct file *file, char *buf, size_t size)
struct path path;
char *fo_path;
int error;
+ struct nfsd_net *nn;
/* sanity check */
if (size == 0)
@@ -288,7 +289,13 @@ static ssize_t write_unlock_fs(struct file *file, char *buf, size_t size)
* 3. Is that directory the root of an exported file system?
*/
error = nlmsvc_unlock_all_by_sb(path.dentry->d_sb);
- nfsd4_revoke_states(netns(file), path.dentry->d_sb);
+ mutex_lock(&nfsd_mutex);
+ nn = net_generic(netns(file), nfsd_net_id);
+ if (nn->nfsd_serv)
+ nfsd4_revoke_states(nn, path.dentry->d_sb);
+ else
+ error = -EINVAL;
+ mutex_unlock(&nfsd_mutex);
path_put(&path);
return error;
@@ -1123,10 +1130,9 @@ static ssize_t write_v4_end_grace(struct file *file, char *buf, size_t size)
case 'Y':
case 'y':
case '1':
- if (!nn->nfsd_serv)
+ if (!nfsd4_force_end_grace(nn))
return -EBUSY;
trace_nfsd_end_grace(netns(file));
- nfsd4_end_grace(nn);
break;
default:
return -EINVAL;
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index df5f633cc14b..ad0af259e98f 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -226,7 +226,6 @@ void nfsd_lockd_shutdown(void);
#define nfserr_noent cpu_to_be32(NFSERR_NOENT)
#define nfserr_io cpu_to_be32(NFSERR_IO)
#define nfserr_nxio cpu_to_be32(NFSERR_NXIO)
-#define nfserr_eagain cpu_to_be32(NFSERR_EAGAIN)
#define nfserr_acces cpu_to_be32(NFSERR_ACCES)
#define nfserr_exist cpu_to_be32(NFSERR_EXIST)
#define nfserr_xdev cpu_to_be32(NFSERR_XDEV)
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index cc185c00e309..88c15b49e4bd 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -434,26 +434,26 @@ static void nfsd_shutdown_net(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
- if (!nn->nfsd_net_up)
- return;
-
- percpu_ref_kill_and_confirm(&nn->nfsd_net_ref, nfsd_net_done);
- wait_for_completion(&nn->nfsd_net_confirm_done);
-
- nfsd_export_flush(net);
- nfs4_state_shutdown_net(net);
- nfsd_reply_cache_shutdown(nn);
- nfsd_file_cache_shutdown_net(net);
- if (nn->lockd_up) {
- lockd_down(net);
- nn->lockd_up = false;
+ if (nn->nfsd_net_up) {
+ percpu_ref_kill_and_confirm(&nn->nfsd_net_ref, nfsd_net_done);
+ wait_for_completion(&nn->nfsd_net_confirm_done);
+
+ nfsd_export_flush(net);
+ nfs4_state_shutdown_net(net);
+ nfsd_reply_cache_shutdown(nn);
+ nfsd_file_cache_shutdown_net(net);
+ if (nn->lockd_up) {
+ lockd_down(net);
+ nn->lockd_up = false;
+ }
+ wait_for_completion(&nn->nfsd_net_free_done);
}
- wait_for_completion(&nn->nfsd_net_free_done);
percpu_ref_exit(&nn->nfsd_net_ref);
+ if (nn->nfsd_net_up)
+ nfsd_shutdown_generic();
nn->nfsd_net_up = false;
- nfsd_shutdown_generic();
}
static DEFINE_SPINLOCK(nfsd_notifier_lock);
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 35b3564c065f..609487d35959 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -759,15 +759,15 @@ static inline void get_nfs4_file(struct nfs4_file *fi)
struct nfsd_file *find_any_file(struct nfs4_file *f);
#ifdef CONFIG_NFSD_V4
-void nfsd4_revoke_states(struct net *net, struct super_block *sb);
+void nfsd4_revoke_states(struct nfsd_net *nn, struct super_block *sb);
#else
-static inline void nfsd4_revoke_states(struct net *net, struct super_block *sb)
+static inline void nfsd4_revoke_states(struct nfsd_net *nn, struct super_block *sb)
{
}
#endif
/* grace period management */
-void nfsd4_end_grace(struct nfsd_net *nn);
+bool nfsd4_force_end_grace(struct nfsd_net *nn);
/* nfs4recover operations */
extern int nfsd4_client_tracking_init(struct net *net);
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 8c4f4e2f9cee..08c8babfdd75 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -2568,8 +2568,8 @@ nfsd_permission(struct svc_cred *cred, struct svc_export *exp,
/* Allow read access to binaries even when mode 111 */
if (err == -EACCES && S_ISREG(inode->i_mode) &&
- (acc == (NFSD_MAY_READ | NFSD_MAY_OWNER_OVERRIDE) ||
- acc == (NFSD_MAY_READ | NFSD_MAY_READ_IF_EXEC)))
+ (((acc & NFSD_MAY_MASK) == NFSD_MAY_READ) &&
+ (acc & (NFSD_MAY_OWNER_OVERRIDE | NFSD_MAY_READ_IF_EXEC))))
err = inode_permission(&nop_mnt_idmap, inode, MAY_EXEC);
return err? nfserrno(err) : 0;
diff --git a/fs/smb/client/nterr.h b/fs/smb/client/nterr.h
index edd4741cab0a..e3a341316a71 100644
--- a/fs/smb/client/nterr.h
+++ b/fs/smb/client/nterr.h
@@ -41,10 +41,10 @@ extern const struct nt_err_code_struct nt_errs[];
#define NT_STATUS_MEDIA_CHANGED 0x8000001c
#define NT_STATUS_END_OF_MEDIA 0x8000001e
#define NT_STATUS_MEDIA_CHECK 0x80000020
-#define NT_STATUS_NO_DATA_DETECTED 0x8000001c
+#define NT_STATUS_NO_DATA_DETECTED 0x80000022
#define NT_STATUS_STOPPED_ON_SYMLINK 0x8000002d
#define NT_STATUS_DEVICE_REQUIRES_CLEANING 0x80000288
-#define NT_STATUS_DEVICE_DOOR_OPEN 0x80000288
+#define NT_STATUS_DEVICE_DOOR_OPEN 0x80000289
#define NT_STATUS_UNSUCCESSFUL 0xC0000000 | 0x0001
#define NT_STATUS_NOT_IMPLEMENTED 0xC0000000 | 0x0002
#define NT_STATUS_INVALID_INFO_CLASS 0xC0000000 | 0x0003
@@ -70,7 +70,7 @@ extern const struct nt_err_code_struct nt_errs[];
#define NT_STATUS_NO_MEMORY 0xC0000000 | 0x0017
#define NT_STATUS_CONFLICTING_ADDRESSES 0xC0000000 | 0x0018
#define NT_STATUS_NOT_MAPPED_VIEW 0xC0000000 | 0x0019
-#define NT_STATUS_UNABLE_TO_FREE_VM 0x80000000 | 0x001a
+#define NT_STATUS_UNABLE_TO_FREE_VM 0xC0000000 | 0x001a
#define NT_STATUS_UNABLE_TO_DELETE_SECTION 0xC0000000 | 0x001b
#define NT_STATUS_INVALID_SYSTEM_SERVICE 0xC0000000 | 0x001c
#define NT_STATUS_ILLEGAL_INSTRUCTION 0xC0000000 | 0x001d