RST delete ci run #1435

Closed · wants to merge 80 commits
Changes from all commits (80 commits)
fe46c64
btrfs: don't take dev_replace rwsem on task already holding it
morbidrsa Sep 10, 2024
5b3b62a
btrfs: make assert_rbio() to only check CONFIG_BTRFS_ASSERT
adam900710 Sep 16, 2024
8f83607
btrfs: split out CONFIG_BTRFS_EXPERIMENTAL from CONFIG_BTRFS_DEBUG
adam900710 Sep 16, 2024
18c2be4
btrfs: zlib: make the compression path to handle sector size < page size
adam900710 Sep 6, 2024
17a51a0
btrfs: zstd: make the compression path to handle sector size < page size
adam900710 Sep 6, 2024
6eb293f
btrfs: compression: add an ASSERT() to ensure the read-in length is sane
adam900710 Sep 6, 2024
50e2162
btrfs: wait for writeback if sector size is smaller than page size
adam900710 Sep 8, 2024
2923eaf
btrfs: make extent_range_clear_dirty_for_io() to handle sector size <…
adam900710 Sep 10, 2024
5ce7471
btrfs: do not assume the full page range is not dirty in extent_write…
adam900710 Sep 10, 2024
938449b
btrfs: move the delalloc range bitmap search into extent_io.c
adam900710 Sep 15, 2024
81b8cc5
btrfs: mark all dirty sectors as locked inside writepage_delalloc()
adam900710 Sep 15, 2024
6326713
btrfs: allow compression even if the range is not page aligned
adam900710 Sep 6, 2024
f7f6d8e
btrfs: avoid unnecessary device path update for the same device
adam900710 Sep 24, 2024
4f0ed68
btrfs: canonicalize the device path before adding it
adam900710 Sep 24, 2024
9078516
btrfs: remove code duplication in ordered extent finishing
morbidrsa Sep 27, 2024
d217a8f
btrfs: qgroups: remove bytenr field from struct btrfs_qgroup_extent_r…
fdmanana Sep 24, 2024
bc10046
btrfs: store fs_info in a local variable at btrfs_qgroup_trace_extent…
fdmanana Sep 24, 2024
7d2835e
btrfs: remove unnecessary delayed refs locking at btrfs_qgroup_trace_…
fdmanana Sep 24, 2024
ad0bb2c
btrfs: always use delayed_refs local variable at btrfs_qgroup_trace_e…
fdmanana Sep 24, 2024
c8421bc
btrfs: remove pointless initialization at btrfs_qgroup_trace_extent()
fdmanana Sep 24, 2024
e7f6492
btrfs: remove redundant stop_loop variable in scrub_stripe()
Ryand1234 Sep 26, 2024
21ac0bf
btrfs: remove unused page_to_inode and page_to_fs_info macros
Sep 24, 2024
0647aa3
btrfs: correct typos in multiple comments across various files
Sep 24, 2024
9c74f2c
btrfs: tests: add selftests for raid-stripe-tree
morbidrsa Oct 2, 2024
1bbafcc
btrfs: remove unused btrfs_free_squota_rsv()
Oct 3, 2024
3d8ac55
btrfs: remove unused btrfs_is_parity_mirror()
Oct 3, 2024
7f92863
btrfs: remove unused btrfs_try_tree_write_lock()
Oct 3, 2024
3b7324c
btrfs: remove the dirty_page local variable
adam900710 Oct 1, 2024
a85e63f
btrfs: simplify the page uptodate preparation for prepare_pages()
adam900710 Oct 1, 2024
df5af25
btrfs: handle empty list of NOCOW ordered extents with checksum list
morbidrsa Oct 4, 2024
659f41e
btrfs: return ENODATA in case RST lookup fails
morbidrsa Oct 7, 2024
e081590
btrfs: scrub: skip initial RST lookup errors
morbidrsa Oct 7, 2024
8162aaa
btrfs: qgroup: run delayed iputs after ordered extent completion
fdmanana Oct 7, 2024
0b2308e
btrfs: remove btrfs_set_range_writeback()
adam900710 Oct 6, 2024
bde9f20
btrfs: zstd: assert the timer pointer in callback
kdave Oct 9, 2024
1d19c12
btrfs: drop unused parameter path from btrfs_tree_mod_log_rewind()
kdave Oct 9, 2024
3994109
btrfs: drop unused parameter ctx from batch_delete_dir_index_items()
kdave Oct 9, 2024
4995054
btrfs: drop unused parameter fs_info from wait_reserve_ticket()
kdave Oct 9, 2024
51f6c3a
btrfs: drop unused parameter fs_info from do_reclaim_sweep()
kdave Oct 9, 2024
8721e68
btrfs: send: drop unused parameter num from iterate_inode_ref_t callb…
kdave Oct 9, 2024
3555fea
btrfs: send: drop unused parameter index from iterate_inode_ref_t cal…
kdave Oct 9, 2024
2aa366d
btrfs: scrub: drop unused parameter sctx from scrub_submit_extent_sec…
kdave Oct 9, 2024
e6c00f4
btrfs: drop unused parameter map from scrub_simple_mirror()
kdave Oct 9, 2024
f9bd555
btrfs: qgroup: drop unused parameter fs_info from __del_qgroup_rb()
kdave Oct 9, 2024
e3c79ce
btrfs: drop unused transaction parameter from btrfs_qgroup_add_swappe…
kdave Oct 9, 2024
3beb0db
btrfs: lzo: drop unused parameter level from lzo_alloc_workspace()
kdave Oct 9, 2024
7e22750
btrfs: drop unused parameter argp from btrfs_ioctl_quota_rescan_wait()
kdave Oct 9, 2024
24f6fd6
btrfs: drop unused parameter inode from read_inline_extent()
kdave Oct 9, 2024
05d2682
btrfs: drop unused parameter offset from __cow_file_range_inline()
kdave Oct 9, 2024
2c007d2
btrfs: drop unused parameter file_offset from btrfs_encoded_read_regu…
kdave Oct 9, 2024
3c09d3b
btrfs: drop unused parameter iov_iter from btrfs_write_check()
kdave Oct 9, 2024
afe6a70
btrfs: drop unused parameter refs from visit_node_for_delete()
kdave Oct 9, 2024
ce8e39d
btrfs: drop unused parameter mask from try_release_extent_state()
kdave Oct 9, 2024
2f3f009
btrfs: drop unused parameter fs_info from folio_range_has_eb()
kdave Oct 9, 2024
4836659
btrfs: drop unused parameter options from open_ctree()
kdave Oct 9, 2024
3fcafae
btrfs: drop unused parameter data from btrfs_fill_super()
kdave Oct 9, 2024
bea54c5
btrfs: drop unused parameter transaction from alloc_log_tree()
kdave Oct 9, 2024
9c0eded
btrfs: drop unused parameter fs_info from btrfs_match_dir_item_name()
kdave Oct 9, 2024
caebb14
btrfs: drop unused parameter level from alloc_heuristic_ws()
kdave Oct 9, 2024
9bbc899
btrfs: zoned: fix zone unusable accounting for freed reserved extent
naota Oct 1, 2024
467b190
btrfs: fix error propagation of split bios
naota Oct 9, 2024
b54ce3d
btrfs: clear force-compress on remount when compress mount option is …
fdmanana Oct 14, 2024
b18732b
btrfs: reduce lock contention when eb cache miss for btree search
Oct 15, 2024
7e1135c
btrfs: add and use helper to remove extent map from its inode's tree
fdmanana Sep 6, 2024
d4eefbc
btrfs: make the extent map shrinker run asynchronously as a work queu…
fdmanana Aug 29, 2024
ad6f27e
btrfs: simplify tracking progress for the extent map shrinker
fdmanana Sep 4, 2024
94a09da
btrfs: rename extent map shrinker members from struct btrfs_fs_info
fdmanana Sep 5, 2024
5a6cada
btrfs: re-enable the extent map shrinker
fdmanana Sep 17, 2024
0f9677a
btrfs: remove redundant level argument from read_block_for_search()
fdmanana Oct 16, 2024
7f066ce
btrfs: simplify arguments for btrfs_verify_level_key()
fdmanana Oct 16, 2024
28cea0d
btrfs: remove redundant initializations for struct btrfs_tree_parent_…
fdmanana Oct 16, 2024
7e3a5ca
btrfs: remove local generation variable from read_block_for_search()
fdmanana Oct 16, 2024
78a423d
btrfs: do not clear read-only when adding sprout device
boryas Oct 15, 2024
8a40d12
btrfs: qgroup: set a more sane default value for subtree drop threshold
adam900710 Sep 10, 2024
cd9721f
btrfs: fix the delalloc range locking if sector size < page size
adam900710 Oct 8, 2024
3efe27c
btrfs: remove unused btrfs_folio_start_writer_lock()
adam900710 Oct 8, 2024
9d64856
btrfs: unify to use writer locks for subpage locking
adam900710 Oct 9, 2024
2be4c90
btrfs: rename btrfs_folio_(set|start|end)_writer_lock()
adam900710 Oct 9, 2024
6eba246
btrfs: implement partial deletion of RAID stripe extents
morbidrsa Oct 8, 2024
7269e1c
btrfs: implement self-tests for partial RAID stripe-tree delete
morbidrsa Oct 8, 2024
26 changes: 26 additions & 0 deletions fs/btrfs/Kconfig
@@ -78,6 +78,32 @@ config BTRFS_ASSERT

If unsure, say N.

config BTRFS_EXPERIMENTAL
bool "Btrfs experimental features"
depends on BTRFS_FS
default n
help
Enable experimental features. These features may not be stable enough
for end users. This is meant for btrfs developers or users who wish
to test the functionality and report problems.

Current list:

- extent map shrinker - performance problems with too frequent shrinks

- send stream protocol v3 - fs-verity support

- checksum offload mode - sysfs knob to affect when checksums are
calculated (at IO time, or in a thread)

- raid-stripe-tree - additional mapping of extents to devices to
support RAID1* profiles on zoned devices,
RAID56 not yet supported

- extent tree v2 - complex rework of extent tracking

If unsure, say N.

config BTRFS_FS_REF_VERIFY
bool "Btrfs with the ref verify tool compiled in"
depends on BTRFS_FS
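For orientation, CONFIG_BTRFS_EXPERIMENTAL behaves like any other Kconfig symbol: experimental code can be compiled out with an #ifdef or tested with the kernel's IS_ENABLED() macro, which is exactly what the bio.c hunk further down does when it moves the checksum-offload knob from CONFIG_BTRFS_DEBUG to the new symbol. A minimal sketch of such a guard; the helper name is illustrative and not part of btrfs:

#include <linux/kconfig.h>	/* IS_ENABLED() */
#include <linux/types.h>

/* Illustrative helper: report whether experimental paths are built in. */
static inline bool btrfs_experimental_build(void)
{
	return IS_ENABLED(CONFIG_BTRFS_EXPERIMENTAL);
}

/* Or compile the code out entirely when the option is disabled: */
#ifdef CONFIG_BTRFS_EXPERIMENTAL
/* experimental-only definitions */
#endif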
3 changes: 2 additions & 1 deletion fs/btrfs/Makefile
@@ -43,4 +43,5 @@ btrfs-$(CONFIG_FS_VERITY) += verity.o
btrfs-$(CONFIG_BTRFS_FS_RUN_SANITY_TESTS) += tests/free-space-tests.o \
tests/extent-buffer-tests.o tests/btrfs-tests.o \
tests/extent-io-tests.o tests/inode-tests.o tests/qgroup-tests.o \
tests/free-space-tree-tests.o tests/extent-map-tests.o
tests/free-space-tree-tests.o tests/extent-map-tests.o \
tests/raid-stripe-tree-tests.o
39 changes: 14 additions & 25 deletions fs/btrfs/bio.c
@@ -49,6 +49,7 @@ void btrfs_bio_init(struct btrfs_bio *bbio, struct btrfs_fs_info *fs_info,
bbio->end_io = end_io;
bbio->private = private;
atomic_set(&bbio->pending_ios, 1);
WRITE_ONCE(bbio->status, BLK_STS_OK);
}

/*
@@ -113,41 +114,29 @@ static void __btrfs_bio_end_io(struct btrfs_bio *bbio)
}
}

static void btrfs_orig_write_end_io(struct bio *bio);

static void btrfs_bbio_propagate_error(struct btrfs_bio *bbio,
struct btrfs_bio *orig_bbio)
{
/*
* For writes we tolerate nr_mirrors - 1 write failures, so we can't
* just blindly propagate a write failure here. Instead increment the
* error count in the original I/O context so that it is guaranteed to
* be larger than the error tolerance.
*/
if (bbio->bio.bi_end_io == &btrfs_orig_write_end_io) {
struct btrfs_io_stripe *orig_stripe = orig_bbio->bio.bi_private;
struct btrfs_io_context *orig_bioc = orig_stripe->bioc;

atomic_add(orig_bioc->max_errors, &orig_bioc->error);
} else {
orig_bbio->bio.bi_status = bbio->bio.bi_status;
}
}

void btrfs_bio_end_io(struct btrfs_bio *bbio, blk_status_t status)
{
bbio->bio.bi_status = status;
if (bbio->bio.bi_pool == &btrfs_clone_bioset) {
struct btrfs_bio *orig_bbio = bbio->private;

if (bbio->bio.bi_status)
btrfs_bbio_propagate_error(bbio, orig_bbio);
btrfs_cleanup_bio(bbio);
bbio = orig_bbio;
}

if (atomic_dec_and_test(&bbio->pending_ios))
/*
* At this point, bbio always points to the original btrfs_bio. Save
* the first error in it.
*/
if (status != BLK_STS_OK)
cmpxchg(&bbio->status, BLK_STS_OK, status);

if (atomic_dec_and_test(&bbio->pending_ios)) {
/* Load split bio's error which might be set above. */
if (status == BLK_STS_OK)
bbio->bio.bi_status = READ_ONCE(bbio->status);
__btrfs_bio_end_io(bbio);
}
}

static int next_repair_mirror(struct btrfs_failed_bio *fbio, int cur_mirror)
@@ -598,7 +587,7 @@ static bool should_async_write(struct btrfs_bio *bbio)
{
bool auto_csum_mode = true;

#ifdef CONFIG_BTRFS_DEBUG
#ifdef CONFIG_BTRFS_EXPERIMENTAL
struct btrfs_fs_devices *fs_devices = bbio->fs_info->fs_devices;
enum btrfs_offload_csum_mode csum_mode = READ_ONCE(fs_devices->offload_csum_mode);

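The rework above replaces btrfs_bbio_propagate_error() with a first-error-wins record: split bios complete concurrently, each completion compare-and-swaps its status into the parent only while no error has been stored yet, and the final completion publishes whatever was saved. A standalone sketch of that pattern using C11 atomics; the struct and function names are illustrative, not the kernel's:

#include <stdatomic.h>
#include <stdio.h>

#define STS_OK 0

/* Illustrative stand-in for the original (parent) bio context. */
struct parent_io {
	atomic_int status;      /* first non-OK status wins */
	atomic_int pending_ios; /* split completions still outstanding */
};

/* Called once per split completion with that split's result. */
static void split_end_io(struct parent_io *p, int status)
{
	int expected = STS_OK;

	if (status != STS_OK)
		/* Record only the first error; later errors are dropped. */
		atomic_compare_exchange_strong(&p->status, &expected, status);

	/* The last completion reports the saved (first) error, if any. */
	if (atomic_fetch_sub(&p->pending_ios, 1) == 1)
		printf("parent completes with status %d\n",
		       atomic_load(&p->status));
}

int main(void)
{
	struct parent_io p;

	atomic_init(&p.status, STS_OK);
	atomic_init(&p.pending_ios, 3);

	split_end_io(&p, STS_OK); /* success               */
	split_end_io(&p, 5);      /* first error: recorded */
	split_end_io(&p, 7);      /* later error: ignored  */
	return 0;
}

With three outstanding completions the run ends with status 5: the earlier error survives and the later one is discarded, mirroring the cmpxchg()/READ_ONCE() pair in btrfs_bio_end_io().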
3 changes: 3 additions & 0 deletions fs/btrfs/bio.h
@@ -79,6 +79,9 @@ struct btrfs_bio {
/* File system that this I/O operates on. */
struct btrfs_fs_info *fs_info;

/* Save the first error status of split bio. */
blk_status_t status;

/*
* This member must come last, bio_alloc_bioset will allocate enough
* bytes for entire btrfs_bio but relies on bio being last.
4 changes: 3 additions & 1 deletion fs/btrfs/block-group.c
@@ -2797,7 +2797,7 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
* uncompressed data size, because the compression is only done
* when writeback triggered and we don't know how much space we
* are actually going to need, so we reserve the uncompressed
* size because the data may be uncompressible in the worst case.
* size because the data may be incompressible in the worst case.
*/
if (ret == 0) {
bool used;
@@ -3819,6 +3819,8 @@ void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
spin_lock(&cache->lock);
if (cache->ro)
space_info->bytes_readonly += num_bytes;
else if (btrfs_is_zoned(cache->fs_info))
space_info->bytes_zone_unusable += num_bytes;
cache->reserved -= num_bytes;
space_info->bytes_reserved -= num_bytes;
space_info->max_extent_size = 0;
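The btrfs_free_reserved_bytes() hunk adds the zoned branch: on a zoned filesystem, space freed from a reservation cannot go straight back to the free pool because it is not reusable until the zone is reset, so it is accounted as zone-unusable instead. A condensed sketch of the resulting decision, using illustrative counters rather than the real btrfs_space_info and btrfs_block_group structures:

#include <stdbool.h>

/* Illustrative counters mirroring the accounting decision above. */
struct space_counters {
	unsigned long long bytes_readonly;
	unsigned long long bytes_zone_unusable;
	unsigned long long bytes_reserved;
};

static void free_reserved(struct space_counters *c, unsigned long long num_bytes,
			  bool block_group_ro, bool fs_is_zoned)
{
	if (block_group_ro)
		c->bytes_readonly += num_bytes;      /* stays pinned in a RO group */
	else if (fs_is_zoned)
		c->bytes_zone_unusable += num_bytes; /* unusable until the zone is reset */
	c->bytes_reserved -= num_bytes;              /* the reservation itself is dropped */
}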
4 changes: 1 addition & 3 deletions fs/btrfs/btrfs_inode.h
@@ -577,7 +577,6 @@ void btrfs_merge_delalloc_extent(struct btrfs_inode *inode, struct extent_state
struct extent_state *other);
void btrfs_split_delalloc_extent(struct btrfs_inode *inode,
struct extent_state *orig, u64 split);
void btrfs_set_range_writeback(struct btrfs_inode *inode, u64 start, u64 end);
void btrfs_evict_inode(struct inode *inode);
struct inode *btrfs_alloc_inode(struct super_block *sb);
void btrfs_destroy_inode(struct inode *inode);
@@ -613,8 +612,7 @@ int btrfs_writepage_cow_fixup(struct folio *folio);
int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info,
int compress_type);
int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
u64 file_offset, u64 disk_bytenr,
u64 disk_io_size,
u64 disk_bytenr, u64 disk_io_size,
struct page **pages);
ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
struct btrfs_ioctl_encoded_io_args *encoded);
12 changes: 7 additions & 5 deletions fs/btrfs/compression.c
@@ -545,8 +545,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
* subpage::readers and to unlock the page.
*/
if (fs_info->sectorsize < PAGE_SIZE)
btrfs_subpage_start_reader(fs_info, folio, cur,
add_size);
btrfs_folio_set_lock(fs_info, folio, cur, add_size);
folio_put(folio);
cur += add_size;
}
@@ -702,7 +701,7 @@ static void free_heuristic_ws(struct list_head *ws)
kfree(workspace);
}

static struct list_head *alloc_heuristic_ws(unsigned int level)
static struct list_head *alloc_heuristic_ws(void)
{
struct heuristic_ws *ws;

@@ -744,9 +743,9 @@ static const struct btrfs_compress_op * const btrfs_compress_op[] = {
static struct list_head *alloc_workspace(int type, unsigned int level)
{
switch (type) {
case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level);
case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws();
case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level);
case BTRFS_COMPRESS_LZO: return lzo_alloc_workspace(level);
case BTRFS_COMPRESS_LZO: return lzo_alloc_workspace();
case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level);
default:
/*
@@ -1030,13 +1029,16 @@ int btrfs_compress_folios(unsigned int type_level, struct address_space *mapping
{
int type = btrfs_compress_type(type_level);
int level = btrfs_compress_level(type_level);
const unsigned long orig_len = *total_out;
struct list_head *workspace;
int ret;

level = btrfs_compress_set_level(type, level);
workspace = get_workspace(type, level);
ret = compression_compress_pages(type, workspace, mapping, start, folios,
out_folios, total_in, total_out);
/* The total read-in bytes should be no larger than the input. */
ASSERT(*total_in <= orig_len);
put_workspace(type, workspace);
return ret;
}
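The new ASSERT() in btrfs_compress_folios() captures a simple invariant: a compressor may stop early (for instance when the data is not compressing well), but it can never report having consumed more bytes than it was handed, and orig_len snapshots that upper bound before the call. A tiny self-contained illustration of the same check around a toy compress step; nothing below is a btrfs helper:

#include <assert.h>
#include <stddef.h>
#include <string.h>

/* Toy "compressor": copies as much input as fits in the output buffer and
 * reports how much it actually consumed, possibly less than in_len,
 * mimicking an early bail-out.
 */
static size_t toy_compress(const unsigned char *in, size_t in_len,
			   unsigned char *out, size_t out_cap, size_t *consumed)
{
	size_t n = in_len < out_cap ? in_len : out_cap;

	memcpy(out, in, n);
	*consumed = n;
	return n;
}

int main(void)
{
	unsigned char in[64] = { 0 }, out[16];
	const size_t orig_len = sizeof(in);	/* snapshot, like orig_len above */
	size_t consumed = 0;

	toy_compress(in, sizeof(in), out, sizeof(out), &consumed);
	/* The total read-in bytes should be no larger than the input. */
	assert(consumed <= orig_len);
	return 0;
}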
2 changes: 1 addition & 1 deletion fs/btrfs/compression.h
@@ -175,7 +175,7 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
int lzo_decompress(struct list_head *ws, const u8 *data_in,
struct folio *dest_folio, unsigned long dest_pgoff, size_t srclen,
size_t destlen);
struct list_head *lzo_alloc_workspace(unsigned int level);
struct list_head *lzo_alloc_workspace(void);
void lzo_free_workspace(struct list_head *ws);

int zstd_compress_folios(struct list_head *ws, struct address_space *mapping,