[PATCH v3 1/4] btrfs: balance: fix null-ptr-deref in chunk_usage_filter
From: ZhengYuan Huang
Date: Tue Mar 24 2026 - 20:50:40 EST
[BUG]
Running btrfs balance with a usage filter (-dusage=N) can trigger a
null-ptr-deref when metadata corruption causes a chunk to have no
corresponding block group in the in-memory cache:
KASAN: null-ptr-deref in range [0x0000000000000070-0x0000000000000077]
RIP: 0010:chunk_usage_filter fs/btrfs/volumes.c:3874 [inline]
RIP: 0010:should_balance_chunk fs/btrfs/volumes.c:4018 [inline]
RIP: 0010:__btrfs_balance fs/btrfs/volumes.c:4172 [inline]
RIP: 0010:btrfs_balance+0x2024/0x42b0 fs/btrfs/volumes.c:4604
...
Call Trace:
btrfs_ioctl_balance fs/btrfs/ioctl.c:3577 [inline]
btrfs_ioctl+0x25cf/0x5b90 fs/btrfs/ioctl.c:5313
vfs_ioctl fs/ioctl.c:51 [inline]
...
The bug is reproducible on next-20260312.
[CAUSE]
Two separate data structures are involved:
1. The on-disk chunk tree, which records every chunk (logical address
space region) and is iterated by __btrfs_balance().
2. The in-memory block group cache (fs_info->block_group_cache_tree),
which is built at mount time by btrfs_read_block_groups() and holds
a struct btrfs_block_group for each chunk. This cache is what the
usage filter queries.
On a well-formed filesystem, these two are kept in 1:1 correspondence.
However, btrfs_read_block_groups() builds the cache from block group
items in the extent tree, not directly from the chunk tree. A corrupted
image can therefore contain a chunk item in the chunk tree whose
corresponding block group item is absent from the extent tree; that
chunk's block group is then never inserted into the in-memory cache.
When balance iterates the chunk tree and reaches such an orphaned chunk,
should_balance_chunk() calls chunk_usage_filter(), which queries the block
group cache:
cache = btrfs_lookup_block_group(fs_info, chunk_offset);
chunk_used = cache->used; /* cache may be NULL */
btrfs_lookup_block_group() returns NULL silently when no cached entry
covers chunk_offset. chunk_usage_filter() does not check the return value,
so the immediately following dereference of cache->used triggers the crash.
[FIX]
Add a NULL check after btrfs_lookup_block_group() in chunk_usage_filter().
When the lookup fails, emit a btrfs_err() message identifying the
affected bytenr and return -EUCLEAN to indicate filesystem corruption.
Since chunk_usage_filter() now has an error path, change its return type
from bool to int: negative errno on error, 0 if the chunk passes the
usage filter and remains a balance candidate, and 1 if it is filtered
out and should be skipped. Update
should_balance_chunk() accordingly to propagate negative errors from the
usage filter path while still returning 0 for chunks that should not be
balanced and 1 for chunks that should be balanced. Finally, handle the
new negative return in __btrfs_balance() by jumping to the existing
error path, which aborts the balance operation and reports the error to
userspace.
After the fix, the same corrupted image is detected by the usage filter:
balance aborts with -EUCLEAN, the offending bytenr is reported via
btrfs_err(), and the null-ptr-deref is no longer triggered.
Fixes: 5ce5b3c0916b ("Btrfs: usage filter")
Signed-off-by: ZhengYuan Huang <gality369@xxxxxxxxx>
---
fs/btrfs/volumes.c | 32 +++++++++++++++++++++++---------
1 file changed, 23 insertions(+), 9 deletions(-)
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 2bec544d8ba3..1eca5fa6bdaa 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -3863,14 +3863,20 @@ static bool chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_of
return ret;
}
-static bool chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
- struct btrfs_balance_args *bargs)
+static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
+ struct btrfs_balance_args *bargs)
{
struct btrfs_block_group *cache;
u64 chunk_used, user_thresh;
- bool ret = true;
+ int ret = 1;
cache = btrfs_lookup_block_group(fs_info, chunk_offset);
+ if (!cache) {
+ btrfs_err(fs_info,
+ "balance: chunk at bytenr %llu has no corresponding block group",
+ chunk_offset);
+ return -EUCLEAN;
+ }
chunk_used = cache->used;
if (bargs->usage_min == 0)
@@ -3881,7 +3887,7 @@ static bool chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
user_thresh = mult_perc(cache->length, bargs->usage);
if (chunk_used < user_thresh)
- ret = false;
+ ret = 0;
btrfs_put_block_group(cache);
return ret;
@@ -3986,8 +3992,8 @@ static bool chunk_soft_convert_filter(u64 chunk_type, struct btrfs_balance_args
return false;
}
-static bool should_balance_chunk(struct extent_buffer *leaf, struct btrfs_chunk *chunk,
- u64 chunk_offset)
+static int should_balance_chunk(struct extent_buffer *leaf, struct btrfs_chunk *chunk,
+ u64 chunk_offset)
{
struct btrfs_fs_info *fs_info = leaf->fs_info;
struct btrfs_balance_control *bctl = fs_info->balance_ctl;
@@ -4014,9 +4020,13 @@ static bool should_balance_chunk(struct extent_buffer *leaf, struct btrfs_chunk
}
/* usage filter */
- if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
- chunk_usage_filter(fs_info, chunk_offset, bargs)) {
- return false;
+ if (bargs->flags & BTRFS_BALANCE_ARGS_USAGE) {
+ int ret2 = chunk_usage_filter(fs_info, chunk_offset, bargs);
+
+ if (ret2 < 0)
+ return ret2;
+ if (ret2)
+ return false;
} else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
return false;
@@ -4172,6 +4182,10 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info)
ret = should_balance_chunk(leaf, chunk, found_key.offset);
btrfs_release_path(path);
+ if (ret < 0) {
+ mutex_unlock(&fs_info->reclaim_bgs_lock);
+ goto error;
+ }
if (!ret) {
mutex_unlock(&fs_info->reclaim_bgs_lock);
goto loop;
--
2.43.0