diff --git a/module/zfs/range_tree.c b/module/zfs/range_tree.c
index d73195f1a21f..bfff029f0a9f 100644
--- a/module/zfs/range_tree.c
+++ b/module/zfs/range_tree.c
@@ -503,6 +503,18 @@ zfs_range_tree_remove_impl(zfs_range_tree_t *rt, uint64_t start, uint64_t size,
 	rstart = zfs_rs_get_start(rs, rt);
 	rend = zfs_rs_get_end(rs, rt);
 
+	/*
+	 * Defensive check: if we detect corrupted bounds, log the issue
+	 * and try to recover rather than panicking
+	 */
+	if (rstart > start) {
+		zfs_panic_recover("zfs: rt=%s: segment bounds invalid - "
+		    "existing start (%llx) > requested start (%llx), "
+		    "this may indicate corrupted space map data",
+		    ZFS_RT_NAME(rt), (longlong_t)rstart, (longlong_t)start);
+		return;
+	}
+
 	/*
 	 * Range trees with gap support must only remove complete segments
 	 * from the tree. This allows us to maintain accurate fill accounting
diff --git a/module/zfs/space_map.c b/module/zfs/space_map.c
index 5f24963f2291..45e3359f1904 100644
--- a/module/zfs/space_map.c
+++ b/module/zfs/space_map.c
@@ -402,6 +402,21 @@ static int
 space_map_load_callback(space_map_entry_t *sme, void *arg)
 {
 	space_map_load_arg_t *smla = arg;
+
+	/* Validate space map entry bounds */
+	if (sme->sme_run == 0) {
+		return (0);
+	}
+
+	if (sme->sme_offset + sme->sme_run > smla->smla_sm->sm_size) {
+		zfs_panic_recover("Skipping out-of-bounds space map entry "
+		    "(offset=%llu, size=%llu, sm_size=%llu)",
+		    (unsigned long long)sme->sme_offset,
+		    (unsigned long long)sme->sme_run,
+		    (unsigned long long)smla->smla_sm->sm_size);
+		return (0);
+	}
+
 	if (sme->sme_type == smla->smla_type) {
 		VERIFY3U(zfs_range_tree_space(smla->smla_rt) + sme->sme_run,
 		    <=, smla->smla_sm->sm_size);