Allow and prefer special vdevs as ZIL #17505

Merged 1 commit on Jul 19, 2025.
20 changes: 19 additions & 1 deletion cmd/zdb/zdb.c
@@ -6750,6 +6750,7 @@ zdb_leak_init(spa_t *spa, zdb_cb_t *zcb)
spa->spa_normal_class->mc_ops = &zdb_metaslab_ops;
spa->spa_log_class->mc_ops = &zdb_metaslab_ops;
spa->spa_embedded_log_class->mc_ops = &zdb_metaslab_ops;
spa->spa_special_embedded_log_class->mc_ops = &zdb_metaslab_ops;

zcb->zcb_vd_obsolete_counts =
umem_zalloc(rvd->vdev_children * sizeof (uint32_t *),
@@ -6887,7 +6888,9 @@ zdb_leak_fini(spa_t *spa, zdb_cb_t *zcb)
for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *msp = vd->vdev_ms[m];
ASSERT3P(msp->ms_group, ==, (msp->ms_group->mg_class ==
spa_embedded_log_class(spa)) ?
spa_embedded_log_class(spa) ||
msp->ms_group->mg_class ==
spa_special_embedded_log_class(spa)) ?
vd->vdev_log_mg : vd->vdev_mg);

/*
@@ -7121,6 +7124,8 @@ dump_block_stats(spa_t *spa)
zcb->zcb_totalasize += metaslab_class_get_alloc(spa_dedup_class(spa));
zcb->zcb_totalasize +=
metaslab_class_get_alloc(spa_embedded_log_class(spa));
zcb->zcb_totalasize +=
metaslab_class_get_alloc(spa_special_embedded_log_class(spa));
zcb->zcb_start = zcb->zcb_lastprint = gethrtime();
err = traverse_pool(spa, 0, flags, zdb_blkptr_cb, zcb);

@@ -7169,6 +7174,7 @@ dump_block_stats(spa_t *spa)
total_alloc = norm_alloc +
metaslab_class_get_alloc(spa_log_class(spa)) +
metaslab_class_get_alloc(spa_embedded_log_class(spa)) +
metaslab_class_get_alloc(spa_special_embedded_log_class(spa)) +
metaslab_class_get_alloc(spa_special_class(spa)) +
metaslab_class_get_alloc(spa_dedup_class(spa)) +
get_unflushed_alloc_space(spa);
@@ -7252,6 +7258,18 @@ dump_block_stats(spa_t *spa)
100.0 * alloc / space);
}

if (spa_special_embedded_log_class(spa)->mc_allocator[0].mca_rotor
!= NULL) {
uint64_t alloc = metaslab_class_get_alloc(
spa_special_embedded_log_class(spa));
uint64_t space = metaslab_class_get_space(
spa_special_embedded_log_class(spa));

(void) printf("\t%-16s %14llu used: %5.2f%%\n",
"Special embedded log", (u_longlong_t)alloc,
100.0 * alloc / space);
}

for (i = 0; i < NUM_BP_EMBEDDED_TYPES; i++) {
if (zcb->zcb_embedded_blocks[i] == 0)
continue;
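With this hunk, the block-statistics summary printed by zdb -b gains a line for the new class whenever a special vdev hosts an embedded log. Given the format string above, the output would look roughly like the following (numbers hypothetical):

	Special embedded log       12582912 used:  1.50%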
1 change: 1 addition & 0 deletions include/sys/spa.h
@@ -1065,6 +1065,7 @@ extern metaslab_class_t *spa_normal_class(spa_t *spa);
extern metaslab_class_t *spa_log_class(spa_t *spa);
extern metaslab_class_t *spa_embedded_log_class(spa_t *spa);
extern metaslab_class_t *spa_special_class(spa_t *spa);
extern metaslab_class_t *spa_special_embedded_log_class(spa_t *spa);
extern metaslab_class_t *spa_dedup_class(spa_t *spa);
extern metaslab_class_t *spa_preferred_class(spa_t *spa, const zio_t *zio);
extern boolean_t spa_special_has_ddt(spa_t *spa);
1 change: 1 addition & 0 deletions include/sys/spa_impl.h
@@ -246,6 +246,7 @@ struct spa {
metaslab_class_t *spa_log_class; /* intent log data class */
metaslab_class_t *spa_embedded_log_class; /* log on normal vdevs */
metaslab_class_t *spa_special_class; /* special allocation class */
metaslab_class_t *spa_special_embedded_log_class; /* log on special */
metaslab_class_t *spa_dedup_class; /* dedup allocation class */
uint64_t spa_first_txg; /* first txg after spa_open() */
uint64_t spa_final_txg; /* txg of export/destroy */
4 changes: 4 additions & 0 deletions include/sys/zil.h
@@ -635,13 +635,17 @@ extern void zil_set_logbias(zilog_t *zilog, uint64_t slogval);

extern uint64_t zil_max_copied_data(zilog_t *zilog);
extern uint64_t zil_max_log_data(zilog_t *zilog, size_t hdrsize);
extern itx_wr_state_t zil_write_state(zilog_t *zilog, uint64_t size,
uint32_t blocksize, boolean_t o_direct, boolean_t commit);

extern void zil_sums_init(zil_sums_t *zs);
extern void zil_sums_fini(zil_sums_t *zs);
extern void zil_kstat_values_update(zil_kstat_values_t *zs,
zil_sums_t *zil_sums);

extern int zil_replay_disable;
extern uint_t zfs_immediate_write_sz;
extern int zil_special_is_slog;

#ifdef __cplusplus
}
16 changes: 12 additions & 4 deletions man/man4/zfs.4
@@ -1713,10 +1713,18 @@ Similar to
but for cleanup of old indirection records for removed vdevs.
.
.It Sy zfs_immediate_write_sz Ns = Ns Sy 32768 Ns B Po 32 KiB Pc Pq s64
Largest data block to write to the ZIL.
Larger blocks will be treated as if the dataset being written to had the
.Sy logbias Ns = Ns Sy throughput
property set.
Largest write size to store the data directly into the ZIL if
.Sy logbias Ns = Ns Sy latency .
Larger writes may be written indirectly, similar to
.Sy logbias Ns = Ns Sy throughput .
In the presence of a SLOG this parameter is ignored, as if it were set to
infinity, storing all written data in the ZIL rather than depending on
regular vdev latency.
.
.It Sy zil_special_is_slog Ns = Ns Sy 1 Ns | Ns 0 Pq int
When enabled, and written blocks would otherwise go to normal vdevs, treat
present special vdevs as SLOGs, storing all synchronously written data
directly in the ZIL.
Disabling this forces indirect writes, preserving special vdev write
throughput and endurance, likely at the cost of normal vdev latency.
.
.It Sy zfs_initialize_value Ns = Ns Sy 16045690984833335022 Po 0xDEADBEEFDEADBEEE Pc Pq u64
Pattern written to vdev free space by
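To make the interaction of these two tunables concrete, here is a minimal standalone sketch of the write-state policy they control, assuming logbias=latency and no Direct I/O. The function name, the boolean parameters standing in for pool and dataset state, and the sample values are illustrative only; the authoritative logic is the zil_write_state() hunk in module/zfs/zil.c further down.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Defaults as documented above. */
static uint64_t zfs_immediate_write_sz = 32768;
static int zil_special_is_slog = 1;

typedef enum { WR_INDIRECT, WR_COPIED, WR_NEED_COPY } wr_state_t;

/*
 * has_slog/has_special/on_special stand in for spa_has_slogs(),
 * spa_has_special() and the special_small_blocks placement check.
 */
static wr_state_t
pick_write_state(uint64_t size, uint32_t blocksize, bool commit,
    bool has_slog, bool has_special, bool on_special)
{
	/* Indirect only for large writes that won't inflate on commit. */
	bool indirect = (size >= zfs_immediate_write_sz &&
	    (size >= blocksize / 2 || !commit));

	if (has_slog)
		indirect = false;	/* Dedicated SLOG: log all data. */
	else if (has_special)
		indirect &= (on_special || !zil_special_is_slog);

	if (indirect)
		return (WR_INDIRECT);
	return (commit ? WR_COPIED : WR_NEED_COPY);
}

int
main(void)
{
	/*
	 * Committed 64 KiB write, 128 KiB block, special vdev present but
	 * the data block bound for normal vdevs: the special vdev is
	 * treated as a SLOG, so the result is WR_COPIED (prints 1).
	 */
	printf("%d\n", pick_write_state(65536, 131072, true,
	    false, true, false));
	return (0);
}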
9 changes: 6 additions & 3 deletions module/zfs/metaslab.c
@@ -750,7 +750,8 @@ metaslab_class_histogram_verify(metaslab_class_t *mc)
}

IMPLY(mg == mg->mg_vd->vdev_log_mg,
mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
mc == spa_embedded_log_class(mg->mg_vd->vdev_spa) ||
mc == spa_special_embedded_log_class(mg->mg_vd->vdev_spa));

for (i = 0; i < ZFS_RANGE_TREE_HISTOGRAM_SIZE; i++)
mc_hist[i] += mg->mg_histogram[i];
@@ -1288,7 +1289,8 @@ metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
mutex_enter(&mc->mc_lock);
for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
IMPLY(mg == mg->mg_vd->vdev_log_mg,
mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
mc == spa_embedded_log_class(mg->mg_vd->vdev_spa) ||
mc == spa_special_embedded_log_class(mg->mg_vd->vdev_spa));
mg->mg_histogram[i + ashift] +=
msp->ms_sm->sm_phys->smp_histogram[i];
mc->mc_histogram[i + ashift] +=
@@ -1316,7 +1318,8 @@ metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
ASSERT3U(mc->mc_histogram[i + ashift], >=,
msp->ms_sm->sm_phys->smp_histogram[i]);
IMPLY(mg == mg->mg_vd->vdev_log_mg,
mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
mc == spa_embedded_log_class(mg->mg_vd->vdev_spa) ||
mc == spa_special_embedded_log_class(mg->mg_vd->vdev_spa));

mg->mg_histogram[i + ashift] -=
msp->ms_sm->sm_phys->smp_histogram[i];
15 changes: 14 additions & 1 deletion module/zfs/spa.c
@@ -417,11 +417,15 @@ spa_prop_get_config(spa_t *spa, nvlist_t *nv)
alloc += metaslab_class_get_alloc(spa_special_class(spa));
alloc += metaslab_class_get_alloc(spa_dedup_class(spa));
alloc += metaslab_class_get_alloc(spa_embedded_log_class(spa));
alloc += metaslab_class_get_alloc(
spa_special_embedded_log_class(spa));

size = metaslab_class_get_space(mc);
size += metaslab_class_get_space(spa_special_class(spa));
size += metaslab_class_get_space(spa_dedup_class(spa));
size += metaslab_class_get_space(spa_embedded_log_class(spa));
size += metaslab_class_get_space(
spa_special_embedded_log_class(spa));

spa_prop_add_list(nv, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
spa_prop_add_list(nv, ZPOOL_PROP_SIZE, NULL, size, src);
@@ -1679,6 +1683,8 @@ spa_activate(spa_t *spa, spa_mode_t mode)
"embedded_log", msp, B_TRUE);
spa->spa_special_class = metaslab_class_create(spa, "special",
msp, B_FALSE);
spa->spa_special_embedded_log_class = metaslab_class_create(spa,
"special_embedded_log", msp, B_TRUE);
spa->spa_dedup_class = metaslab_class_create(spa, "dedup",
msp, B_FALSE);

@@ -1853,6 +1859,9 @@ spa_deactivate(spa_t *spa)
metaslab_class_destroy(spa->spa_special_class);
spa->spa_special_class = NULL;

metaslab_class_destroy(spa->spa_special_embedded_log_class);
spa->spa_special_embedded_log_class = NULL;

metaslab_class_destroy(spa->spa_dedup_class);
spa->spa_dedup_class = NULL;

@@ -9092,6 +9101,8 @@ spa_async_thread(void *arg)
old_space += metaslab_class_get_space(spa_dedup_class(spa));
old_space += metaslab_class_get_space(
spa_embedded_log_class(spa));
old_space += metaslab_class_get_space(
spa_special_embedded_log_class(spa));

spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);

@@ -9100,6 +9111,8 @@ spa_async_thread(void *arg)
new_space += metaslab_class_get_space(spa_dedup_class(spa));
new_space += metaslab_class_get_space(
spa_embedded_log_class(spa));
new_space += metaslab_class_get_space(
spa_special_embedded_log_class(spa));
mutex_exit(&spa_namespace_lock);

/*
@@ -10309,7 +10322,7 @@ spa_sync(spa_t *spa, uint64_t txg)

metaslab_class_evict_old(spa->spa_normal_class, txg);
metaslab_class_evict_old(spa->spa_log_class, txg);
/* spa_embedded_log_class has only one metaslab per vdev. */
/* Embedded log classes have only one metaslab per vdev. */
metaslab_class_evict_old(spa->spa_special_class, txg);
metaslab_class_evict_old(spa->spa_dedup_class, txg);

9 changes: 9 additions & 0 deletions module/zfs/spa_misc.c
@@ -1308,6 +1308,7 @@ spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error,
metaslab_class_validate(spa_log_class(spa));
metaslab_class_validate(spa_embedded_log_class(spa));
metaslab_class_validate(spa_special_class(spa));
metaslab_class_validate(spa_special_embedded_log_class(spa));
metaslab_class_validate(spa_dedup_class(spa));

spa_config_exit(spa, SCL_ALL, spa);
@@ -1896,6 +1897,8 @@ spa_get_slop_space(spa_t *spa)
*/
uint64_t embedded_log =
metaslab_class_get_dspace(spa_embedded_log_class(spa));
embedded_log += metaslab_class_get_dspace(
spa_special_embedded_log_class(spa));
slop -= MIN(embedded_log, slop >> 1);

/*
Expand Down Expand Up @@ -2000,6 +2003,12 @@ spa_special_class(spa_t *spa)
return (spa->spa_special_class);
}

metaslab_class_t *
spa_special_embedded_log_class(spa_t *spa)
{
return (spa->spa_special_embedded_log_class);
}

metaslab_class_t *
spa_dedup_class(spa_t *spa)
{
21 changes: 15 additions & 6 deletions module/zfs/vdev.c
@@ -282,12 +282,15 @@ vdev_getops(const char *type)
* Given a vdev and a metaslab class, find which metaslab group we're
* interested in. All vdevs may belong to two different metaslab classes.
* Dedicated slog devices use only the primary metaslab group, rather than a
* separate log group. For embedded slogs, the vdev_log_mg will be non-NULL.
* separate log group. For embedded slogs, vdev_log_mg will be non-NULL and
* will point to a metaslab group of either embedded_log_class (for normal
* vdevs) or special_embedded_log_class (for special vdevs).
*/
metaslab_group_t *
vdev_get_mg(vdev_t *vd, metaslab_class_t *mc)
{
if (mc == spa_embedded_log_class(vd->vdev_spa) &&
if ((mc == spa_embedded_log_class(vd->vdev_spa) ||
mc == spa_special_embedded_log_class(vd->vdev_spa)) &&
vd->vdev_log_mg != NULL)
return (vd->vdev_log_mg);
else
@@ -1508,8 +1511,13 @@ vdev_metaslab_group_create(vdev_t *vd)
vd->vdev_mg = metaslab_group_create(mc, vd);

if (!vd->vdev_islog) {
vd->vdev_log_mg = metaslab_group_create(
spa_embedded_log_class(spa), vd);
if (mc == spa_special_class(spa)) {
vd->vdev_log_mg = metaslab_group_create(
spa_special_embedded_log_class(spa), vd);
} else {
vd->vdev_log_mg = metaslab_group_create(
spa_embedded_log_class(spa), vd);
}
}

/*
@@ -1624,9 +1632,10 @@ vdev_metaslab_init(vdev_t *vd, uint64_t txg)
/*
* Find the emptiest metaslab on the vdev and mark it for use as the
* embedded slog by moving it from the regular to the log metaslab
* group.
* group. This works for normal and special vdevs.
*/
if (vd->vdev_mg->mg_class == spa_normal_class(spa) &&
if ((vd->vdev_mg->mg_class == spa_normal_class(spa) ||
vd->vdev_mg->mg_class == spa_special_class(spa)) &&
vd->vdev_ms_count > zfs_embedded_slog_min_ms &&
avl_is_empty(&vd->vdev_log_mg->mg_metaslab_tree)) {
uint64_t slog_msid = 0;
16 changes: 2 additions & 14 deletions module/zfs/zfs_log.c
@@ -607,8 +607,6 @@ zfs_log_rename_whiteout(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
* called as soon as the write is on stable storage (be it via a DMU sync or a
* ZIL commit).
*/
static uint_t zfs_immediate_write_sz = 32768;

void
zfs_log_write(zilog_t *zilog, dmu_tx_t *tx, int txtype,
znode_t *zp, offset_t off, ssize_t resid, boolean_t commit,
@@ -626,15 +624,8 @@ zfs_log_write(zilog_t *zilog, dmu_tx_t *tx, int txtype,
return;
}

if (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT || o_direct)
write_state = WR_INDIRECT;
else if (!spa_has_slogs(zilog->zl_spa) &&
resid >= zfs_immediate_write_sz)
write_state = WR_INDIRECT;
else if (commit)
write_state = WR_COPIED;
else
write_state = WR_NEED_COPY;
write_state = zil_write_state(zilog, resid, blocksize, o_direct,
commit);

(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(ZTOZSB(zp)), &gen,
sizeof (gen));
@@ -938,6 +929,3 @@ zfs_log_clone_range(zilog_t *zilog, dmu_tx_t *tx, int txtype, znode_t *zp,
len -= partlen;
}
}

ZFS_MODULE_PARAM(zfs, zfs_, immediate_write_sz, UINT, ZMOD_RW,
"Largest data block to write to zil");
59 changes: 59 additions & 0 deletions module/zfs/zil.c
@@ -2095,13 +2095,66 @@ zil_max_waste_space(zilog_t *zilog)
*/
static uint_t zil_maxcopied = 7680;

/*
* Largest write size to store the data directly into ZIL.
*/
uint_t zfs_immediate_write_sz = 32768;

/*
* When enabled and blocks would otherwise go to normal vdevs, treat special
* vdevs as SLOG, writing the data to the ZIL (WR_COPIED/WR_NEED_COPY).
* Disabling this forces indirect writes (WR_INDIRECT), preserving special
* vdev write throughput and endurance, likely at the cost of normal vdev
* latency.
*/
int zil_special_is_slog = 1;

uint64_t
zil_max_copied_data(zilog_t *zilog)
{
uint64_t max_data = zil_max_log_data(zilog, sizeof (lr_write_t));
return (MIN(max_data, zil_maxcopied));
}

/*
* Determine the appropriate write state for ZIL transactions based on
* pool configuration, data placement, write size, and logbias settings.
*/
itx_wr_state_t
zil_write_state(zilog_t *zilog, uint64_t size, uint32_t blocksize,
boolean_t o_direct, boolean_t commit)
{
if (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT || o_direct)
return (WR_INDIRECT);

/*
* Don't use indirect for small writes, to reduce overhead.
* Don't use indirect if less than half of a block is written and
* we are going to commit it immediately, since the next write might
* rewrite the same block again, causing inflation. If no commit is
* planned, subsequent writes may coalesce, and then indirect can be
* a perfect fit.
*/
boolean_t indirect = (size >= zfs_immediate_write_sz &&
(size >= blocksize / 2 || !commit));

if (spa_has_slogs(zilog->zl_spa)) {
/* Dedicated slogs: never use indirect */
indirect = B_FALSE;
} else if (spa_has_special(zilog->zl_spa)) {
/* Special vdevs: only when beneficial */
boolean_t on_special = (blocksize <=
zilog->zl_os->os_zpl_special_smallblock);
indirect &= (on_special || !zil_special_is_slog);
}

if (indirect)
return (WR_INDIRECT);
else if (commit)
return (WR_COPIED);
else
return (WR_NEED_COPY);
}
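As a concrete illustration of this policy, assuming the defaults: a committed 64 KiB write into a 128 KiB block meets both size tests (at least zfs_immediate_write_sz, and at least half the block), so without log or special devices it returns WR_INDIRECT. A dedicated SLOG clears the flag, and the same write becomes WR_COPIED. With only a special vdev, the outcome depends on placement: if special_small_blocks covers the record size, the data block lands on the special vdev anyway and indirect stays beneficial; otherwise zil_special_is_slog=1 makes the special vdev act as a SLOG and the write is logged directly.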

static uint64_t
zil_itx_record_size(itx_t *itx)
{
@@ -4418,3 +4471,9 @@ ZFS_MODULE_PARAM(zfs_zil, zil_, maxblocksize, UINT, ZMOD_RW,

ZFS_MODULE_PARAM(zfs_zil, zil_, maxcopied, UINT, ZMOD_RW,
"Limit in bytes WR_COPIED size");

ZFS_MODULE_PARAM(zfs, zfs_, immediate_write_sz, UINT, ZMOD_RW,
"Largest write size to store data into ZIL");

ZFS_MODULE_PARAM(zfs_zil, zil_, special_is_slog, INT, ZMOD_RW,
"Treat special vdevs as SLOG");