2 changes: 1 addition & 1 deletion include/sys/spa.h
@@ -1030,7 +1030,7 @@ extern void spa_import_progress_set_notes_nolog(spa_t *spa,
 extern int spa_config_tryenter(spa_t *spa, int locks, const void *tag,
     krw_t rw);
 extern void spa_config_enter(spa_t *spa, int locks, const void *tag, krw_t rw);
-extern void spa_config_enter_mmp(spa_t *spa, int locks, const void *tag,
+extern void spa_config_enter_priority(spa_t *spa, int locks, const void *tag,
     krw_t rw);
 extern void spa_config_exit(spa_t *spa, int locks, const void *tag);
 extern int spa_config_held(spa_t *spa, int locks, krw_t rw);
2 changes: 1 addition & 1 deletion module/zfs/mmp.c
@@ -446,7 +446,7 @@ mmp_write_uberblock(spa_t *spa)
     uint64_t offset;
 
     hrtime_t lock_acquire_time = gethrtime();
-    spa_config_enter_mmp(spa, SCL_STATE, mmp_tag, RW_READER);
+    spa_config_enter_priority(spa, SCL_STATE, mmp_tag, RW_READER);
     lock_acquire_time = gethrtime() - lock_acquire_time;
     if (lock_acquire_time > (MSEC2NSEC(MMP_MIN_INTERVAL) / 10))
         zfs_dbgmsg("MMP SCL_STATE acquisition pool '%s' took %llu ns "
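The instrumentation pattern in this hunk is worth noting: the MMP thread samples gethrtime() around the lock acquisition and emits a debug message whenever the wait exceeds a tenth of the minimum MMP interval. A minimal userspace sketch of the same idea, assuming a POSIX monotonic clock and an illustrative 100 ms value for MMP_MIN_INTERVAL (the real definition lives in the ZFS headers):

```c
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Userspace stand-ins; the kernel uses gethrtime() and MSEC2NSEC(). */
#define MSEC2NSEC(m)        ((uint64_t)(m) * 1000000ULL)
#define MMP_MIN_INTERVAL    100    /* ms; illustrative value only */

static uint64_t
hrtime_ns(void)
{
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    return ((uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec);
}

/* Time a lock-acquisition callback and log it if it was suspiciously slow. */
static void
timed_acquire(const char *pool, void (*acquire)(void *), void *arg)
{
    uint64_t t = hrtime_ns();

    acquire(arg);
    t = hrtime_ns() - t;

    /* Same threshold as mmp_write_uberblock(): 10% of the interval. */
    if (t > MSEC2NSEC(MMP_MIN_INTERVAL) / 10)
        fprintf(stderr, "SCL_STATE acquisition pool '%s' took %llu ns\n",
            pool, (unsigned long long)t);
}
```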
8 changes: 4 additions & 4 deletions module/zfs/spa_misc.c
@@ -510,7 +510,7 @@ spa_config_tryenter(spa_t *spa, int locks, const void *tag, krw_t rw)
 
 static void
 spa_config_enter_impl(spa_t *spa, int locks, const void *tag, krw_t rw,
-    int mmp_flag)
+    int priority_flag)
 {
     (void) tag;
     int wlocks_held = 0;
@@ -526,7 +526,7 @@ spa_config_enter_impl(spa_t *spa, int locks, const void *tag, krw_t rw,
         mutex_enter(&scl->scl_lock);
         if (rw == RW_READER) {
             while (scl->scl_writer ||
-                (!mmp_flag && scl->scl_write_wanted)) {
+                (!priority_flag && scl->scl_write_wanted)) {
                 cv_wait(&scl->scl_cv, &scl->scl_lock);
             }
         } else {
@@ -551,7 +551,7 @@ spa_config_enter(spa_t *spa, int locks, const void *tag, krw_t rw)
 }
 
 /*
- * The spa_config_enter_mmp() allows the mmp thread to cut in front of
+ * The spa_config_enter_priority() allows the mmp thread to cut in front of
  * outstanding write lock requests. This is needed since the mmp updates are
  * time sensitive and failure to service them promptly will result in a
  * suspended pool. This pool suspension has been seen in practice when there is
@@ -560,7 +560,7 @@ spa_config_enter(spa_t *spa, int locks, const void *tag, krw_t rw)
  */
 
 void
-spa_config_enter_mmp(spa_t *spa, int locks, const void *tag, krw_t rw)
+spa_config_enter_priority(spa_t *spa, int locks, const void *tag, krw_t rw)
 {
     spa_config_enter_impl(spa, locks, tag, rw, 1);
 }
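The mechanics are easiest to see outside the kernel. Below is a minimal sketch of one SCL-style lock using POSIX threads rather than the kernel's mutex/cv primitives: scl_write_wanted gives waiting writers preference over incoming readers, and a priority reader skips exactly that check, waiting only on an active writer. The struct layout and helper names are simplified stand-ins, not the real spa_config_lock.

```c
#include <pthread.h>

/*
 * Simplified stand-in for one spa_config_lock: scl_write_wanted counts
 * waiting writers so that ordinary readers stand aside (write preference);
 * a priority reader ignores it and waits only on an active writer.
 */
typedef struct scl {
    pthread_mutex_t scl_lock;
    pthread_cond_t  scl_cv;
    int             scl_writer;         /* 1 while a writer holds the lock */
    int             scl_count;          /* active readers, or 1 for the writer */
    int             scl_write_wanted;   /* writers currently waiting */
} scl_t;

static void
scl_enter_reader(scl_t *scl, int priority)
{
    pthread_mutex_lock(&scl->scl_lock);
    /*
     * A normal reader also yields to *waiting* writers; a priority
     * reader does not -- this is the !priority_flag check above.
     */
    while (scl->scl_writer ||
        (!priority && scl->scl_write_wanted))
        pthread_cond_wait(&scl->scl_cv, &scl->scl_lock);
    scl->scl_count++;
    pthread_mutex_unlock(&scl->scl_lock);
}

static void
scl_exit_reader(scl_t *scl)
{
    pthread_mutex_lock(&scl->scl_lock);
    if (--scl->scl_count == 0)
        pthread_cond_broadcast(&scl->scl_cv);
    pthread_mutex_unlock(&scl->scl_lock);
}

static void
scl_enter_writer(scl_t *scl)
{
    pthread_mutex_lock(&scl->scl_lock);
    scl->scl_write_wanted++;    /* visible to incoming readers */
    while (scl->scl_count != 0)
        pthread_cond_wait(&scl->scl_cv, &scl->scl_lock);
    scl->scl_write_wanted--;
    scl->scl_writer = 1;
    scl->scl_count = 1;
    pthread_mutex_unlock(&scl->scl_lock);
}

static void
scl_exit_writer(scl_t *scl)
{
    pthread_mutex_lock(&scl->scl_lock);
    scl->scl_writer = 0;
    scl->scl_count = 0;
    pthread_cond_broadcast(&scl->scl_cv);
    pthread_mutex_unlock(&scl->scl_lock);
}
```

Write preference is what keeps writers from starving under a steady stream of readers, and it is also precisely what makes the recursive-reader deadlock below possible.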
25 changes: 23 additions & 2 deletions module/zfs/zio.c
@@ -4574,8 +4574,29 @@ zio_vdev_io_start(zio_t *zio)
     ASSERT0(zio->io_child_error[ZIO_CHILD_VDEV]);
 
     if (vd == NULL) {
-        if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
-            spa_config_enter(spa, SCL_ZIO, zio, RW_READER);
+        if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) {
+            /*
+             * A deadlock workaround. The ddt_prune_unique_entries()
+             * -> prune_candidates_sync() code path takes the
+             * SCL_ZIO reader lock and may request it again here.
+             * If another thread wants the SCL_ZIO writer lock,
+             * then scl_write_wanted will be set. Thus,
+             * spa_config_enter_priority() is used to ignore
+             * pending writer requests.
+             *
+             * The locking should be revised to remove the need
+             * for this workaround. If that's not workable, then
+             * it should only be applied to the zios involved in
+             * the pruning process. This impacts the read/write
+             * I/O balance while pruning.
+             */
+            if (spa->spa_active_ddt_prune)
+                spa_config_enter_priority(spa, SCL_ZIO, zio,
+                    RW_READER);
+            else
+                spa_config_enter(spa, SCL_ZIO, zio,
+                    RW_READER);
+        }
 
         /*
          * The mirror_ops handle multiple DVAs in a single BP.
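To see why the priority entry matters here, the interleaving the comment describes can be reproduced against the scl_t sketch from the spa_misc.c notes above. The thread roles are hypothetical stand-ins for the real path through prune_candidates_sync() and zio_vdev_io_start(): one thread holds SCL_ZIO as reader and re-enters it, while a second thread requests the writer lock in between.

```c
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Builds on the scl_t sketch above (scl_enter_reader() and friends). */

static scl_t scl = {
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, 0, 0
};

static void *
config_writer(void *arg)
{
    (void) arg;
    scl_enter_writer(&scl);     /* blocks; sets scl_write_wanted */
    scl_exit_writer(&scl);
    return (NULL);
}

int
main(void)
{
    pthread_t t;

    /* Outer read hold, standing in for prune_candidates_sync(). */
    scl_enter_reader(&scl, 0);

    pthread_create(&t, NULL, config_writer, NULL);
    sleep(1);   /* crude: give the writer time to queue up and set the flag */

    /*
     * Re-entry, standing in for zio_vdev_io_start(). With priority = 0
     * this would wait on scl_write_wanted while the writer waits on our
     * outer hold: deadlock. With priority = 1 it completes.
     */
    scl_enter_reader(&scl, 1);
    scl_exit_reader(&scl);
    scl_exit_reader(&scl);      /* the writer can now proceed */

    pthread_join(t, NULL);
    printf("re-entered SCL_ZIO past a waiting writer; no deadlock\n");
    return (0);
}
```

This also illustrates the trade-off the comment acknowledges: every reader that takes the priority path while spa_active_ddt_prune is set jumps the writer queue, which is why the comment flags the impact on read/write I/O balance during pruning.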