Fix false assertion in dmu_tx_dirty_buf() on cloning
Same as writes, block cloning can increase the block size and the number
of indirection levels.  That means it can dirty block 0 at level 0 or
at a new top indirection level without explicitly holding them.

A block cloning test case for large offsets has been added.

Reviewed-by: Rob Norris <[email protected]>
Reviewed-by: Brian Behlendorf <[email protected]>
Co-authored-by: Ameer Hamza <[email protected]>
Signed-off-by:	Alexander Motin <[email protected]>
Sponsored by:	iXsystems, Inc.
Closes openzfs#16825
amotin authored and behlendorf committed Dec 5, 2024
1 parent d874f27 commit f54052a
Showing 5 changed files with 95 additions and 1 deletion.
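For context, the following is a minimal userspace sketch of the scenario the commit message describes; it is not part of this commit, and the /tank paths and pool layout are assumptions. With the default 128 KiB recordsize, a single indirect block holds on the order of 1024 block pointers, so a clone landing at a 128 MiB offset in a brand-new destination file forces the dnode to grow its block size and add indirection levels, dirtying block 0 at level 0 and the new top-level indirect blocks that the THT_CLONE hold range did not cover.

/*
 * clone_large_offset.c: hedged sketch, not part of the commit.
 * Assumes /tank is a mounted dataset on a ZFS pool with
 * feature@block_cloning enabled and that /tank/file1 already has data
 * at the 128 MiB offset (e.g. written with dd as in the test below).
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

int
main(void)
{
	off_t off_in = 134217728;	/* 1024 blocks * 128 KiB */
	off_t off_out = 134217728;
	size_t len = 134217728;

	int fd_in = open("/tank/file1", O_RDONLY);
	int fd_out = open("/tank/file2", O_WRONLY | O_CREAT, 0644);
	if (fd_in < 0 || fd_out < 0) {
		perror("open");
		return (1);
	}

	/*
	 * On OpenZFS, copy_file_range() attempts block cloning; the
	 * destination file is brand new, so satisfying a 128 MiB
	 * offset requires growing its block size and indirection
	 * levels, which is what tripped the old assertion.
	 */
	while (len > 0) {
		ssize_t n = copy_file_range(fd_in, &off_in, fd_out,
		    &off_out, len, 0);
		if (n < 0) {
			perror("copy_file_range");
			return (1);
		}
		if (n == 0)
			break;	/* unexpected EOF */
		len -= (size_t)n;
	}

	(void) close(fd_in);
	(void) close(fd_out);
	return (0);
}

The new test added below exercises the same path through the test suite's clonefile helper.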
8 changes: 8 additions & 0 deletions module/zfs/dmu_tx.c
@@ -800,6 +800,14 @@ dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
case THT_CLONE:
if (blkid >= beginblk && blkid <= endblk)
match_offset = TRUE;
/*
 * They might have to increase nlevels,
 * thus dirtying the new TLIBs. Or they
 * might have to change the block size,
 * thus dirtying the new lvl=0 blk=0.
 */
if (blkid == 0)
match_offset = TRUE;
break;
default:
cmn_err(CE_PANIC, "bad txh_type %d",
2 changes: 1 addition & 1 deletion tests/runfiles/common.run
@@ -82,7 +82,7 @@ tests = ['block_cloning_clone_mmap_cached',
'block_cloning_copyfilerange_fallback_same_txg',
'block_cloning_replay', 'block_cloning_replay_encrypted',
'block_cloning_lwb_buffer_overflow', 'block_cloning_clone_mmap_write',
'block_cloning_rlimit_fsize']
'block_cloning_rlimit_fsize', 'block_cloning_large_offset']
tags = ['functional', 'block_cloning']

[tests/functional/bootfs]
2 changes: 2 additions & 0 deletions tests/test-runner/bin/zts-report.py.in
@@ -339,6 +339,8 @@ elif sys.platform.startswith('linux'):
['SKIP', cfr_reason],
'block_cloning/block_cloning_rlimit_fsize':
['SKIP', cfr_reason],
'block_cloning/block_cloning_large_offset':
['SKIP', cfr_reason],
'cli_root/zfs_rename/zfs_rename_002_pos': ['FAIL', known_reason],
'cli_root/zpool_reopen/zpool_reopen_003_pos': ['FAIL', known_reason],
'cp_files/cp_files_002_pos': ['SKIP', cfr_reason],
1 change: 1 addition & 0 deletions tests/zfs-tests/tests/Makefile.am
@@ -482,6 +482,7 @@ nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
functional/block_cloning/block_cloning_replay_encrypted.ksh \
functional/block_cloning/block_cloning_lwb_buffer_overflow.ksh \
functional/block_cloning/block_cloning_rlimit_fsize.ksh \
functional/block_cloning/block_cloning_large_offset.ksh \
functional/bootfs/bootfs_001_pos.ksh \
functional/bootfs/bootfs_002_neg.ksh \
functional/bootfs/bootfs_003_pos.ksh \
83 changes: 83 additions & 0 deletions tests/zfs-tests/tests/functional/block_cloning/block_cloning_large_offset.ksh
@@ -0,0 +1,83 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#

. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib

#
# DESCRIPTION:
# Verify that cloning a file at a large offset is possible.
#
# STRATEGY:
# 1. Create dataset.
# 2. Populate the source file with 1024 blocks at a 1024-block offset.
# 3. Clone 1024 blocks at a 1024-block offset.
# 4. Compare the cloned file with the original file.
#

verify_runnable "global"

if is_linux && [[ $(linux_version) -lt $(linux_version "4.5") ]]; then
log_unsupported "copy_file_range not available before Linux 4.5"
fi

claim="The first clone at a large offset is functional"

log_assert $claim

function cleanup
{
datasetexists $TESTPOOL && destroy_pool $TESTPOOL
}

log_onexit cleanup

#
# 1. Create dataset.
#
log_must zpool create -o feature@block_cloning=enabled $TESTPOOL $DISKS
sync_pool $TESTPOOL

#
# 2. Populate the source file with 1024 blocks at a 1024-block offset.
#
log_must dd if=/dev/urandom of=/$TESTPOOL/file1 \
oflag=sync bs=128k count=1024 seek=1024
sync_pool $TESTPOOL

#
# 3. Clone 1024 blocks at a 1024-block offset.
#
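# 134217728 bytes = 1024 blocks * 128 KiB (the dd block size above), used
# here for the source offset, the destination offset and the length.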
log_must clonefile -f /$TESTPOOL/file1 /$TESTPOOL/file2 134217728 134217728 \
134217728
sync_pool $TESTPOOL

#
# 4. Compare the cloned file with the original file.
#
log_must have_same_content /$TESTPOOL/file1 /$TESTPOOL/file2
typeset blocks=$(get_same_blocks $TESTPOOL file1 $TESTPOOL file2)

# FreeBSD's seq(1) leaves a trailing space, remove it with sed(1).
log_must [ "$blocks" = "$(seq -s " " 0 1023 | sed 's/ $//')" ]

log_pass $claim
