Re: dd/sparse test failure on Solaris (zfs-backed?) NFS


From: Jim Meyering
Subject: Re: dd/sparse test failure on Solaris (zfs-backed?) NFS
Date: Fri, 23 Mar 2012 12:49:48 +0100

Bernhard Voelker wrote:
> On 03/23/2012 10:56 AM, Jim Meyering wrote:
>
>> +# Setup for block size tests: create a 3MiB file with a 1MiB
>> +# stretch of NUL bytes in the middle.
>> +rm -f file.in
>> +dd if=/dev/urandom of=file.in iflag=fullblock bs=1M count=3 || fail=1
>> +dd if=/dev/zero of=file.in conv=notrunc bs=1M count=1 || fail=1
>
>
> The NUL bytes are not in the middle:
>
> $ hexdump file.in | head -n 4
> 0000000 0000 0000 0000 0000 0000 0000 0000 0000
> *
> 0100000 f105 7956 e6ce e161 b166 0e84 208a 40ee
> 0100010 1531 ac0d 1c49 78a9 7aed 7f56 9161 4f25
>
> Did you mean seek=1M?

Good catch.  To make the code match the "in the middle" part of that
comment, I need "seek=1" (not "seek=1M", since the block size is
already 1M, so seek counts in 1MiB blocks).
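
(A quick sanity check, assuming GNU dd and tr: extract the middle MiB
and count its non-NUL bytes; zero means the NUL stretch really does
sit in the middle.)

  dd if=file.in bs=1M skip=1 count=1 2>/dev/null | tr -d '\0' | wc -c
  # prints 0 when bytes 1MiB..2MiB are all NULs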

Here's an incremental.

diff --git a/tests/dd/sparse b/tests/dd/sparse
index b3046d7..35ddda9 100755
--- a/tests/dd/sparse
+++ b/tests/dd/sparse
@@ -45,8 +45,8 @@ cmp file.in file.out || fail=1
 # Setup for block size tests: create a 3MiB file with a 1MiB
 # stretch of NUL bytes in the middle.
 rm -f file.in
-dd if=/dev/urandom of=file.in iflag=fullblock bs=1M count=3 || fail=1
-dd if=/dev/zero of=file.in conv=notrunc bs=1M count=1 || fail=1
+dd if=/dev/urandom of=file.in bs=1M count=3 iflag=fullblock || fail=1
+dd if=/dev/zero    of=file.in bs=1M count=1 seek=1 conv=notrunc || fail=1

 kb_alloc() { du -k "$1"|cut -f1; }


I'm still a little leery about using /dev/urandom,
but not enough to bother changing it right now.
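
If /dev/urandom ever does become a problem, any non-NUL filler would
do here, since conv=sparse only looks for runs of NUL bytes.  An
untested sketch (assuming GNU head for -c):

  # 3MiB of repeating, non-NUL bytes instead of random data
  yes abcdefgh | head -c 3145728 > file.in
  dd if=/dev/zero of=file.in bs=1M count=1 seek=1 conv=notrunc || fail=1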

From 4b101ccd176eb3951bfbab717a0a3b5e2c4d19ef Mon Sep 17 00:00:00 2001
From: Jim Meyering <address@hidden>
Date: Fri, 23 Mar 2012 10:53:56 +0100
Subject: [PATCH] tests: skip part of dd/sparse on some file systems

* tests/dd/sparse: The last two parts of this test would fail due to
the underlying file system at least on Solaris 10 with NFS.  That file
system would report that a 3MiB file was occupying <= 1KiB of space
for nearly 50 seconds after creation.
Improved-by: Bernhard Voelker
---
 tests/dd/sparse |   54 ++++++++++++++++++++++++------------------------------
 1 file changed, 24 insertions(+), 30 deletions(-)

diff --git a/tests/dd/sparse b/tests/dd/sparse
index feb9447..35ddda9 100755
--- a/tests/dd/sparse
+++ b/tests/dd/sparse
@@ -42,35 +42,29 @@ compare exp out || fail=1
 dd if=file.in bs=1 conv=sparse | cat > file.out
 cmp file.in file.out || fail=1

-# Setup for block size tests
-dd if=/dev/urandom of=file.in bs=1M count=1
-truncate -s+1M file.in
-dd if=/dev/urandom of=file.in bs=1M count=1 conv=notrunc oflag=append
-
-# Note the block allocations below are usually equal,
-# but can vary by a file system block due to alignment,
-# which was seen on XFS at least.  Also on various BSDs
-# the sparse granularity was up to 8 file system blocks
-# (16KiB for the tested systems), causing this to be the
-# minimum accuracy we can support.
-alloc_equal() {
-  # 8 and 512 below are related, so hardcode sector_size for now
-  # : ${sector_size:=$(stat -c "%B" "$1")}
-  : ${sectors_per_block:=$(expr $(stat -f -c '%S' "$1") / 512)}
-  : ${min_sectors_per_sparse_block:=$(expr $sectors_per_block '*' 8)}
-  alloc_diff=$(expr $(stat -c %b "$1") - $(stat -c %b "$2"))
-  alloc_diff=$(echo $alloc_diff | tr -d -- -) # abs()
-  test $alloc_diff -le $min_sectors_per_sparse_block
-}
-
-# Ensure NUL blocks smaller than the block size are not made sparse
-dd if=file.in of=file.out bs=2M conv=sparse
-test $(stat -c %s file.in) = $(stat -c %s file.out) || fail=1
-alloc_equal file.in file.out && fail=1
-
-# Ensure NUL blocks >= block size are made sparse
-dd if=file.in of=file.out bs=1M conv=sparse
-test $(stat -c %s file.in) = $(stat -c %s file.out) || fail=1
-alloc_equal file.in file.out || fail=1
+# Setup for block size tests: create a 3MiB file with a 1MiB
+# stretch of NUL bytes in the middle.
+rm -f file.in
+dd if=/dev/urandom of=file.in bs=1M count=3 iflag=fullblock || fail=1
+dd if=/dev/zero    of=file.in bs=1M count=1 seek=1 conv=notrunc || fail=1
+
+kb_alloc() { du -k "$1"|cut -f1; }
+
+# If our just-created input file appears to be too small,
+# skip the remaining tests.  On at least Solaris 10 with NFS,
+# file.in is reported to occupy <= 1KiB for about 50 seconds
+# after its creation.
+if test $(kb_alloc file.in) -gt 3000; then
+
+  # Ensure NUL blocks smaller than the block size are not made sparse.
+  # Here, with a 2MiB block size, dd's conv=sparse must *not* introduce a hole.
+  dd if=file.in of=file.out bs=2M conv=sparse
+  test 2500 -lt $(kb_alloc file.out) || fail=1
+
+  # Ensure that this 1MiB string of NULs *is* converted to a hole.
+  dd if=file.in of=file.out bs=1M conv=sparse
+  test $(kb_alloc file.out) -lt 2500 || fail=1
+
+fi

 Exit $fail
--
1.7.10.rc1.23.g16a10


