dd/sparse test failure on Solaris (zfs-backed?) NFS


From: Jim Meyering
Subject: dd/sparse test failure on Solaris (zfs-backed?) NFS
Date: Fri, 23 Mar 2012 10:56:17 +0100

Investigating a dd/sparse test failure on Solaris 10 with a
(zfs-backed?) NFS partition, I did this:

$ rm file.in; dd if=/dev/urandom of=file.in iflag=fullblock bs=1M count=3; \
n_old=0; while :; do n=$(du -k file.in|cut -f1); test $n = $n_old && continue;\
n_old=$n; printf '%04d ' $n; date +%T.%N; done
3+0 records in
3+0 records out
3145728 bytes (3.1 MB) copied, 0.305188 s, 10.3 MB/s
0001 02:59:29.577159000
3077 03:00:18.581125500
^C

Notice how that creates a 3MiB file of random data (I had to use
iflag=fullblock, or I'd get just 1040 random bytes with the rest NULs),
yet the system reports that the file occupies <= 1KiB of space for
nearly 50 seconds.

Inserting a "sync" command in the loop did not make a difference,
nor did adding dd's oflag=sync option.
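
For reference, the polling done by the while loop above could be
factored into a small wait-with-timeout helper.  The following is only
an untested sketch, with an arbitrary 3000KiB threshold and 60-second
cap; it is not what the patch below does:

  # Sketch: wait until "du -k FILE" reports at least MIN_KB of
  # allocated space, giving up after roughly 60 seconds.
  wait_for_alloc()
  {
    file=$1 min_kb=$2 tries=0
    while test $(du -k "$file" | cut -f1) -lt $min_kb; do
      tries=$(expr $tries + 1)
      test $tries -gt 60 && return 1
      sleep 1
    done
  }

  wait_for_alloc file.in 3000 || echo 'block accounting is still lagging'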

With the following change, the test passes both locally and on
that Solaris system:

From e86085626fea5695baaed55955bc46f80c3db105 Mon Sep 17 00:00:00 2001
From: Jim Meyering <address@hidden>
Date: Fri, 23 Mar 2012 10:53:56 +0100
Subject: [PATCH] tests: skip part of dd/sparse on some file systems

* tests/dd/sparse: The last two parts of this test would fail due to
the underlying file system, at least on Solaris 10 with NFS.  That file
system would report that a 3MiB file was occupying <= 1KiB of space
for nearly 50 seconds after creation.
---
 tests/dd/sparse |   54 ++++++++++++++++++++++++------------------------------
 1 file changed, 24 insertions(+), 30 deletions(-)

diff --git a/tests/dd/sparse b/tests/dd/sparse
index feb9447..b3046d7 100755
--- a/tests/dd/sparse
+++ b/tests/dd/sparse
@@ -42,35 +42,29 @@ compare exp out || fail=1
 dd if=file.in bs=1 conv=sparse | cat > file.out
 cmp file.in file.out || fail=1

-# Setup for block size tests
-dd if=/dev/urandom of=file.in bs=1M count=1
-truncate -s+1M file.in
-dd if=/dev/urandom of=file.in bs=1M count=1 conv=notrunc oflag=append
-
-# Note the block allocations below are usually equal,
-# but can vary by a file system block due to alignment,
-# which was seen on XFS at least.  Also on various BSDs
-# the sparse granularity was up to 8 file system blocks
-# (16KiB for the tested systems), causing this to be the
-# minimum accuracy we can support.
-alloc_equal() {
-  # 8 and 512 below are related, so hardcode sector_size for now
-  # : ${sector_size:=$(stat -c "%B" "$1")}
-  : ${sectors_per_block:=$(expr $(stat -f -c '%S' "$1") / 512)}
-  : ${min_sectors_per_sparse_block:=$(expr $sectors_per_block '*' 8)}
-  alloc_diff=$(expr $(stat -c %b "$1") - $(stat -c %b "$2"))
-  alloc_diff=$(echo $alloc_diff | tr -d -- -) # abs()
-  test $alloc_diff -le $min_sectors_per_sparse_block
-}
-
-# Ensure NUL blocks smaller than the block size are not made sparse
-dd if=file.in of=file.out bs=2M conv=sparse
-test $(stat -c %s file.in) = $(stat -c %s file.out) || fail=1
-alloc_equal file.in file.out && fail=1
-
-# Ensure NUL blocks >= block size are made sparse
-dd if=file.in of=file.out bs=1M conv=sparse
-test $(stat -c %s file.in) = $(stat -c %s file.out) || fail=1
-alloc_equal file.in file.out || fail=1
+# Setup for block size tests: create a 3MiB file with a 1MiB
+# stretch of NUL bytes in the middle.
+rm -f file.in
+dd if=/dev/urandom of=file.in iflag=fullblock bs=1M count=3 || fail=1
+dd if=/dev/zero of=file.in conv=notrunc bs=1M count=1 seek=1 || fail=1
+
+kb_alloc() { du -k "$1"|cut -f1; }
+
+# If our just-created input file appears to be too small,
+# skip the remaining tests.  On at least Solaris 10 with NFS,
+# file.in is reported to occupy <= 1KiB for about 50 seconds
+# after its creation.
+if test $(kb_alloc file.in) -gt 3000; then
+
+  # Ensure NUL blocks smaller than the block size are not made sparse.
+  # Here, with a 2MiB block size, dd's conv=sparse must *not* introduce a hole.
+  dd if=file.in of=file.out bs=2M conv=sparse
+  test 2500 -lt $(kb_alloc file.out) || fail=1
+
+  # Ensure that this 1MiB string of NULs *is* converted to a hole.
+  dd if=file.in of=file.out bs=1M conv=sparse
+  test $(kb_alloc file.out) -lt 2500 || fail=1
+
+fi

 Exit $fail
--
1.7.10.rc1.23.g16a10
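
For anyone who wants to check a file system by hand, the guard added by
the patch can also be run interactively.  This is just a sketch that
mirrors the patch (the kb_alloc helper and the 3000KiB threshold are
taken from it), to be run in a scratch directory:

  # Create a 3MiB file of random data, then see how much of it the
  # file system reports as allocated right away.
  rm -f file.in
  dd if=/dev/urandom of=file.in iflag=fullblock bs=1M count=3

  kb_alloc() { du -k "$1" | cut -f1; }

  if test $(kb_alloc file.in) -gt 3000; then
    echo 'block allocation is reported promptly'
  else
    echo 'allocation lags; tests/dd/sparse would skip its last two parts'
  fi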


