#
#
# add_dir "tests/'heads'"
#
# add_dir "tests/'heads'_with_discontinuous_branches"
#
# add_dir "tests/attr_set_get_commands"
#
# add_dir "tests/changing_passphrase_of_a_private_key"
#
# add_dir "tests/creating_a_good_and_bad_test_result"
#
# add_dir "tests/diffing_a_revision_with_an_added_file"
#
# add_dir "tests/importing_a_CVS_file_with_one_version"
#
# add_dir "tests/list_missing_files"
#
# add_dir "tests/manifest_restrictions"
#
# add_dir "tests/merging_a_rename_twice"
#
# add_dir "tests/renaming_a_deleted_file"
#
# add_dir "tests/renaming_a_patched_file"
#
# add_dir "tests/single_character_filename_support"
#
# add_dir "tests/subdirectory_restrictions"
#
# add_dir "tests/test_a_merge"
#
# add_dir "tests/updating_from_a_merge_which_adds_a_file"
#
# add_dir "tests/updating_to_a_given_revision"
#
# add_file "tests/'heads'/__driver__.lua"
# content [e9946a22ae4f0ad372209105d2a59b06dce00623]
#
# add_file "tests/'heads'_with_discontinuous_branches/__driver__.lua"
# content [7059df07d5993350e3d1e1d66441c4a239bf2365]
#
# add_file "tests/attr_set_get_commands/__driver__.lua"
# content [cf3184ab7f485cc0d78eaa56315354f88b280494]
#
# add_file "tests/changing_passphrase_of_a_private_key/__driver__.lua"
# content [07157ac02b192bf7f97b7c19f8398a6811007769]
#
# add_file "tests/creating_a_good_and_bad_test_result/__driver__.lua"
# content [8057102a8f32c3ab8811f472f7a2f1a6574f6d74]
#
# add_file "tests/creating_a_good_and_bad_test_result/bad"
# content [edb043f48a2b91914e3d7f0d443f09405388f42f]
#
# add_file "tests/creating_a_good_and_bad_test_result/final"
# content [37492211554020fc4264f6e1b786ae3159d14bb4]
#
# add_file "tests/creating_a_good_and_bad_test_result/good"
# content [1901cbc55ee28e0b3f57d358d4c260dbd92d95e9]
#
# add_file "tests/creating_a_good_and_bad_test_result/root"
# content [aa28e376c2dfb3908bfc5fe1f502a7af4cb81d40]
#
# add_file "tests/creating_a_good_and_bad_test_result/work"
# content [cc97f4c30e83293c486e2765904d59a287875888]
#
# add_file "tests/diffing_a_revision_with_an_added_file/__driver__.lua"
# content [a5ecf57cdffe2f8a3adae115d4be50a2f34dd5a7]
#
# add_file "tests/importing_a_CVS_file_with_one_version/__driver__.lua"
# content [2e1237dc81f133325edf90326d472775d50d7fe5]
#
# add_file "tests/list_missing_files/__driver__.lua"
# content [b926a8569fada085efe533609783127919ef0843]
#
# add_file "tests/manifest_restrictions/__driver__.lua"
# content [0f07ea8cd7d9ec8b63f48f008a2d10ee7492a7a1]
#
# add_file "tests/manifest_restrictions/ignored.lua"
# content [71e1c5fe3bbe6493a5c72ecc16daf108ac73b2bc]
#
# add_file "tests/merging_a_rename_twice/__driver__.lua"
# content [473ca98c8bb0ff9f01edd8eae5c02fa6b5ed3c67]
#
# add_file "tests/renaming_a_deleted_file/__driver__.lua"
# content [3e19173031269466aed55561cbf8a30bd5de735e]
#
# add_file "tests/renaming_a_patched_file/__driver__.lua"
# content [32ee3c13f0f2ef0aad23ec544da1aaa7b00d0ae0]
#
# add_file "tests/single_character_filename_support/__driver__.lua"
# content [f606b0b2e75e3900a5d5344ecb762c53eb9d1521]
#
# add_file "tests/subdirectory_restrictions/__driver__.lua"
# content [23e4a37c900482a77dcad2580595a1953ab6e320]
#
# add_file "tests/test_a_merge/__driver__.lua"
# content [17cae767d80b9c4b1eec42fc85c367a8e60d2c63]
#
# add_file "tests/test_a_merge/correct"
# content [d193fc305a0a54b836a4bedaa344f4e978a6573d]
#
# add_file "tests/test_a_merge/left"
# content [7e718789c8e733f9a3e9bafd9843f8e5c4bfbdc8]
#
# add_file "tests/test_a_merge/parent"
# content [b451b5a7e2e01f0ac194ca957b64fbee4d6012a4]
#
# add_file "tests/test_a_merge/right"
# content [757a48524c12712192e948ebe8f8f312d1618be7]
#
# add_file "tests/updating_from_a_merge_which_adds_a_file/__driver__.lua"
# content [14c179004eb8d2a7179f077e17a1d9d22d21d733]
#
# add_file "tests/updating_to_a_given_revision/__driver__.lua"
# content [12fa7e799cfeed20b60a2dc34a02e32890240809]
#
# add_file "tests/updating_to_a_given_revision/left-leaf"
# content [37492211554020fc4264f6e1b786ae3159d14bb4]
#
# add_file "tests/updating_to_a_given_revision/middle"
# content [1901cbc55ee28e0b3f57d358d4c260dbd92d95e9]
#
# add_file "tests/updating_to_a_given_revision/modified-left-leaf"
# content [4c3c1764e908c45e0f23407a8b24ad8975d11ffa]
#
# add_file "tests/updating_to_a_given_revision/modified-root"
# content [4cd564a1df96ff6893a33ef95ecf920d69999d2d]
#
# add_file "tests/updating_to_a_given_revision/right-leaf"
# content [06d89c824155e557b2d66c88adaf33e112fc0e25]
#
# add_file "tests/updating_to_a_given_revision/root"
# content [aa28e376c2dfb3908bfc5fe1f502a7af4cb81d40]
#
# patch "tester.cc"
# from [6990cf999039ad1eaa03ec5c511fd58ac72aebd0]
# to [d40ade4fec88a65942ab694afca516330a65b639]
#
# patch "tester.lua"
# from [489d8c494f46c2551a81250c0a0b0b80d06eb493]
# to [fd12e0c5032a3d5fe98c1ba1186c580c24822421]
#
# patch "testsuite.at"
# from [0061fcad7d9c8da8ed4519e26dd2d1935ab0eccc]
# to [477a9f90a62d5f5b3861e368988bd194ad681d49]
#
# patch "testsuite.lua"
# from [2a2858f3b1990fcccc8e74bc0d9a04ddc8f9c179]
# to [e3316fe979a171964970a1676312a20da63e399e]
#
============================================================
--- tests/'heads'/__driver__.lua e9946a22ae4f0ad372209105d2a59b06dce00623
+++ tests/'heads'/__driver__.lua e9946a22ae4f0ad372209105d2a59b06dce00623
@@ -0,0 +1,106 @@
+
+mtn_setup()
+
+-- Create a single revision in branch1
+--
+-- root (branch1)
+--
+-- branch1 heads: root
+
+revs = {}
+
+addfile("f", "base data")
+commit("branch1")
+revs.root = base_revision()
+
+check(cmd(mtn("--branch=branch1", "heads")), 0, true, false)
+check(qgrep(revs.root, "stdout"))
+
+-- Create a child
+--
+-- root (branch1)
+-- /
+-- child1 (branch1)
+--
+-- branch1 heads: child1
+
+writefile("f", "child1 data")
+commit("branch1")
+revs.child1 = base_revision()
+
+check(cmd(mtn("--branch=branch1", "heads")), 0, true, false)
+check(not qgrep(revs.root, "stdout"))
+check(qgrep(revs.child1, "stdout"))
+
+-- Create another child
+--
+-- root (branch1)
+-- / \
+-- child1 (branch1) child2 (branch1)
+--
+-- branch1 heads: child1, child2
+
+revert_to(revs.root)
+writefile("f", "child2 data")
+commit("branch1")
+revs.child2 = base_revision()
+
+check(cmd(mtn("--branch=branch1", "heads")), 0, true, false)
+check(not qgrep(revs.root, "stdout"))
+check(qgrep(revs.child1, "stdout"))
+check(qgrep(revs.child2, "stdout"))
+
+-- Branch from the second child into branch2
+--
+-- root (branch1)
+-- / \
+-- child1 (branch1) child2 (branch1)
+-- \
+-- child3 (branch2)
+--
+-- branch1 heads: child1, child2
+-- branch2 heads: child3
+
+writefile("f", "child3 data")
+commit("branch2")
+revs.child3 = base_revision()
+
+check(cmd(mtn("--branch=branch1", "heads")), 0, true, false)
+check(not qgrep(revs.root, "stdout"))
+check(qgrep(revs.child1, "stdout"))
+check(qgrep(revs.child2, "stdout"))
+check(not qgrep(revs.child3, "stdout"))
+check(cmd(mtn("--branch=branch2", "heads")), 0, true, false)
+check(not qgrep(revs.root, "stdout"))
+check(not qgrep(revs.child1, "stdout"))
+check(not qgrep(revs.child2, "stdout"))
+check(qgrep(revs.child3, "stdout"))
+
+-- Branch from the first child into branch2
+--
+-- root (branch1)
+-- / \
+-- child1 (branch1) child2 (branch1)
+-- / \
+-- child4 (branch2) child3 (branch2)
+--
+-- branch1 heads: child1, child2
+-- branch2 heads: child3, child4
+
+revert_to(revs.child1)
+writefile("f", "child4 data")
+commit("branch2")
+revs.child4 = base_revision()
+
+check(cmd(mtn("--branch=branch1", "heads")), 0, true, false)
+check(not qgrep(revs.root, "stdout"))
+check(qgrep(revs.child1, "stdout"))
+check(qgrep(revs.child2, "stdout"))
+check(not qgrep(revs.child3, "stdout"))
+check(not qgrep(revs.child4, "stdout"))
+check(cmd(mtn("--branch=branch2", "heads")), 0, true, false)
+check(not qgrep(revs.root, "stdout"))
+check(not qgrep(revs.child1, "stdout"))
+check(not qgrep(revs.child2, "stdout"))
+check(qgrep(revs.child3, "stdout"))
+check(qgrep(revs.child4, "stdout"))
============================================================
--- tests/'heads'_with_discontinuous_branches/__driver__.lua 7059df07d5993350e3d1e1d66441c4a239bf2365
+++ tests/'heads'_with_discontinuous_branches/__driver__.lua 7059df07d5993350e3d1e1d66441c4a239bf2365
@@ -0,0 +1,40 @@
+
+mtn_setup()
+
+-- This tests the 'heads' command with a graph like:
+--
+-- r1 (branch1)
+-- |
+-- r2 (branch2)
+-- |
+-- r3 (branch1)
+--
+-- 'heads' on branch1 should show only r3, not r1.
+
+revs = {}
+
+-- Create R1
+writefile("f", "r1 data")
+check(cmd(mtn("add", "f")), 0, false, false)
+commit("branch1")
+revs[1] = base_revision()
+
+-- Sanity check first...
+check(cmd(mtn("--branch=branch1", "heads")), 0, true, false)
+check(qgrep(revs[1], "stdout"))
+
+-- Now create R2
+writefile("f", "r2 data")
+commit("branch2")
+revs[2] = base_revision()
+
+-- Now create R3
+writefile("f", "r3 data")
+commit("branch1")
+revs[3] = base_revision()
+
+-- Now check heads on branch1
+check(cmd(mtn("--branch=branch1", "heads")), 0, true, false)
+check(not qgrep(revs[1], "stdout"))
+check(not qgrep(revs[2], "stdout"))
+check(qgrep(revs[3], "stdout"))
============================================================
--- tests/attr_set_get_commands/__driver__.lua cf3184ab7f485cc0d78eaa56315354f88b280494
+++ tests/attr_set_get_commands/__driver__.lua cf3184ab7f485cc0d78eaa56315354f88b280494
@@ -0,0 +1,33 @@
+
+mtn_setup()
+
+writefile("foo", "some data")
+-- Check a single character filename too, because those have had bugs.
+writefile("a", "some data")
+
+check(cmd(mtn("add", "foo")), 0, false, false)
+check(cmd(mtn("add", "a")), 0, false, false)
+check(cmd(mtn("attr", "set", "foo", "test:test_attr", "true")), 0, false, false)
+check(cmd(mtn("attr", "set", "a", "test:test_attr", "1")), 0, false, false)
+commit()
+co_r_sha1 = base_revision()
+
+check(cmd(mtn("attr", "drop", "foo", "test:test_attr")), 0, false, false)
+check(cmd(mtn("attr", "set", "a", "test:test_attr", "2")), 0, false, false)
+commit()
+update_r_sha1 = base_revision()
+
+-- Check checkouts.
+remove_recursive("co-dir")
+check(cmd(mtn("checkout", "--revision", co_r_sha1, "co-dir")), 0, true)
+check(qgrep("test:test_attr:foo:true", "stdout"))
+check(qgrep("test:test_attr:a:1", "stdout"))
+
+-- Check updates.
+remove_recursive("co-dir")
+check(cmd(mtn("checkout", "--revision", update_r_sha1, "co-dir")), 0, true)
+check(not qgrep("test:test_attr:foo", "stdout"))
+check(qgrep("test:test_attr:a:2", "stdout"))
+
+-- check that files must exist to have attributes set
+check(cmd(mtn("attr", "set", "missing", "mtn:execute")), 1, false, false)
============================================================
--- tests/changing_passphrase_of_a_private_key/__driver__.lua 07157ac02b192bf7f97b7c19f8398a6811007769
+++ tests/changing_passphrase_of_a_private_key/__driver__.lua 07157ac02b192bf7f97b7c19f8398a6811007769
@@ -0,0 +1,22 @@
+
+mtn_setup()
+
+tkey = "address@hidden"
+
+-- generate a new key
+check(cmd(mtn("genkey", tkey)), 0, false, false, string.rep(tkey.."\n", 2))
+
+-- fail to enter any passphrase
+check(cmd(mtn("chkeypass", tkey)), 1, false, false)
+
+-- fail to give correct old passphrase
+check(cmd(mtn("chkeypass", tkey)), 1, false, false, string.rep("bad\n", 3))
+
+-- fail to repeat new password
+check(cmd(mtn("chkeypass", tkey)), 1, false, false, tkey.."\n"..tkey.."-new\nbad\n")
+
+-- change the passphrase successfully
+check(cmd(mtn("chkeypass", tkey)), 0, false, false, tkey.."\n"..string.rep(tkey.."-new\n", 2))
+
+-- check that the passphrase changed
+check(cmd(mtn("chkeypass", tkey)), 0, false, false, tkey.."-new\n"..string.rep(tkey.."\n",2))
============================================================
--- tests/creating_a_good_and_bad_test_result/__driver__.lua 8057102a8f32c3ab8811f472f7a2f1a6574f6d74
+++ tests/creating_a_good_and_bad_test_result/__driver__.lua 8057102a8f32c3ab8811f472f7a2f1a6574f6d74
@@ -0,0 +1,35 @@
+
+mtn_setup()
+
+getfile("root", "testfile")
+check(cmd(mtn("add", "testfile")), 0, false, false)
+commit()
+root_r_sha = base_revision()
+root_f_sha = sha1("testfile")
+
+getfile("good", "testfile")
+commit()
+left_good_r_sha = base_revision()
+left_good_f_sha = sha1("testfile")
+check(left_good_r_sha ~= root_r_sha)
+check(left_good_f_sha ~= root_f_sha)
+
+getfile("bad", "testfile")
+commit()
+left_bad_r_sha = base_revision()
+left_bad_f_sha = sha1("testfile")
+check(left_bad_r_sha ~= left_good_r_sha)
+check(left_bad_f_sha ~= left_good_f_sha)
+
+probe_node("testfile", root_r_sha, root_f_sha)
+
+getfile("work", "testfile")
+check(cmd(mtn("testresult", root_r_sha, "1")), 0, false, false)
+check(cmd(mtn("testresult", left_good_r_sha, "1")), 0, false, false)
+check(cmd(mtn("testresult", left_bad_r_sha, "0")), 0, false, false)
+check(cmd(mtn("update")), 0, false, false)
+
+-- files should now be merged
+
+getfile("final", "probe")
+check(samefile("testfile", "probe"))
============================================================
--- tests/creating_a_good_and_bad_test_result/bad edb043f48a2b91914e3d7f0d443f09405388f42f
+++ tests/creating_a_good_and_bad_test_result/bad edb043f48a2b91914e3d7f0d443f09405388f42f
@@ -0,0 +1,5 @@
+first line of the file
+an insertion between first and second
+second line of the file
+third line of the file
+an evil line which should never be seen
============================================================
--- tests/creating_a_good_and_bad_test_result/final 37492211554020fc4264f6e1b786ae3159d14bb4
+++ tests/creating_a_good_and_bad_test_result/final 37492211554020fc4264f6e1b786ae3159d14bb4
@@ -0,0 +1,5 @@
+first line of the file
+an insertion between first and second
+second line of the file
+an insertion between second and third
+third line of the file
============================================================
--- tests/creating_a_good_and_bad_test_result/good 1901cbc55ee28e0b3f57d358d4c260dbd92d95e9
+++ tests/creating_a_good_and_bad_test_result/good 1901cbc55ee28e0b3f57d358d4c260dbd92d95e9
@@ -0,0 +1,4 @@
+first line of the file
+an insertion between first and second
+second line of the file
+third line of the file
============================================================
--- tests/creating_a_good_and_bad_test_result/root aa28e376c2dfb3908bfc5fe1f502a7af4cb81d40
+++ tests/creating_a_good_and_bad_test_result/root aa28e376c2dfb3908bfc5fe1f502a7af4cb81d40
@@ -0,0 +1,3 @@
+first line of the file
+second line of the file
+third line of the file
============================================================
--- tests/creating_a_good_and_bad_test_result/work cc97f4c30e83293c486e2765904d59a287875888
+++ tests/creating_a_good_and_bad_test_result/work cc97f4c30e83293c486e2765904d59a287875888
@@ -0,0 +1,4 @@
+first line of the file
+second line of the file
+an insertion between second and third
+third line of the file
============================================================
--- tests/diffing_a_revision_with_an_added_file/__driver__.lua a5ecf57cdffe2f8a3adae115d4be50a2f34dd5a7
+++ tests/diffing_a_revision_with_an_added_file/__driver__.lua a5ecf57cdffe2f8a3adae115d4be50a2f34dd5a7
@@ -0,0 +1,11 @@
+
+mtn_setup()
+
+addfile("foo1", "foo file 1")
+commit()
+parent = base_revision()
+
+addfile("foo2", "foo file 2")
+commit()
+
+check(cmd(mtn("diff", "--revision", parent, "--revision", base_revision())), 0, false, false)
============================================================
--- tests/importing_a_CVS_file_with_one_version/__driver__.lua 2e1237dc81f133325edf90326d472775d50d7fe5
+++ tests/importing_a_CVS_file_with_one_version/__driver__.lua 2e1237dc81f133325edf90326d472775d50d7fe5
@@ -0,0 +1,29 @@
+
+skip_if(not existsonpath("cvs"))
+mtn_setup()
+
+writefile("importme.0", "version 0 of test file")
+
+tsha = sha1("importme.0")
+
+-- build the cvs repository
+
+cvsroot = test_root .. "/cvs-repository"
+check(cmd("cvs", "-q", "-d", cvsroot, "init"), 0, false, false)
+check(exists(cvsroot))
+check(exists(cvsroot .. "/CVSROOT"))
+check(exists(cvsroot .. "/CVSROOT/modules"))
+
+-- check out the workspace and make a commit
+
+check(cmd("cvs", "-d", cvsroot, "co", "."), 0, false, false)
+mkdir("testdir")
+os.rename("importme.0", "testdir/importme")
+check(cmd("cvs", "-d", cvsroot, "add", "testdir"), 0, false, false)
+check(cmd("cvs", "-d", cvsroot, "add", "testdir/importme"), 0, false, false)
+check(cmd("cvs", "-d", cvsroot, "commit", "-m", 'commit 0', "testdir/importme"), 0, false, false)
+
+-- import into monotone and check presence of file
+
+check(cmd(mtn("--branch=testbranch", "cvs_import", cvsroot .. "/testdir")), 0, false, false)
+check(cmd(mtn("automate", "get_file", tsha)), 0, false)
============================================================
--- tests/list_missing_files/__driver__.lua b926a8569fada085efe533609783127919ef0843
+++ tests/list_missing_files/__driver__.lua b926a8569fada085efe533609783127919ef0843
@@ -0,0 +1,38 @@
+
+mtn_setup()
+
+writefile("foo", "the foo file")
+
+writefile("bar", "the bar file")
+
+writefile("missingfoo", "foo\n")
+
+writefile("missingbar", "bar\n")
+
+check(cmd(mtn("ls", "missing")), 0, false)
+
+check(cmd(mtn("add", "foo", "bar")), 0, false, false)
+check(cmd(mtn("ls", "missing")), 0, false)
+
+remove("foo")
+check(cmd(mtn("ls", "missing")), 0, true, 0)
+canonicalize("stdout")
+check(samefile("stdout", "missingfoo"))
+
+writefile("foo")
+check(cmd(mtn("drop", "foo")), 0, false, false)
+remove("foo")
+check(cmd(mtn("ls", "missing")), 0, 0, 0)
+
+commit()
+check(cmd(mtn("ls", "missing")), 0, 0, 0)
+
+remove("bar")
+check(cmd(mtn("ls", "missing")), 0, true, 0)
+canonicalize("stdout")
+check(samefile("stdout", "missingbar"))
+
+writefile("bar")
+check(cmd(mtn("drop", "bar")), 0, false, false)
+remove("bar")
+check(cmd(mtn("ls", "missing")), 0, 0, 0)
============================================================
--- tests/manifest_restrictions/__driver__.lua 0f07ea8cd7d9ec8b63f48f008a2d10ee7492a7a1
+++ tests/manifest_restrictions/__driver__.lua 0f07ea8cd7d9ec8b63f48f008a2d10ee7492a7a1
@@ -0,0 +1,274 @@
+
+mtn_setup()
+
+getfile("ignored.lua")
+
+mkdir("work")
+mkdir("work/A")
+mkdir("work/A/B")
+
+writefile("work/foo.o", "version 1 of foo.o")
+
+addfile("work/fileX", "version 1 of fileX which will be renamed to work/file1")
+addfile("work/file2", "version 1 of file2")
+addfile("work/file3", "version 1 of file3")
+
+writefile("work/file4", "version 1 of file4")
+
+addfile("work/A/fileA", "file in a subdirectory")
+addfile("work/A/B/fileAB", "file in a deeper subdirectory")
+
+-- initial commit
+
+commit()
+
+-- FIXME_RESTRICTIONS: the old code allows for --depth=N with no paths
+-- and adds the "." path so that depth is interpreted against the current
+-- included directory. this seems bad. how does --depth interact with --exclude?
+--check(cmd(mtn("ls", "known", "--depth=0")), 0, true, false)
+--check(not qgrep("fileX", "stdout"))
+
+check(cmd(mtn("ls", "known", "--depth=0", ".")) , 0, true, false)
+check(not qgrep("fileX", "stdout"))
+
+check(cmd(mtn("ls", "known", "--depth=1", ".")) , 0, true, false)
+check(qgrep("fileX", "stdout"))
+
+check(cmd(mtn("ls", "known", "--depth=0", "work/A")) , 0, true, false)
+check(not qgrep("fileAB", "stdout"))
+
+check(cmd(mtn("ls", "known", "--depth=1", "work/A")) , 0, true, false)
+check(qgrep("fileAB", "stdout"))
+
+-- test restriction of unknown, missing, ignored files
+
+check(cmd(mtn("ls", "unknown")), 0, true, false)
+check(qgrep("work/file4", "stdout"))
+
+check(cmd(mtn("ls", "unknown", "work")), 0, true, false)
+check(qgrep("work/file4", "stdout"))
+
+os.rename("work/file2", "work/filex2")
+
+check(cmd(mtn("ls", "missing")), 0, true, false)
+check(qgrep("work/file2", "stdout"))
+
+check(cmd(mtn("ls", "missing", "work/file2")), 0, true, false)
+check(qgrep("work/file2", "stdout"))
+
+os.rename("work/filex2", "work/file2")
+
+check(cmd(mtn("ls", "ignored", "--rcfile=ignored.lua")), 0, true, false)
+check(qgrep("work/foo.o", "stdout"))
+
+check(cmd(mtn("ls", "ignored", "--rcfile=ignored.lua", "work")), 0, true, false)
+check(qgrep("work/foo.o", "stdout"))
+
+-- create moved, dropped, and changed work to test status, diff, commit
+
+os.rename("work/fileX", "work/file1")
+os.remove("work/file2")
+
+writefile("work/file3", "version 2 of file3 with some changes")
+writefile("work/A/fileA", "version 2 of fileA with some changes")
+writefile("work/A/B/fileAB", "version 2 of fileAB with some changes")
+
+check(cmd(mtn("rename", "work/fileX", "work/file1")), 0, false, false)
+check(cmd(mtn("drop", "work/file2")), 0, false, false)
+check(cmd(mtn("add", "work/file4")), 0, false, false)
+
+-- moved fileX to file1
+-- dropped file2
+-- changed file3
+-- added file4
+
+-- test for files included/excluded in various outputs
+
+function included(...)
+ local missed = {}
+ local ok = true
+ for _,x in ipairs(arg) do
+ if not qgrep("work/file"..x, "stdout") then
+ table.insert(missed, x)
+ ok = false
+ end
+ end
+ if not ok then
+ L("missed: ", table.concat(missed, " "), "\n")
+ end
+ return ok
+end
+
+function excluded(...)
+ local missed = {}
+ local ok = true
+ for _,x in ipairs(arg) do
+ if qgrep("work/file"..x, "stdout") then
+ table.insert(missed, x)
+ ok = false
+ end
+ end
+ if not ok then
+ L("seen: ", table.concat(missed, " "), "\n")
+ end
+ return ok
+end
+
+-- status
+
+check(cmd(mtn("status")), 0, true, false)
+check(included("X", 1, 2, 3, 4), 0, false)
+
+-- include both source and target of rename
+
+check(cmd(mtn("status", "work/fileX", "work/file1")), 0, true, false)
+check(included("X", 1))
+check(excluded(2, 3, 4))
+
+-- include drop
+
+check(cmd(mtn("status", "work/file2")), 0, true, false)
+check(included(2))
+check(excluded("X", 1, 3, 4))
+
+-- include delta
+
+check(cmd(mtn("status", "work/file3")), 0, true, false)
+check(included(3))
+check(excluded("X", 1, 2, 4))
+
+-- include add
+
+check(cmd(mtn("status", "work/file4")), 0, true, false)
+check(included(4))
+check(excluded("X", 1, 2, 3))
+
+-- diff
+
+check(cmd(mtn("diff")), 0, true, false)
+check(included("X", 1, 2, 3, 4))
+
+check(cmd(mtn("diff", "--depth=0", ".")), 0, true, false)
+check(not qgrep("fileAB", "stdout"))
+
+check(cmd(mtn("diff", "--depth=2", ".")), 0, true, false)
+check(qgrep("fileA", "stdout"))
+
+check(cmd(mtn("diff", "--context", "--depth=0", ".")), 0, true, false)
+check(not qgrep("fileAB", "stdout"))
+
+check(cmd(mtn("diff", "--context", "--depth=2", ".")), 0, true, false)
+check(qgrep("fileA", "stdout"))
+
+-- include both source and target of rename
+
+check(cmd(mtn("diff", "work/fileX", "work/file1")), 0, true, false)
+check(included("X", 1))
+check(excluded(2, 3, 4))
+
+-- include drop
+
+check(cmd(mtn("diff", "work/file2")), 0, true, false)
+check(included(2))
+check(excluded("X", 1, 3, 4))
+
+-- include delta
+
+check(cmd(mtn("diff", "work/file3")), 0, true, false)
+check(included(3))
+check(excluded("X", 1, 2, 4))
+
+-- include add
+
+check(cmd(mtn("diff", "work/file4")), 0, true, false)
+check(included(4))
+check(excluded("X", 1, 2, 3))
+
+-- commit
+
+check(cmd(mtn("status")), 0, true, false)
+check(included("X", 1, 2, 3, 4))
+
+-- include rename source and target
+
+check(cmd(mtn("commit", "--message=move fileX to file1",
+ "work/fileX", "work/file1")), 0, false, false)
+
+check(cmd(mtn("status")), 0, true, false)
+check(included(2, 3, 4))
+check(excluded("X", 1))
+
+-- include drop
+
+check(cmd(mtn("commit", "--message=drop file2", "work/file2")), 0, false, false)
+
+check(cmd(mtn("status")), 0, true, false)
+check(included(3, 4))
+check(excluded("X", 1, 2))
+
+-- include delta
+
+check(cmd(mtn("commit", "--message=change file3", "work/file3")), 0, false, false)
+
+check(cmd(mtn("status")), 0, true, false)
+check(included(4))
+check(excluded("X", 1, 2, 3))
+
+-- include add
+
+check(cmd(mtn("commit", "--message=add file4", "work/file4")), 0, false, false)
+
+check(cmd(mtn("status")), 0, true, false)
+check(excluded("X", 1, 2, 3, 4))
+
+-- setup for excluded commits
+
+-- moved file1 to fileY
+-- dropped file2
+-- changed file3
+-- added file4
+
+-- moved file3 to file
+-- dropped file1
+-- changed file4
+-- added file5
+
+-- exclude rename source
+-- exclude rename target
+-- exclude drop
+-- exclude delta
+-- exclude add
+
+-- test bad removal of restricted files
+-- (set/iterator/erase bug found by address@hidden)
+
+nums = {[1] = "one", [2] = "two", [3] = "three",
+ [4] = "four", [5] = "five", [6] = "six",
+ [7] = "seven", [8] = "eight", [9] = "nine",
+ [10] = "ten", [11] = "eleven", [12] = "twelve"}
+for i = 1,12 do
+ addfile("file."..nums[i], "file "..nums[i])
+end
+
+commit()
+
+for i = 1,11 do
+ if i ~= 2 then
+ writefile("file."..nums[i], "new file "..nums[i])
+ end
+end
+
+check(cmd(mtn("diff", "file.four", "file.ten")), 0, true, false)
+
+check(qgrep("file.four", "stdout"))
+check(qgrep("file.ten", "stdout"))
+
+-- none of these should show up in the diff
+-- only four and ten are included
+
+for i = 1,12
+do
+ if i ~= 4 and i ~= 10 then
+ check(not qgrep("file.$i", "stdout"))
+ end
+end
============================================================
--- tests/manifest_restrictions/ignored.lua 71e1c5fe3bbe6493a5c72ecc16daf108ac73b2bc
+++ tests/manifest_restrictions/ignored.lua 71e1c5fe3bbe6493a5c72ecc16daf108ac73b2bc
@@ -0,0 +1,4 @@
+function ignore_file(name)
+ if (string.find(name, "%.o$")) then return true end
+ return false;
+end
============================================================
--- tests/merging_a_rename_twice/__driver__.lua 473ca98c8bb0ff9f01edd8eae5c02fa6b5ed3c67
+++ tests/merging_a_rename_twice/__driver__.lua 473ca98c8bb0ff9f01edd8eae5c02fa6b5ed3c67
@@ -0,0 +1,91 @@
+
+mtn_setup()
+
+-- x_branch y_branch
+--
+-- A
+-- |\ this edge says rename(x, y)
+-- | ----------
+-- | \
+-- B E--------\
+-- | | |
+-- | F |
+-- C | |
+-- |\propagate1 | |
+-- | -----------G |
+-- | | J
+-- | H |
+-- D | |
+-- \propagate2 | |
+-- -----------I---------K
+
+writefile("x", "data of state A")
+writefile("foo", "extra blah blah foo")
+writefile("bar", "extra blah blah bar")
+writefile("baz", "extra blah blah baz")
+writefile("quux", "extra blah blah quux")
+
+revs = {}
+
+-- produce state A
+check(cmd(mtn("add", "x")), 0, false, false)
+commit("branch.x")
+revs.a = base_revision()
+
+-- produce state B
+writefile("x", "data of state B")
+commit("branch.x")
+
+-- produce state C
+writefile("x", "data of state C")
+commit("branch.x")
+revs.c = base_revision()
+
+-- produce state E
+revert_to(revs.a)
+check(cmd(mtn("rename", "x", "y")), 0, false, false)
+rename("x", "y")
+commit("branch.y")
+revs.e = base_revision()
+
+-- produce state F
+check(cmd(mtn("add", "foo")), 0, false, false)
+commit("branch.y")
+
+-- produce state G
+check(cmd(mtn("propagate", "branch.x", "branch.y")), 0, false, false)
+check(cmd(mtn("--branch=branch.y", "update")), 0, false, false)
+revs.g = base_revision()
+check(qgrep('state C', "y"))
+
+-- produce state D
+revert_to(revs.c)
+writefile("x", "data of state D")
+check(cmd(mtn("add", "bar")), 0, false, false)
+commit("branch.x")
+
+-- produce state H
+revert_to(revs.g)
+check(cmd(mtn("add", "baz")), 0, false, false)
+commit("branch.y")
+
+-- produce state I
+check(cmd(mtn("propagate", "branch.x", "branch.y")), 0, false, false)
+check(cmd(mtn("--branch=branch.y", "update")), 0, false, false)
+check(qgrep('state D', "y"))
+
+-- produce state J
+revert_to(revs.e)
+check(cmd(mtn("add", "quux")), 0, false, false)
+commit("branch.y")
+
+-- produce state K
+check(cmd(mtn("--branch=branch.y", "merge")), 0, false, false)
+check(cmd(mtn("--branch=branch.y", "update")), 0, false, false)
+
+check(cmd(mtn("automate", "get_manifest_of")), 0, true)
+os.rename("stdout", "manifest")
+check(qgrep('"y"', "manifest"))
+check(not qgrep('"x"', "manifest"))
+check(exists("y"))
+check(qgrep('state D', "y"))
============================================================
--- tests/renaming_a_deleted_file/__driver__.lua 3e19173031269466aed55561cbf8a30bd5de735e
+++ tests/renaming_a_deleted_file/__driver__.lua 3e19173031269466aed55561cbf8a30bd5de735e
@@ -0,0 +1,36 @@
+
+mtn_setup()
+
+writefile("foo", "foo file")
+writefile("baz", "baz file")
+
+-- produce root
+check(cmd(mtn("add", "foo")), 0, false, false)
+commit()
+root_r_sha = base_revision()
+root_f_sha = sha1("foo")
+
+-- produce move edge
+check(cmd(mtn("add", "baz")), 0, false, false)
+check(cmd(mtn("rename", "foo", "bar")), 0, false, false)
+os.rename("foo", "bar")
+commit()
+
+-- revert to root
+probe_node("foo", root_r_sha, root_f_sha)
+os.remove("bar")
+
+-- make a delete edge on the move preimage
+check(cmd(mtn("drop", "foo")), 0, false, false)
+commit()
+
+-- merge the del and the rename
+check(cmd(mtn("merge")), 0, false, false)
+check(cmd(mtn("update")), 0, false, false)
+
+-- check that the delete landed on the renamed target
+check(cmd(mtn("automate", "get_manifest_of")), 0, true)
+os.rename("stdout", "manifest")
+check(qgrep("baz", "manifest"))
+check(not qgrep("bar", "manifest"))
+check(not qgrep("foo", "manifest"))
============================================================
--- tests/renaming_a_patched_file/__driver__.lua 32ee3c13f0f2ef0aad23ec544da1aaa7b00d0ae0
+++ tests/renaming_a_patched_file/__driver__.lua 32ee3c13f0f2ef0aad23ec544da1aaa7b00d0ae0
@@ -0,0 +1,36 @@
+
+mtn_setup()
+
+writefile("foo", "foo file")
+writefile("bleh", "bleh file")
+
+-- produce root
+check(cmd(mtn("add", "foo")), 0, false, false)
+commit()
+root_r_sha = base_revision()
+root_f_sha = sha1("foo")
+
+-- produce move edge
+check(cmd(mtn("rename", "foo", "bar")), 0, false, false)
+os.rename("foo", "bar")
+commit()
+
+-- revert to root
+probe_node("foo", root_r_sha, root_f_sha)
+os.remove("bar")
+
+-- make a delta edge on the move preimage
+copyfile("bleh", "foo")
+commit()
+
+-- merge the delta and the rename
+check(cmd(mtn("merge")), 0, false, false)
+check(cmd(mtn("update")), 0, false, false)
+
+-- check that the delta landed on the renamed target
+check(cmd(mtn("automate", "get_manifest_of")), 0, true)
+os.rename("stdout", "manifest")
+check(qgrep("bar", "manifest"))
+check(not qgrep("foo", "manifest"))
+check(qgrep("bleh", "bar"))
+os.remove("bar")
============================================================
--- tests/single_character_filename_support/__driver__.lua f606b0b2e75e3900a5d5344ecb762c53eb9d1521
+++ tests/single_character_filename_support/__driver__.lua f606b0b2e75e3900a5d5344ecb762c53eb9d1521
@@ -0,0 +1,27 @@
+
+mtn_setup()
+revs = {}
+
+addfile("a", "some data")
+commit()
+revs.a = base_revision()
+
+check(cmd(mtn("rename", "a", "b")), 0, false, false)
+os.rename("a", "b")
+commit()
+revs.b = base_revision()
+
+check(cmd(mtn("drop", "b")), 0, false, false)
+remove("b")
+commit()
+revs.null = base_revision()
+
+for _,x in pairs{{revs.a, true, false},
+ {revs.b, false, true},
+ {revs.null, false, false}} do
+ remove_recursive("_MTN")
+ check(cmd(mtn("checkout", "--revision", x[1], "co-dir")), 0, false, false)
+ check(exists("co-dir/a") == x[2])
+ check(exists("co-dir/b") == x[3])
+ remove_recursive("co-dir")
+end
============================================================
--- tests/subdirectory_restrictions/__driver__.lua 23e4a37c900482a77dcad2580595a1953ab6e320
+++ tests/subdirectory_restrictions/__driver__.lua 23e4a37c900482a77dcad2580595a1953ab6e320
@@ -0,0 +1,60 @@
+
+mtn_setup()
+
+mkdir("foo")
+mkdir("bar")
+
+writefile("foo/foo.txt", "file foo.txt in dir foo")
+writefile("bar/bar.txt", "file bar.txt in dir bar")
+
+check(cmd(mtn("add", "foo")), 0, false, false)
+check(cmd(mtn("add", "bar")), 0, false, false)
+
+commit()
+
+writefile("foo/foo.txt", "file foo.txt in dir foo changed")
+writefile("bar/bar.txt", "file bar.txt in dir bar changed")
+
+-- should have tests for
+-- add, drop, rename, revert
+-- - which use prefixing
+-- ls unknown, ignored, missing
+-- - which use add_restriction and in_restriction directly
+-- commit, diff, status
+-- - which use add_restriction and in_restriction through
+-- restrict_patch_set
+
+check(cmd(mtn("status")), 0, true, 0)
+check(qgrep("foo/foo", "stdout"))
+check(qgrep("bar/bar", "stdout"))
+
+chdir("foo")
+check(cmd(mtn("--norc", "status")), 0, true)
+check(qgrep("foo/foo", "stdout"))
+check(qgrep("bar/bar", "stdout"))
+
+check(cmd(mtn("--norc", "status", ".")), 0, true)
+check(qgrep("foo/foo", "stdout"))
+check(cmd("pwd"), 0, false)
+check(not qgrep("bar/bar", "stdout"))
+
+check(cmd(mtn("--norc", "status", "..")), 0, true)
+check(qgrep("foo/foo", "stdout"))
+check(qgrep("bar/bar", "stdout"))
+chdir("..")
+
+chdir("bar")
+check(cmd(mtn("--norc", "status")), 0, true)
+check(qgrep("foo/foo", "stdout"))
+check(qgrep("bar/bar", "stdout"))
+
+check(cmd(mtn("--norc", "status", ".")), 0, true)
+check(not qgrep("foo/foo", "stdout"))
+check(qgrep("bar/bar", "stdout"))
+
+check(cmd(mtn("--norc", "status", "..")), 0, true)
+check(qgrep("foo/foo", "stdout"))
+check(qgrep("bar/bar", "stdout"))
+chdir("..")
+
+-- TODO: test a.c a.h a/foo.c a/foo.h from inside and outside of a
============================================================
--- tests/test_a_merge/__driver__.lua 17cae767d80b9c4b1eec42fc85c367a8e60d2c63
+++ tests/test_a_merge/__driver__.lua 17cae767d80b9c4b1eec42fc85c367a8e60d2c63
@@ -0,0 +1,33 @@
+
+mtn_setup()
+
+-- This is (was) a real merge error. 'right' contains only a single
+-- added function; when it was really merged with 'left', the last
+-- line of this function was lost.
+
+-- This may actually be (have been) a bug in the unidiff algorithm;
+-- 'diff' and 'mtn diff' produce(d) different results when calculating
+-- diff(parent, left).
+
+getfile("parent")
+getfile("left")
+getfile("right")
+getfile("correct")
+
+copyfile("parent", "testfile")
+check(cmd(mtn("add", "testfile")), 0, false, false)
+commit(testbranch)
+parent = base_revision()
+
+copyfile("left", "testfile")
+commit()
+
+revert_to(parent)
+
+copyfile("right", "testfile")
+commit()
+
+check(cmd(mtn("merge")), 0, false, false)
+
+check(cmd(mtn("update")), 0, false, false)
+check(samefile("testfile", "correct"))
============================================================
--- tests/test_a_merge/correct d193fc305a0a54b836a4bedaa344f4e978a6573d
+++ tests/test_a_merge/correct d193fc305a0a54b836a4bedaa344f4e978a6573d
@@ -0,0 +1,2237 @@
+// copyright (C) 2002, 2003 graydon hoare
+// all rights reserved.
+// licensed to the public under the terms of the GNU GPL (>= 2)
+// see the file COPYING for details
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include
+#include
+#include
+#include
+
+#include
+
+#include "app_state.hh"
+#include "cert.hh"
+#include "cleanup.hh"
+#include "constants.hh"
+#include "database.hh"
+#include "keys.hh"
+#include "sanity.hh"
+#include "schema_migration.hh"
+#include "cert.hh"
+#include "transforms.hh"
+#include "ui.hh"
+#include "vocab.hh"
+#include "xdelta.hh"
+
+// defined in schema.sql, converted to header:
+#include "schema.h"
+
+// defined in views.sql, converted to header:
+#include "views.h"
+
+// this file defines a public, typed interface to the database.
+// the database class encapsulates all knowledge about sqlite,
+// the schema, and all SQL statements used to access the schema.
+//
+// see file schema.sql for the text of the schema.
+
+using boost::shared_ptr;
+using boost::lexical_cast;
+using namespace std;
+
+int const one_row = 1;
+int const one_col = 1;
+int const any_rows = -1;
+int const any_cols = -1;
+
+extern "C" {
+ // strangely this isn't declared, even though it's present in my sqlite.
+ char *sqlite_vmprintf(const char *zFormat, va_list);
+}
+
+database::database(fs::path const & fn) :
+ filename(fn),
+ // nb. update this if you change the schema. unfortunately we are not
+ // using self-digesting schemas due to comment irregularities and
+ // non-alphabetic ordering of tables in sql source files. we could create
+ // a temporary db, write our intended schema into it, and read it back,
+ // but this seems like it would be too rude. possibly revisit this issue.
+ schema("c1e86588e11ad07fa53e5d294edc043ce1d4005a"),
+ __sql(NULL),
+ transaction_level(0)
+{}
+
+void
+database::check_schema()
+{
+ string db_schema_id;
+ calculate_schema_id (__sql, db_schema_id);
+ N (schema == db_schema_id,
+ F("database schemas do not match: "
+ "wanted %s, got %s. try migrating database")
+ % schema % db_schema_id);
+}
+
+static void
+sqlite_unbase64_fn(sqlite_func *f, int nargs, char const ** args)
+{
+ if (nargs != 1)
+ {
+ sqlite_set_result_error(f, "need exactly 1 arg to unbase64()", -1);
+ return;
+ }
+ data decoded;
+ decode_base64(base64(string(args[0])), decoded);
+ sqlite_set_result_string(f, decoded().c_str(), decoded().size());
+}
+
+void
+database::set_app(app_state * app)
+{
+ __app = app;
+}
+
+struct sqlite *
+database::sql(bool init)
+{
+ if (! __sql)
+ {
+ if (! init)
+ {
+ if (filename.string() == "")
+ throw informative_failure(string("no database specified"));
+ else if (! fs::exists(filename))
+ throw informative_failure(string("database ") + filename.string() +
+ string(" does not exist"));
+ }
+ N(filename.string() != "",
+ F("need database name"));
+ char * errmsg = NULL;
+ __sql = sqlite_open(filename.string().c_str(), 0755, &errmsg);
+ if (! __sql)
+ throw oops(string("could not open database: ") + filename.string() +
+ (errmsg ? (": " + string(errmsg)) : ""));
+ if (init)
+ execute(schema_constant);
+
+ check_schema();
+ install_functions(__app);
+ install_views();
+ }
+ return __sql;
+}
+
+void
+database::initialize()
+{
+ if (__sql)
+ throw oops("cannot initialize database while it is open");
+
+ N(!fs::exists(filename),
+ F("could not initialize database: %s: already exists")
+ % filename.string());
+
+ fs::path journal = mkpath(filename.string() + "-journal");
+ N(!fs::exists(journal),
+ F("existing (possibly stale) journal file '%s' has same stem as new database '%s'")
+ % journal.string() % filename.string());
+
+ sqlite *s = sql(true);
+ I(s != NULL);
+}
+
+
+struct
+dump_request
+{
+ dump_request() {};
+ struct sqlite *sql;
+ string table_name;
+ ostream *out;
+};
+
+static int
+dump_row_cb(void *data, int n, char **vals, char **cols)
+{
+ dump_request *dump = reinterpret_cast(data);
+ I(dump != NULL);
+ I(vals != NULL);
+ I(dump->out != NULL);
+ *(dump->out) << F("INSERT INTO %s VALUES(") % dump->table_name;
+ for (int i = 0; i < n; ++i)
+ {
+ if (i != 0)
+ *(dump->out) << ',';
+
+ if (vals[i] == NULL)
+ *(dump->out) << "NULL";
+ else
+ {
+ *(dump->out) << "'";
+ for (char *cp = vals[i]; *cp; ++cp)
+ {
+ if (*cp == '\'')
+ *(dump->out) << "''";
+ else
+ *(dump->out) << *cp;
+ }
+ *(dump->out) << "'";
+ }
+ }
+ *(dump->out) << ");\n";
+ return 0;
+}
+
+static int
+dump_table_cb(void *data, int n, char **vals, char **cols)
+{
+ dump_request *dump = reinterpret_cast(data);
+ I(dump != NULL);
+ I(dump->sql != NULL);
+ I(vals != NULL);
+ I(vals[0] != NULL);
+ I(vals[1] != NULL);
+ I(vals[2] != NULL);
+ I(n == 3);
+ if (string(vals[1]) == "table")
+ {
+ *(dump->out) << vals[2] << ";\n";
+ dump->table_name = string(vals[0]);
+ sqlite_exec_printf(dump->sql, "SELECT * FROM '%q'",
+ dump_row_cb, data, NULL, vals[0]);
+ }
+ return 0;
+}
+
+void
+database::dump(ostream & out)
+{
+ dump_request req;
+ req.out = &out;
+ req.sql = sql();
+ out << "BEGIN TRANSACTION;\n";
+ int res = sqlite_exec(req.sql,
+ "SELECT name, type, sql FROM sqlite_master "
+ "WHERE type='table' AND sql NOT NULL "
+ "ORDER BY substr(type,2,1), name",
+ dump_table_cb, &req, NULL);
+ I(res == SQLITE_OK);
+ out << "COMMIT;\n";
+}
+
+void
+database::load(istream & in)
+{
+ char buf[constants::bufsz];
+ string tmp;
+
+ N(filename.string() != "",
+ F("need database name"));
+ char * errmsg = NULL;
+ __sql = sqlite_open(filename.string().c_str(), 0755, &errmsg);
+ if (! __sql)
+ throw oops(string("could not open database: ") + filename.string() +
+ (errmsg ? (": " + string(errmsg)) : ""));
+
+ while(in)
+ {
+ in.read(buf, constants::bufsz);
+ tmp.append(buf, in.gcount());
+ }
+
+ execute(tmp.c_str());
+}
+
+
+void
+database::debug(string const & sql, ostream & out)
+{
+ results res;
+ fetch(res, any_cols, any_rows, sql.c_str());
+ out << "'" << sql << "' -> " << res.size() << " rows\n" << endl;
+ for (size_t i = 0; i < res.size(); ++i)
+ {
+ for (size_t j = 0; j < res[i].size(); ++j)
+ {
+ if (j != 0)
+ out << " | ";
+ out << res[i][j];
+ }
+ out << endl;
+ }
+}
+
+unsigned long
+database::get_statistic(string const & query)
+{
+ results res;
+ fetch(res, 1, 1, query.c_str());
+ return lexical_cast(res[0][0]);
+}
+
+void
+database::info(ostream & out)
+{
+ string id;
+ calculate_schema_id(sql(), id);
+ out << "schema version : " << id << endl;
+ out << "full manifests : " << get_statistic("SELECT COUNT(*) FROM manifests") << endl;
+ out << "manifest deltas : " << get_statistic("SELECT COUNT(*) FROM manifest_deltas") << endl;
+ out << "full files : " << get_statistic("SELECT COUNT(*) FROM files") << endl;
+ out << "file deltas : " << get_statistic("SELECT COUNT(*) FROM file_deltas") << endl;
+}
+
+void
+database::version(ostream & out)
+{
+ string id;
+ calculate_schema_id(sql(), id);
+ out << "database schema version: " << id << endl;
+}
+
+void
+database::migrate()
+{
+ N(filename.string() != "",
+ F("need database name"));
+ char * errmsg = NULL;
+ __sql = sqlite_open(filename.string().c_str(), 0755, &errmsg);
+ if (! __sql)
+ throw oops(string("could not open database: ") + filename.string() +
+ (errmsg ? (": " + string(errmsg)) : ""));
+ migrate_monotone_schema(__sql);
+ sqlite_close(__sql);
+}
+
+void
+database::rehash()
+{
+ transaction_guard guard(*this);
+ ticker mcerts("mcerts", "m", 1);
+ ticker fcerts("fcerts", "f", 1);
+ ticker pubkeys("pubkeys", "+", 1);
+ ticker privkeys("privkeys", "!", 1);
+
+ {
+ // rehash all mcerts
+ results res;
+ vector certs;
+ fetch(res, 5, any_rows,
+ "SELECT id, name, value, keypair, signature "
+ "FROM manifest_certs");
+ results_to_certs(res, certs);
+ execute("DELETE FROM manifest_certs");
+ for(vector::const_iterator i = certs.begin(); i != certs.end(); ++i)
+ {
+ put_cert(*i, "manifest_certs");
+ ++mcerts;
+ }
+ }
+
+ {
+ // rehash all fcerts
+ results res;
+ vector certs;
+ fetch(res, 5, any_rows,
+ "SELECT id, name, value, keypair, signature "
+ "FROM file_certs");
+ results_to_certs(res, certs);
+ execute("DELETE FROM file_certs");
+ for(vector::const_iterator i = certs.begin(); i != certs.end(); ++i)
+ {
+ put_cert(*i, "file_certs");
+ ++fcerts;
+ }
+ }
+
+
+ {
+ // rehash all pubkeys
+ results res;
+ fetch(res, 2, any_rows, "SELECT id, keydata FROM public_keys");
+ execute("DELETE FROM public_keys");
+ for (size_t i = 0; i < res.size(); ++i)
+ {
+ hexenc tmp;
+ key_hash_code(rsa_keypair_id(res[i][0]), base64(res[i][1]), tmp);
+ execute("INSERT INTO public_keys VALUES('%q', '%q', '%q')",
+ tmp().c_str(), res[i][0].c_str(), res[i][1].c_str());
+ ++pubkeys;
+ }
+ }
+
+{
+ // rehash all privkeys
+ results res;
+ fetch(res, 2, any_rows, "SELECT id, keydata FROM private_keys");
+ execute("DELETE FROM private_keys");
+ for (size_t i = 0; i < res.size(); ++i)
+ {
+ hexenc tmp;
+ key_hash_code(rsa_keypair_id(res[i][0]), base64< arc4 >(res[i][1]), tmp);
+ execute("INSERT INTO private_keys VALUES('%q', '%q', '%q')",
+ tmp().c_str(), res[i][0].c_str(), res[i][1].c_str());
+ ++privkeys;
+ }
+ }
+
+ guard.commit();
+}
+
+void
+database::ensure_open()
+{
+ sqlite *s = sql();
+ I(s != NULL);
+}
+
+database::~database()
+{
+ if (__sql)
+ {
+ sqlite_close(__sql);
+ __sql = 0;
+ }
+}
+
+static void
+assert_sqlite_ok(int res)
+{
+ switch (res)
+ {
+ case SQLITE_OK:
+ break;
+
+ case SQLITE_ERROR:
+ throw oops("SQL error or missing database");
+ break;
+
+ case SQLITE_INTERNAL:
+ throw oops("An internal logic error in SQLite");
+ break;
+
+ case SQLITE_PERM:
+ throw oops("Access permission denied");
+ break;
+
+ case SQLITE_ABORT:
+ throw oops("Callback routine requested an abort");
+ break;
+
+ case SQLITE_BUSY:
+ throw oops("The database file is locked");
+ break;
+
+ case SQLITE_LOCKED:
+ throw oops("A table in the database is locked");
+ break;
+
+ case SQLITE_NOMEM:
+ throw oops("A malloc() failed");
+ break;
+
+ case SQLITE_READONLY:
+ throw oops("Attempt to write a readonly database");
+ break;
+
+ case SQLITE_INTERRUPT:
+ throw oops("Operation terminated by sqlite_interrupt()");
+ break;
+
+ case SQLITE_IOERR:
+ throw oops("Some kind of disk I/O error occurred");
+ break;
+
+ case SQLITE_CORRUPT:
+ throw oops("The database disk image is malformed");
+ break;
+
+ case SQLITE_NOTFOUND:
+ throw oops("(Internal Only) Table or record not found");
+ break;
+
+ case SQLITE_FULL:
+ throw oops("Insertion failed because database is full");
+ break;
+
+ case SQLITE_CANTOPEN:
+ throw oops("Unable to open the database file");
+ break;
+
+ case SQLITE_PROTOCOL:
+ throw oops("database lock protocol error");
+ break;
+
+ case SQLITE_EMPTY:
+ throw oops("(Internal Only) database table is empty");
+ break;
+
+ case SQLITE_SCHEMA:
+ throw oops("The database schema changed");
+ break;
+
+ case SQLITE_TOOBIG:
+ throw oops("Too much data for one row of a table");
+ break;
+
+ case SQLITE_CONSTRAINT:
+ throw oops("Abort due to contraint violation");
+ break;
+
+ case SQLITE_MISMATCH:
+ throw oops("Data type mismatch");
+ break;
+
+ case SQLITE_MISUSE:
+ throw oops("Library used incorrectly");
+ break;
+
+ default:
+ throw oops(string("Unknown DB result code: ") + lexical_cast(res));
+ break;
+ }
+}
+
+void
+database::execute(char const * query, ...)
+{
+ va_list ap;
+ int res;
+ char * errmsg = NULL;
+
+ va_start(ap, query);
+
+ // log it
+ char * formatted = sqlite_vmprintf(query, ap);
+ string qq(formatted);
+ if (qq.size() > constants::db_log_line_sz)
+ qq = qq.substr(0, constants::db_log_line_sz) + string(" ...");
+ L(F("db.execute(\"%s\")\n") % qq);
+ sqlite_freemem(formatted);
+
+ va_end(ap);
+ va_start(ap, query);
+
+ // do it
+ res = sqlite_exec_vprintf(sql(), query, NULL, NULL, &errmsg, ap);
+
+ va_end(ap);
+
+ if (errmsg)
+ throw oops(string("sqlite exec error ") + errmsg);
+
+ assert_sqlite_ok(res);
+
+}
+
+void
+database::fetch(results & res,
+ int const want_cols,
+ int const want_rows,
+ char const * query, ...)
+{
+ char ** result = NULL;
+ int nrow;
+ int ncol;
+ char * errmsg = NULL;
+ int rescode;
+
+ va_list ap;
+ res.clear();
+ res.resize(0);
+ va_start(ap, query);
+
+ // log it
+ char * formatted = sqlite_vmprintf(query, ap);
+ string qq(formatted);
+ if (qq.size() > constants::log_line_sz)
+ qq = qq.substr(0, constants::log_line_sz) + string(" ...");
+ L(F("db.fetch(\"%s\")\n") % qq);
+ sqlite_freemem(formatted);
+
+ va_end(ap);
+ va_start(ap, query);
+
+ // do it
+ rescode = sqlite_get_table_vprintf(sql(), query, &result, &nrow, &ncol, &errmsg, ap);
+
+ va_end(ap);
+
+ cleanup_ptr
+ result_guard(result, &sqlite_free_table);
+
+ string ctx = string("db query [") + string(query) + "]: ";
+
+ if (errmsg)
+ throw oops(ctx + string("sqlite error ") + errmsg);
+ assert_sqlite_ok(rescode);
+
+ if (want_cols == 0 && ncol == 0) return;
+ if (want_rows == 0 && nrow == 0) return;
+ if (want_cols == any_rows && ncol == 0) return;
+ if (want_rows == any_rows && nrow == 0) return;
+
+ if (want_cols != any_cols &&
+ ncol != want_cols)
+ throw oops((F("%s wanted %d columns, got %s")
+ % ctx % want_cols % ncol).str());
+
+ if (want_rows != any_rows &&
+ nrow != want_rows)
+ throw oops((F("%s wanted %d rows, got %s")
+ % ctx % want_rows % nrow).str());
+
+ if (!result)
+ throw oops(ctx + "null result set");
+
+ for (int i = 0; i < ncol; ++i)
+ if (!result[i])
+ throw oops(ctx + "null column name");
+
+ for (int row = 0; row < nrow; ++row)
+ {
+ vector rowvec;
+ for (int col = 0; col < ncol; ++col)
+ {
+ int i = ((1 + row) * ncol) + col;
+ if (!result[i])
+ throw oops(ctx + "null result value");
+ else
+ rowvec.push_back(result[i]);
+ }
+ res.push_back(rowvec);
+ }
+}
+
+// general application-level logic
+
+void
+database::set_filename(fs::path const & file)
+{
+ if (__sql)
+ {
+ throw oops("cannot change filename to " + file.string() + " while db is open");
+ }
+ filename = file;
+}
+
+void
+database::begin_transaction()
+{
+ if (transaction_level == 0)
+ execute("BEGIN");
+ transaction_level++;
+}
+
+void
+database::commit_transaction()
+{
+ if (transaction_level == 1)
+ execute("COMMIT");
+ transaction_level--;
+}
+
+void
+database::rollback_transaction()
+{
+ if (transaction_level == 1)
+ execute("ROLLBACK");
+ transaction_level--;
+}
+
+
+bool
+database::exists(hexenc const & ident,
+ string const & table)
+{
+ results res;
+ fetch(res, one_col, any_rows,
+ "SELECT id FROM '%q' WHERE id = '%q'",
+ table.c_str(), ident().c_str());
+ I((res.size() == 1) || (res.size() == 0));
+ return res.size() == 1;
+}
+
+
+bool
+database::delta_exists(hexenc const & ident,
+ string const & table)
+{
+ results res;
+ fetch(res, one_col, any_rows,
+ "SELECT id FROM '%q' WHERE id = '%q'",
+ table.c_str(), ident().c_str());
+ return res.size() > 0;
+}
+
+bool
+database::delta_exists(hexenc const & ident,
+ hexenc const & base,
+ string const & table)
+{
+ results res;
+ fetch(res, one_col, any_rows,
+ "SELECT id FROM '%q' WHERE id = '%q' AND base = '%q'",
+ table.c_str(), ident().c_str(), base().c_str());
+ I((res.size() == 1) || (res.size() == 0));
+ return res.size() == 1;
+}
+
+int
+database::count(string const & table)
+{
+ results res;
+ fetch(res, one_col, one_row,
+ "SELECT COUNT(*) FROM '%q'",
+ table.c_str());
+ return lexical_cast(res[0][0]);
+}
+
+void
+database::get(hexenc const & ident,
+ base64< gzip > & dat,
+ string const & table)
+{
+ results res;
+ fetch(res, one_col, one_row,
+ "SELECT data FROM '%q' WHERE id = '%q'",
+ table.c_str(), ident().c_str());
+
+ // consistency check
+ base64 > rdata(res[0][0]);
+ hexenc tid;
+ calculate_ident(rdata, tid);
+ I(tid == ident);
+
+ dat = rdata;
+}
+
+void
+database::get_delta(hexenc const & ident,
+ hexenc const & base,
+ base64< gzip > & del,
+ string const & table)
+{
+ I(ident() != "");
+ I(base() != "");
+ results res;
+ fetch(res, one_col, one_row,
+ "SELECT delta FROM '%q' WHERE id = '%q' AND base = '%q'",
+ table.c_str(), ident().c_str(), base().c_str());
+ del = res[0][0];
+}
+
+void
+database::put(hexenc const & ident,
+ base64< gzip > const & dat,
+ string const & table)
+{
+ // consistency check
+ I(ident() != "");
+ hexenc tid;
+ calculate_ident(dat, tid);
+ I(tid == ident);
+
+ execute("INSERT INTO '%q' VALUES('%q', '%q')",
+ table.c_str(), ident().c_str(), dat().c_str());
+}
+
+
+void
+database::put_delta(hexenc const & ident,
+ hexenc const & base,
+ base64 > const & del,
+ string const & table)
+{
+ // nb: delta schema is (id, base, delta)
+ I(ident() != "");
+ I(base() != "");
+ execute("INSERT INTO '%q' VALUES('%q', '%q', '%q')",
+ table.c_str(),
+ ident().c_str(), base().c_str(), del().c_str());
+}
+
+void
+database::get_version(hexenc const & ident,
+ base64< gzip > & dat,
+ string const & data_table,
+ string const & delta_table)
+{
+ I(ident() != "");
+ if (exists(ident, data_table))
+ {
+ // easy path
+ get(ident, dat, data_table);
+ }
+ else
+ {
+ // tricky path
+
+ // we start from the file we want to reconstruct and work *forwards*
+ // through the database, until we get to a full data object. we then
+ // trace back through the list of edges we followed to get to the data
+ // object, applying reverse deltas.
+ //
+ // the effect of this algorithm is breadth-first search, backwards
+ // through the storage graph, to discover a forwards shortest path, and
+ // then following that shortest path with delta application.
+ //
+ // we used to do this with the boost graph library, but it invovled
+ // loading too much of the storage graph into memory at any moment. this
+ // imperative version only loads the descendents of the reconstruction
+ // node, so it much cheaper in terms of memory.
+ //
+ // we also maintain a cycle-detecting set, just to be safe
+
+ L(F("reconstructing %s in %s\n") % ident % delta_table);
+ I(delta_exists(ident, delta_table));
+
+ // nb: an edge map goes in the direction of the
+ // delta, *not* the direction we discover things in,
+ // i.e. each map is of the form [newid] -> [oldid]
+
+ typedef map< hexenc, hexenc > edgemap;
+ list< shared_ptr > paths;
+
+ set< hexenc > frontier, cycles;
+ frontier.insert(ident);
+
+ bool found_root = false;
+ hexenc root("");
+
+ while (! found_root)
+ {
+ set< hexenc > next_frontier;
+ shared_ptr frontier_map(new edgemap());
+
+ I(!frontier.empty());
+
+ for (set< hexenc >::const_iterator i = frontier.begin();
+ i != frontier.end(); ++i)
+ {
+ if (exists(*i, data_table))
+ {
+ root = *i;
+ found_root = true;
+ break;
+ }
+ else
+ {
+ cycles.insert(*i);
+ results res;
+ fetch(res, one_col, any_rows, "SELECT base from '%q' WHERE id = '%q'",
+ delta_table.c_str(), (*i)().c_str());
+ for (size_t k = 0; k < res.size(); ++k)
+ {
+ hexenc const nxt(res[k][0]);
+
+ if (cycles.find(nxt) != cycles.end())
+ throw oops("cycle in table '" + delta_table + "', at node "
+ + (*i)() + " <- " + nxt());
+
+ next_frontier.insert(nxt);
+
+ if (frontier_map->find(nxt) == frontier_map->end())
+ {
+ L(F("inserting edge: %s <- %s\n") % (*i) % nxt);
+ frontier_map->insert(make_pair(nxt, *i));
+ }
+ else
+ L(F("skipping merge edge %s <- %s\n") % (*i) % nxt);
+ }
+ }
+ }
+ if (!found_root)
+ {
+ frontier = next_frontier;
+ paths.push_front(frontier_map);
+ }
+ }
+
+ // path built, now all we need to do is follow it back
+
+ I(found_root);
+ I(root() != "");
+ base64< gzip > begin_packed;
+ data begin;
+ get(root, begin_packed, data_table);
+ unpack(begin_packed, begin);
+ hexenc curr = root;
+
+ boost::shared_ptr app = new_piecewise_applicator();
+ app->begin(begin());
+
+ for (list< shared_ptr >::const_iterator p = paths.begin();
+ p != paths.end(); ++p)
+ {
+ shared_ptr i = *p;
+ I(i->find(curr) != i->end());
+ hexenc const nxt = i->find(curr)->second;
+
+ L(F("following delta %s -> %s\n") % curr % nxt);
+ base64< gzip > del_packed;
+ get_delta(nxt, curr, del_packed, delta_table);
+ delta del;
+ unpack(del_packed, del);
+ apply_delta (app, del());
+ app->next();
+ curr = nxt;
+ }
+
+ string tmp;
+ app->finish(tmp);
+ data end(tmp);
+
+ hexenc final;
+ calculate_ident(end, final);
+ I(final == ident);
+ pack(end, dat);
+ }
+}
+
+
+void
+database::drop(hexenc const & ident,
+ string const & table)
+{
+ execute("DELETE FROM '%q' WHERE id = '%q'",
+ table.c_str(),
+ ident().c_str());
+}
+
+void
+database::put_version(hexenc const & old_id,
+ hexenc const & new_id,
+ base64< gzip > const & del,
+ string const & data_table,
+ string const & delta_table)
+{
+
+ base64< gzip > old_data, new_data;
+ base64< gzip > reverse_delta;
+
+ get_version(old_id, old_data, data_table, delta_table);
+ patch(old_data, del, new_data);
+ diff(new_data, old_data, reverse_delta);
+
+ transaction_guard guard(*this);
+ if (exists(old_id, data_table))
+ {
+ // descendent of a head version replaces the head, therefore old head
+ // must be disposed of
+ drop(old_id, data_table);
+ }
+ put(new_id, new_data, data_table);
+ put_delta(old_id, new_id, reverse_delta, delta_table);
+ guard.commit();
+}
+
+void
+database::put_reverse_version(hexenc const & new_id,
+ hexenc const & old_id,
+ base64< gzip > const & reverse_del,
+ string const & data_table,
+ string const & delta_table)
+{
+ base64< gzip > old_data, new_data;
+
+ get_version(new_id, new_data, data_table, delta_table);
+ patch(new_data, reverse_del, old_data);
+ hexenc check;
+ calculate_ident(old_data, check);
+ I(old_id == check);
+
+ transaction_guard guard(*this);
+ put_delta(old_id, new_id, reverse_del, delta_table);
+ guard.commit();
+}
+
+
+
+// ------------------------------------------------------------
+// -- --
+// -- public interface follows --
+// -- --
+// ------------------------------------------------------------
+
+bool
+database::file_version_exists(file_id const & id)
+{
+ return delta_exists(id.inner(), "file_deltas")
+ || exists(id.inner(), "files");
+}
+
+bool
+database::manifest_version_exists(manifest_id const & id)
+{
+ return delta_exists(id.inner(), "manifest_deltas")
+ || exists(id.inner(), "manifests");
+}
+
+bool
+database::revision_exists(revision_id const & id)
+{
+ return exists(id.inner(), "revisions");
+}
+
+
+void
+database::get_file_version(file_id const & id,
+ file_data & dat)
+{
+ base64< gzip > tmp;
+ get_version(id.inner(), tmp, "files", "file_deltas");
+ dat = tmp;
+}
+
+void
+database::get_manifest_version(manifest_id const & id,
+ manifest_data & dat)
+{
+ base64< gzip > tmp;
+ get_version(id.inner(), tmp, "manifests", "manifest_deltas");
+ dat = tmp;
+}
+
+void
+database::get_manifest(manifest_id const & id,
+ manifest_map & mm)
+{
+ manifest_data mdat;
+ get_manifest_version(id, mdat);
+ read_manifest_map(mdat, mm);
+}
+
+
+void
+database::put_file(file_id const & id,
+ file_data const & dat)
+{
+ put(id.inner(), dat.inner(), "files");
+}
+
+void
+database::put_file_version(file_id const & old_id,
+ file_id const & new_id,
+ file_delta const & del)
+{
+ put_version(old_id.inner(), new_id.inner(), del.inner(),
+ "files", "file_deltas");
+}
+
+void
+database::put_file_reverse_version(file_id const & new_id,
+ file_id const & old_id,
+ file_delta const & del)
+{
+ put_reverse_version(new_id.inner(), old_id.inner(), del.inner(),
+ "files", "file_deltas");
+}
+
+
+void
+database::put_manifest(manifest_id const & id,
+ manifest_data const & dat)
+{
+ put(id.inner(), dat.inner(), "manifests");
+}
+
+void
+database::put_manifest_version(manifest_id const & old_id,
+ manifest_id const & new_id,
+ manifest_delta const & del)
+{
+ put_version(old_id.inner(), new_id.inner(), del.inner(),
+ "manifests", "manifest_deltas");
+}
+
+void
+database::put_manifest_reverse_version(manifest_id const & new_id,
+ manifest_id const & old_id,
+ manifest_delta const & del)
+{
+ put_reverse_version(new_id.inner(), old_id.inner(), del.inner(),
+ "manifests", "manifest_deltas");
+}
+
+
+void
+database::get_revision_ancestry(std::set > & graph)
+{
+ results res;
+ graph.clear();
+ fetch(res, 2, any_rows,
+ "SELECT parent,child FROM revision_ancestry");
+ for (size_t i = 0; i < res.size(); ++i)
+ graph.insert(std::make_pair(revision_id(res[i][0]),
+ revision_id(res[i][1])));
+}
+
+void
+database::get_revision_parents(revision_id const & id,
+ set & parents)
+{
+ results res;
+ parents.clear();
+ fetch(res, one_col, any_rows,
+ "SELECT parent FROM revision_ancestry WHERE child = '%q'",
+ id.inner()().c_str());
+ for (size_t i = 0; i < res.size(); ++i)
+ parents.insert(revision_id(res[i][0]));
+}
+
+void
+database::get_revision_children(revision_id const & id,
+ set & children)
+{
+ results res;
+ children.clear();
+ fetch(res, one_col, any_rows,
+ "SELECT child FROM revision_ancestry WHERE parent = '%q'",
+ id.inner()().c_str());
+ for (size_t i = 0; i < res.size(); ++i)
+ children.insert(revision_id(res[i][0]));
+}
+
+void
+database::get_revision_manifest(revision_id const & rid,
+ manifest_id & mid)
+{
+ revision_set rev;
+ get_revision(rid, rev);
+ mid = rev.new_manifest;
+}
+
+void
+database::get_revision(revision_id const & id,
+ revision_set & rev)
+{
+ revision_data d;
+ get_revision(id, d);
+ read_revision_set(d, rev);
+}
+
+void
+database::get_revision(revision_id const & id,
+ revision_data & dat)
+{
+ results res;
+ fetch(res, one_col, one_row,
+ "SELECT data FROM revisions WHERE id = '%q'",
+ id.inner()().c_str());
+
+ dat = revision_data(res[0][0]);
+
+ // verify that we got a revision with the right id
+ {
+ revision_id tmp;
+ calculate_ident(dat, tmp);
+ I(id == tmp);
+ }
+}
+
+void
+database::put_revision(revision_id const & new_id,
+ revision_set const & rev)
+{
+
+ I(!revision_exists(new_id));
+ revision_data d;
+
+ write_revision_set(rev, d);
+ revision_id tmp;
+ calculate_ident(d, tmp);
+ I(tmp == new_id);
+
+ transaction_guard guard(*this);
+
+ execute("INSERT INTO revisions VALUES('%q', '%q')",
+ new_id.inner()().c_str(),
+ d.inner()().c_str());
+
+ for (edge_map::const_iterator e = rev.edges.begin();
+ e != rev.edges.end(); ++e)
+ {
+ execute("INSERT INTO revision_ancestry VALUES('%q', '%q')",
+ edge_old_revision(e).inner()().c_str(),
+ new_id.inner()().c_str());
+ }
+
+ guard.commit();
+}
+
+void
+database::put_revision(revision_id const & new_id,
+ revision_data const & dat)
+{
+ revision_set rev;
+ read_revision_set(dat, rev);
+ put_revision(new_id, rev);
+}
+
+
+// crypto key management
+
+void
+database::get_key_ids(string const & pattern,
+ vector & pubkeys,
+ vector & privkeys)
+{
+ pubkeys.clear();
+ privkeys.clear();
+ results res;
+
+ if (pattern != "")
+ fetch(res, one_col, any_rows,
+ "SELECT id from public_keys WHERE id GLOB '%q'",
+ pattern.c_str());
+ else
+ fetch(res, one_col, any_rows,
+ "SELECT id from public_keys");
+
+ for (size_t i = 0; i < res.size(); ++i)
+ pubkeys.push_back(res[i][0]);
+
+ if (pattern != "")
+ fetch(res, one_col, any_rows,
+ "SELECT id from private_keys WHERE id GLOB '%q'",
+ pattern.c_str());
+ else
+ fetch(res, one_col, any_rows,
+ "SELECT id from private_keys");
+
+ for (size_t i = 0; i < res.size(); ++i)
+ privkeys.push_back(res[i][0]);
+}
+
+void
+database::get_private_keys(vector & privkeys)
+{
+ privkeys.clear();
+ results res;
+ fetch(res, one_col, any_rows, "SELECT id from private_keys");
+ for (size_t i = 0; i < res.size(); ++i)
+ privkeys.push_back(res[i][0]);
+}
+
+bool
+database::public_key_exists(hexenc const & hash)
+{
+ results res;
+ fetch(res, one_col, any_rows,
+ "SELECT id FROM public_keys WHERE hash = '%q'",
+ hash().c_str());
+ I((res.size() == 1) || (res.size() == 0));
+ if (res.size() == 1)
+ return true;
+ return false;
+}
+
+bool
+database::public_key_exists(rsa_keypair_id const & id)
+{
+ results res;
+ fetch(res, one_col, any_rows,
+ "SELECT id FROM public_keys WHERE id = '%q'",
+ id().c_str());
+ I((res.size() == 1) || (res.size() == 0));
+ if (res.size() == 1)
+ return true;
+ return false;
+}
+
+bool
+database::private_key_exists(rsa_keypair_id const & id)
+{
+ results res;
+ fetch(res, one_col, any_rows,
+ "SELECT id FROM private_keys WHERE id = '%q'",
+ id().c_str());
+ I((res.size() == 1) || (res.size() == 0));
+ if (res.size() == 1)
+ return true;
+ return false;
+}
+
+bool
+database::key_exists(rsa_keypair_id const & id)
+{
+ return public_key_exists(id) || private_key_exists(id);
+}
+
+void
+database::get_pubkey(hexenc const & hash,
+ rsa_keypair_id & id,
+ base64 & pub_encoded)
+{
+ results res;
+ fetch(res, 2, one_row,
+ "SELECT id, keydata FROM public_keys where hash = '%q'",
+ hash().c_str());
+ id = res[0][0];
+ pub_encoded = res[0][1];
+}
+
+void
+database::get_key(rsa_keypair_id const & pub_id,
+ base64 & pub_encoded)
+{
+ results res;
+ fetch(res, one_col, one_row,
+ "SELECT keydata FROM public_keys where id = '%q'",
+ pub_id().c_str());
+ pub_encoded = res[0][0];
+}
+
+void
+database::get_key(rsa_keypair_id const & priv_id,
+ base64< arc4 > & priv_encoded)
+{
+ results res;
+ fetch(res, one_col, one_col,
+ "SELECT keydata FROM private_keys where id = '%q'",
+ priv_id().c_str());
+ priv_encoded = res[0][0];
+}
+
+
+void
+database::put_key(rsa_keypair_id const & pub_id,
+ base64 const & pub_encoded)
+{
+ hexenc thash;
+ key_hash_code(pub_id, pub_encoded, thash);
+ execute("INSERT INTO public_keys VALUES('%q', '%q', '%q')",
+ thash().c_str(), pub_id().c_str(), pub_encoded().c_str());
+}
+
+void
+database::put_key(rsa_keypair_id const & priv_id,
+ base64< arc4 > const & priv_encoded)
+{
+
+ hexenc thash;
+ key_hash_code(priv_id, priv_encoded, thash);
+ execute("INSERT INTO private_keys VALUES('%q', '%q', '%q')",
+ thash().c_str(), priv_id().c_str(), priv_encoded().c_str());
+}
+
+void
+database::put_key_pair(rsa_keypair_id const & id,
+ base64 const & pub_encoded,
+ base64< arc4 > const & priv_encoded)
+{
+ transaction_guard guard(*this);
+ put_key(id, pub_encoded);
+ put_key(id, priv_encoded);
+ guard.commit();
+}
+
+void
+database::delete_private_key(rsa_keypair_id const & pub_id)
+{
+ execute("DELETE FROM private_keys WHERE id = '%q'",
+ pub_id().c_str());
+}
+
+// cert management
+
+bool
+database::cert_exists(cert const & t,
+ string const & table)
+{
+ results res;
+ fetch(res, 1, any_rows,
+ "SELECT id FROM '%q' WHERE id = '%q' "
+ "AND name = '%q' AND value = '%q' "
+ "AND keypair = '%q' AND signature = '%q' ",
+ table.c_str(),
+ t.ident().c_str(),
+ t.name().c_str(),
+ t.value().c_str(),
+ t.key().c_str(),
+ t.sig().c_str());
+ I(res.size() == 0 || res.size() == 1);
+ return res.size() == 1;
+}
+
+void
+database::put_cert(cert const & t,
+ string const & table)
+{
+ hexenc thash;
+ cert_hash_code(t, thash);
+ execute("INSERT INTO '%q' VALUES('%q', '%q', '%q', '%q', '%q', '%q')",
+ table.c_str(),
+ thash().c_str(),
+ t.ident().c_str(),
+ t.name().c_str(),
+ t.value().c_str(),
+ t.key().c_str(),
+ t.sig().c_str());
+}
+
+void
+database::results_to_certs(results const & res,
+ vector & certs)
+{
+ certs.clear();
+ for (size_t i = 0; i < res.size(); ++i)
+ {
+ cert t;
+ t = cert(hexenc(res[i][0]),
+ cert_name(res[i][1]),
+ base64(res[i][2]),
+ rsa_keypair_id(res[i][3]),
+ base64(res[i][4]));
+ certs.push_back(t);
+ }
+}
+
+
+struct valid_certs
+{
+ set valid_signers;
+ hexenc ident;
+ cert_name name;
+ base64 val;
+ string signature_type;
+
+ valid_certs(string const & ty)
+ : signature_type(ty)
+ {
+ L(F("constructing validity checker for %s certs\n") % ty);
+ }
+
+ bool check_signer_trust(app_state & app)
+ {
+ bool trusted = false;
+
+ L(F("checking %d signer %s cert trust set\n")
+ % valid_signers.size() % signature_type);
+ try
+ {
+ cert_value v;
+ decode_base64(val, v);
+ // FIXME: lame string-makes-the-mode argument
+ if (signature_type == "revision")
+ trusted = app.lua.hook_get_revision_cert_trust(valid_signers,
+ ident, name, v);
+ else if (signature_type == "manifest")
+ trusted = app.lua.hook_get_manifest_cert_trust(valid_signers,
+ ident, name, v);
+ else if (signature_type == "file")
+ trusted = app.lua.hook_get_file_cert_trust(valid_signers,
+ ident, name, v);
+ else
+ I(false); // should be illegal
+ }
+ catch (...)
+ {
+ W(F("exception in sqlite valid_certs::check_set_trust\n"));
+ }
+
+ if (trusted)
+ L(F("trust function liked %d %s signers\n")
+ % valid_signers.size() % signature_type);
+ else
+ L(F("trust function disliked %d %s signers\n")
+ % valid_signers.size() % signature_type);
+
+ return trusted;
+ }
+
+ void check_single_signer(app_state & app,
+ int argc,
+ char const ** argv)
+ {
+ try
+ {
+ // args are: hash, id, name, value, keypair, pubkey, signature
+ // L(F("entries are [%s] [%s] [%s] [%s] [%s] [%s] [%s]\n")
+ // % argv[0] % argv[1] % argv[2] % argv[3] % argv[4] % argv[5] % argv[6]);
+
+ cert tmp = cert(hexenc(argv[1]),
+ cert_name(argv[2]),
+ base64(argv[3]),
+ rsa_keypair_id(argv[4]),
+ base64(argv[6]));
+
+ base64 pk(argv[5]);
+
+ if (ident().empty())
+ ident = tmp.ident;
+ else
+ I(ident == tmp.ident);
+
+ if (name().empty())
+ name = tmp.name;
+ else
+ I(name == tmp.name);
+
+ if (val().empty())
+ val = tmp.value;
+ else
+ I(val == tmp.value);
+
+ // L(F("examining '%s' %s cert from %s\n")
+ // % name % signature_type % ident);
+
+ string txt;
+ cert_signable_text(tmp, txt);
+ if (check_signature(app.lua, tmp.key, pk, txt, tmp.sig))
+ {
+ L(F("ok '%s' %s cert from %s\n")
+ % name % signature_type % tmp.key);
+ valid_signers.insert(tmp.key);
+ }
+ else
+ {
+ W(F("bad '%s' %s cert from %s\n")
+ % name % signature_type % tmp.key);
+ }
+ }
+ catch (std::exception & e)
+ {
+ W(F("std::exception in sqlite valid_certs::check_single_signer: %s\n") % e.what());
+ }
+ catch (...)
+ {
+ W(F("unknown exception in sqlite valid_certs::check_single_signer\n"));
+ }
+ }
+};
+
+extern "C"
+{
+
+static void
+trusted_step_callback(sqlite_func * fn_ctx,
+ int argc,
+ char const ** argv)
+{
+ app_state * app = NULL;
+ valid_certs ** vpp;
+
+ I(fn_ctx);
+ I(argc == 8);
+ I(argv);
+ for (size_t i = 0; i < 8; ++i)
+ I(argv[i]);
+
+ app = static_cast(sqlite_user_data(fn_ctx));
+ I(app);
+ vpp = static_cast(sqlite_aggregate_context(fn_ctx, sizeof(valid_certs *)));
+ I(vpp);
+ if (! (*vpp))
+ *vpp = new valid_certs(string(argv[0]));
+ I(*vpp);
+ (*vpp)->check_single_signer(*app, argc-1, argv+1);
+}
+
+static void
+trusted_finalize_callback(sqlite_func * fn_ctx)
+{
+ app_state * app = NULL;
+ valid_certs ** vpp;
+ app = static_cast(sqlite_user_data(fn_ctx));
+ I(app);
+ vpp = static_cast(sqlite_aggregate_context(fn_ctx, sizeof(valid_certs *)));
+
+ I(vpp);
+ I(*vpp);
+
+ if ((*vpp)->check_signer_trust(*app))
+ sqlite_set_result_int(fn_ctx, 1);
+ else
+ sqlite_set_result_int(fn_ctx, 0);
+
+ delete (*vpp);
+}
+}
+
+
+void
+database::install_functions(app_state * app)
+{
+ // register any functions we're going to use
+ I(sqlite_create_function(sql(), "unbase64", -1,
+ &sqlite_unbase64_fn,
+ NULL) == 0);
+
+ I(sqlite_create_aggregate(sql(), "trusted", 8,
+ &trusted_step_callback,
+ &trusted_finalize_callback,
+ app) == 0);
+}
+
+void
+database::install_views()
+{
+ // delete any existing views
+ results res;
+ fetch(res, one_col, any_rows,
+ "SELECT name FROM sqlite_master WHERE type='view'");
+ for (size_t i = 0; i < res.size(); ++i)
+ {
+ execute("DROP VIEW '%q'", res[i][0].c_str());
+ }
+ // register any views we're going to use
+ execute(views_constant);
+}
+
+void
+database::get_heads(base64 const & branch,
+ std::set & heads)
+{
+ results res;
+ fetch(res, one_col, any_rows,
+ "SELECT parent "
+ "FROM branch_heads "
+ "WHERE value = '%q'",
+ branch().c_str());
+ heads.clear();
+ for (size_t i = 0; i < res.size(); ++i)
+ {
+ heads.insert(revision_id(res[i][0]));
+ }
+}
+
+void
+database::get_certs(hexenc const & ident,
+ vector & certs,
+ string const & table)
+{
+ results res;
+ fetch(res, 5, any_rows,
+ "SELECT id, name, value, keypair, signature FROM '%q' "
+ "WHERE id = '%q'",
+ table.c_str(),
+ ident().c_str());
+ results_to_certs(res, certs);
+}
+
+
+void
+database::get_certs(cert_name const & name,
+ vector & certs,
+ string const & table)
+{
+ results res;
+ fetch(res, 5, any_rows,
+ "SELECT id, name, value, keypair, signature "
+ "FROM '%q' WHERE name = '%q'",
+ table.c_str(),
+ name().c_str());
+ results_to_certs(res, certs);
+}
+
+
+void
+database::get_certs(hexenc const & ident,
+ cert_name const & name,
+ vector & certs,
+ string const & table)
+{
+ results res;
+ fetch(res, 5, any_rows,
+ "SELECT id, name, value, keypair, signature "
+ "FROM '%q' "
+ "WHERE id = '%q' AND name = '%q'",
+ table.c_str(),
+ ident().c_str(),
+ name().c_str());
+ results_to_certs(res, certs);
+}
+
+void
+database::get_certs(cert_name const & name,
+ base64 const & val,
+ vector & certs,
+ string const & table)
+{
+ results res;
+ fetch(res, 5, any_rows,
+ "SELECT id, name, value, keypair, signature "
+ "FROM '%q' "
+ "WHERE name = '%q' AND value = '%q'",
+ table.c_str(),
+ name().c_str(),
+ val().c_str());
+ results_to_certs(res, certs);
+}
+
+
+void
+database::get_certs(hexenc const & ident,
+ cert_name const & name,
+ base64 const & value,
+ vector & certs,
+ string const & table)
+{
+ results res;
+ fetch(res, 5, any_rows,
+ "SELECT id, name, value, keypair, signature "
+ "FROM '%q' "
+ "WHERE id = '%q' AND name = '%q' AND value = '%q'",
+ table.c_str(),
+ ident().c_str(),
+ name().c_str(),
+ value().c_str());
+ results_to_certs(res, certs);
+}
+
+
+
+bool
+database::revision_cert_exists(revision const & cert)
+{
+ return cert_exists(cert.inner(), "revision_certs");
+}
+
+bool
+database::manifest_cert_exists(manifest const & cert)
+{
+ return cert_exists(cert.inner(), "manifest_certs");
+}
+
+bool
+database::file_cert_exists(file const & cert)
+{
+ return cert_exists(cert.inner(), "file_certs");
+}
+
+void
+database::put_manifest_cert(manifest const & cert)
+{
+ put_cert(cert.inner(), "manifest_certs");
+}
+
+void
+database::put_revision_cert(revision const & cert)
+{
+ put_cert(cert.inner(), "revision_certs");
+}
+
+void
+database::put_file_cert(file const & cert)
+{
+ put_cert(cert.inner(), "file_certs");
+}
+
+void
+database::get_file_certs(cert_name const & name,
+ vector< file > & ts)
+{
+ vector certs;
+ get_certs(name, certs, "file_certs");
+ ts.clear();
+ copy(certs.begin(), certs.end(), back_inserter(ts));
+}
+
+void
+database::get_file_certs(file_id const & id,
+ cert_name const & name,
+ vector< file > & ts)
+{
+ vector certs;
+ get_certs(id.inner(), name, certs, "file_certs");
+ ts.clear();
+ copy(certs.begin(), certs.end(), back_inserter(ts));
+}
+
+void
+database::get_file_certs(cert_name const & name,
+ base64 const & val,
+ vector< file > & ts)
+{
+ vector certs;
+ get_certs(name, val, certs, "file_certs");
+ ts.clear();
+ copy(certs.begin(), certs.end(), back_inserter(ts));
+}
+
+void
+database::get_file_certs(file_id const & id,
+ cert_name const & name,
+ base64 const & val,
+ vector< file > & ts)
+{
+ vector certs;
+ get_certs(id.inner(), name, val, certs, "file_certs");
+ ts.clear();
+ copy(certs.begin(), certs.end(), back_inserter(ts));
+}
+
+void
+database::get_file_certs(file_id const & id,
+ vector< file > & ts)
+{
+ vector certs;
+ get_certs(id.inner(), certs, "file_certs");
+ ts.clear();
+ copy(certs.begin(), certs.end(), back_inserter(ts));
+}
+
+
+bool
+database::file_cert_exists(hexenc const & hash)
+{
+ results res;
+ vector certs;
+ fetch(res, one_col, any_rows,
+ "SELECT id "
+ "FROM file_certs "
+ "WHERE hash = '%q'",
+ hash().c_str());
+ I(res.size() == 0 || res.size() == 1);
+ return (res.size() == 1);
+}
+
+void
+database::get_file_cert(hexenc const & hash,
+ file & c)
+{
+ results res;
+ vector certs;
+ fetch(res, 5, one_row,
+ "SELECT id, name, value, keypair, signature "
+ "FROM file_certs "
+ "WHERE hash = '%q'",
+ hash().c_str());
+ results_to_certs(res, certs);
+ I(certs.size() == 1);
+ c = file(certs[0]);
+}
+
+
+
+void
+database::get_revision_certs(cert_name const & name,
+ vector< revision > & ts)
+{
+ vector certs;
+ get_certs(name, certs, "revision_certs");
+ ts.clear();
+ copy(certs.begin(), certs.end(), back_inserter(ts));
+}
+
+void
+database::get_revision_certs(revision_id const & id,
+ cert_name const & name,
+ vector< revision > & ts)
+{
+ vector certs;
+ get_certs(id.inner(), name, certs, "revision_certs");
+ ts.clear();
+ copy(certs.begin(), certs.end(), back_inserter(ts));
+}
+
+void
+database::get_revision_certs(revision_id const & id,
+ cert_name const & name,
+ base64 const & val,
+ vector< revision > & ts)
+{
+ vector certs;
+ get_certs(id.inner(), name, val, certs, "revision_certs");
+ ts.clear();
+ copy(certs.begin(), certs.end(), back_inserter(ts));
+}
+
+void
+database::get_revision_certs(cert_name const & name,
+ base64 const & val,
+ vector< revision > & ts)
+{
+ vector certs;
+ get_certs(name, val, certs, "revision_certs");
+ ts.clear();
+ copy(certs.begin(), certs.end(), back_inserter(ts));
+}
+
+void
+database::get_revision_certs(revision_id const & id,
+ vector< revision > & ts)
+{
+ vector certs;
+ get_certs(id.inner(), certs, "revision_certs");
+ ts.clear();
+ copy(certs.begin(), certs.end(), back_inserter(ts));
+}
+
+void
+database::get_revision_cert(hexenc const & hash,
+ revision & c)
+{
+ results res;
+ vector certs;
+ fetch(res, 5, one_row,
+ "SELECT id, name, value, keypair, signature "
+ "FROM revision_certs "
+ "WHERE hash = '%q'",
+ hash().c_str());
+ results_to_certs(res, certs);
+ I(certs.size() == 1);
+ c = revision(certs[0]);
+}
+
+bool
+database::revision_cert_exists(hexenc const & hash)
+{
+ results res;
+ vector certs;
+ fetch(res, one_col, any_rows,
+ "SELECT id "
+ "FROM revision_certs "
+ "WHERE hash = '%q'",
+ hash().c_str());
+ I(res.size() == 0 || res.size() == 1);
+ return (res.size() == 1);
+}
+
+bool
+database::manifest_cert_exists(hexenc const & hash)
+{
+ results res;
+ vector certs;
+ fetch(res, one_col, any_rows,
+ "SELECT id "
+ "FROM manifest_certs "
+ "WHERE hash = '%q'",
+ hash().c_str());
+ I(res.size() == 0 || res.size() == 1);
+ return (res.size() == 1);
+}
+
+void
+database::get_manifest_cert(hexenc const & hash,
+ manifest & c)
+{
+ results res;
+ vector certs;
+ fetch(res, 5, one_row,
+ "SELECT id, name, value, keypair, signature "
+ "FROM manifest_certs "
+ "WHERE hash = '%q'",
+ hash().c_str());
+ results_to_certs(res, certs);
+ I(certs.size() == 1);
+ c = manifest(certs[0]);
+}
+
+void
+database::get_manifest_certs(manifest_id const & id,
+ vector< manifest > & ts)
+{
+ vector certs;
+ get_certs(id.inner(), certs, "manifest_certs");
+ ts.clear();
+ copy(certs.begin(), certs.end(), back_inserter(ts));
+}
+
+
+void
+database::get_manifest_certs(cert_name const & name,
+ vector< manifest > & ts)
+{
+ vector certs;
+ get_certs(name, certs, "manifest_certs");
+ ts.clear();
+ copy(certs.begin(), certs.end(), back_inserter(ts));
+}
+
+void
+database::get_manifest_certs(manifest_id const & id,
+ cert_name const & name,
+ vector< manifest > & ts)
+{
+ vector certs;
+ get_certs(id.inner(), name, certs, "manifest_certs");
+ ts.clear();
+ copy(certs.begin(), certs.end(), back_inserter(ts));
+}
+
+
+// completions
+void
+database::complete(string const & partial,
+ set & completions)
+{
+ results res;
+ completions.clear();
+
+ fetch(res, 1, any_rows,
+ "SELECT id FROM revisions WHERE id GLOB '%q*'",
+ partial.c_str());
+
+ for (size_t i = 0; i < res.size(); ++i)
+ completions.insert(revision_id(res[i][0]));
+}
+
+
+void
+database::complete(string const & partial,
+ set & completions)
+{
+ results res;
+ completions.clear();
+
+ fetch(res, 1, any_rows,
+ "SELECT id FROM manifests WHERE id GLOB '%q*'",
+ partial.c_str());
+
+ for (size_t i = 0; i < res.size(); ++i)
+ completions.insert(manifest_id(res[i][0]));
+
+ res.clear();
+
+ fetch(res, 1, any_rows,
+ "SELECT id FROM manifest_deltas WHERE id GLOB '%q*'",
+ partial.c_str());
+
+ for (size_t i = 0; i < res.size(); ++i)
+ completions.insert(manifest_id(res[i][0]));
+}
+
+void
+database::complete(string const & partial,
+ set & completions)
+{
+ results res;
+ completions.clear();
+
+ fetch(res, 1, any_rows,
+ "SELECT id FROM files WHERE id GLOB '%q*'",
+ partial.c_str());
+
+ for (size_t i = 0; i < res.size(); ++i)
+ completions.insert(file_id(res[i][0]));
+
+ res.clear();
+
+ fetch(res, 1, any_rows,
+ "SELECT id FROM file_deltas WHERE id GLOB '%q*'",
+ partial.c_str());
+
+ for (size_t i = 0; i < res.size(); ++i)
+ completions.insert(file_id(res[i][0]));
+}
+
+using commands::selector_type;
+
+static void selector_to_certname(selector_type ty,
+ string & s)
+{
+ switch (ty)
+ {
+ case commands::sel_author:
+ s = author_cert_name;
+ break;
+ case commands::sel_branch:
+ s = branch_cert_name;
+ break;
+ case commands::sel_date:
+ s = date_cert_name;
+ break;
+ case commands::sel_tag:
+ s = tag_cert_name;
+ break;
+ case commands::sel_ident:
+ case commands::sel_unknown:
+ I(false); // don't do this.
+ break;
+ }
+}
+
+void database::complete(selector_type ty,
+ string const & partial,
+ vector > const & limit,
+ set & completions)
+{
+ completions.clear();
+
+ // step 1: the limit is transformed into an SQL select statement which
+ // selects a set of IDs from the manifest_certs table which match the
+ // limit. this is done by building an SQL select statement for each term
+ // in the limit and then INTERSECTing them all.
+
+ string lim = "(";
+ bool first_limit = true;
+ for (vector >::const_iterator i = limit.begin();
+ i != limit.end(); ++i)
+ {
+ if (first_limit)
+ first_limit = false;
+ else
+ lim += " INTERSECT ";
+
+ if (i->first == commands::sel_ident)
+ {
+ lim += "SELECT id FROM revision_certs ";
+ lim += (F("WHERE id GLOB '%s*'")
+ % i->second).str();
+ }
+ else if (i->first == commands::sel_unknown)
+ {
+ lim += "SELECT id FROM revision_certs ";
+ lim += (F(" WHERE (name='%s' OR name='%s' OR name='%s')")
+ % author_cert_name
+ % tag_cert_name
+ % branch_cert_name).str();
+ lim += (F(" AND unbase64(value) glob '*%s*'")
+ % i->second).str();
+ }
+ else
+ {
+ string certname;
+ selector_to_certname(i->first, certname);
+ lim += "SELECT id FROM revision_certs ";
+ lim += (F("WHERE name='%s' AND unbase64(value) glob '*%s*'")
+ % certname % i->second).str();
+ }
+ }
+ lim += ")";
+
+ // step 2: depending on what we've been asked to disambiguate, we
+ // will complete either some idents, or cert values, or "unknown"
+ // which generally means "author, tag or branch"
+
+ string query;
+ if (ty == commands::sel_ident)
+ {
+ query = (F("SELECT id FROM %s") % lim).str();
+ }
+ else
+ {
+ query = "SELECT value FROM revision_certs WHERE";
+ if (ty == commands::sel_unknown)
+ {
+ query +=
+ (F(" (name='%s' OR name='%s' OR name='%s')")
+ % author_cert_name
+ % tag_cert_name
+ % branch_cert_name).str();
+ }
+ else
+ {
+ string certname;
+ selector_to_certname(ty, certname);
+ query +=
+ (F(" (name='%s')") % certname).str();
+ }
+
+ query += (F(" AND (unbase64(value) GLOB '*%s*')") % partial).str();
+ query += (F(" AND (id IN %s)") % lim).str();
+ }
+
+ results res;
+ fetch(res, one_col, any_rows, query.c_str());
+ for (size_t i = 0; i < res.size(); ++i)
+ {
+ if (ty == commands::sel_ident)
+ completions.insert(res[i][0]);
+ else
+ {
+ base64 row_encoded(res[i][0]);
+ data row_decoded;
+ decode_base64(row_encoded, row_decoded);
+ completions.insert(row_decoded());
+ }
+ }
+}
+
+
+// merkle nodes
+
+bool
+database::merkle_node_exists(string const & type,
+ utf8 const & collection,
+ size_t level,
+ hexenc const & prefix)
+{
+ results res;
+ fetch(res, one_col, one_row,
+ "SELECT COUNT(*) "
+ "FROM merkle_nodes "
+ "WHERE type = '%q' "
+ "AND collection = '%q' "
+ "AND level = %d "
+ "AND prefix = '%q' ",
+ type.c_str(), collection().c_str(), level, prefix().c_str());
+ size_t n_nodes = lexical_cast(res[0][0]);
+ I(n_nodes == 0 || n_nodes == 1);
+ return n_nodes == 1;
+}
+
+void
+database::get_merkle_node(string const & type,
+ utf8 const & collection,
+ size_t level,
+ hexenc const & prefix,
+ base64 & node)
+{
+ results res;
+ fetch(res, one_col, one_row,
+ "SELECT body "
+ "FROM merkle_nodes "
+ "WHERE type = '%q' "
+ "AND collection = '%q' "
+ "AND level = %d "
+ "AND prefix = '%q'",
+ type.c_str(), collection().c_str(), level, prefix().c_str());
+ node = res[0][0];
+}
+
+void
+database::put_merkle_node(string const & type,
+ utf8 const & collection,
+ size_t level,
+ hexenc const & prefix,
+ base64 const & node)
+{
+ execute("INSERT OR REPLACE "
+ "INTO merkle_nodes "
+ "VALUES ('%q', '%q', %d, '%q', '%q')",
+ type.c_str(), collection().c_str(), level, prefix().c_str(), node().c_str());
+}
+
+void
+database::erase_merkle_nodes(string const & type,
+ utf8 const & collection)
+{
+ execute("DELETE FROM merkle_nodes "
+ "WHERE type = '%q' "
+ "AND collection = '%q'",
+ type.c_str(), collection().c_str());
+}
+
+// transaction guards
+
+transaction_guard::transaction_guard(database & d) : committed(false), db(d)
+{
+ db.begin_transaction();
+}
+transaction_guard::~transaction_guard()
+{
+ if (committed)
+ db.commit_transaction();
+ else
+ db.rollback_transaction();
+}
+
+void
+transaction_guard::commit()
+{
+ committed = true;
+}
+
============================================================
--- tests/test_a_merge/left 7e718789c8e733f9a3e9bafd9843f8e5c4bfbdc8
+++ tests/test_a_merge/left 7e718789c8e733f9a3e9bafd9843f8e5c4bfbdc8
@@ -0,0 +1,2231 @@
+// copyright (C) 2002, 2003 graydon hoare
+// all rights reserved.
+// licensed to the public under the terms of the GNU GPL (>= 2)
+// see the file COPYING for details
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include
+#include
+#include
+#include
+
+#include
+
+#include "app_state.hh"
+#include "cert.hh"
+#include "cleanup.hh"
+#include "constants.hh"
+#include "database.hh"
+#include "keys.hh"
+#include "sanity.hh"
+#include "schema_migration.hh"
+#include "cert.hh"
+#include "transforms.hh"
+#include "ui.hh"
+#include "vocab.hh"
+#include "xdelta.hh"
+
+// defined in schema.sql, converted to header:
+#include "schema.h"
+
+// defined in views.sql, converted to header:
+#include "views.h"
+
+// this file defines a public, typed interface to the database.
+// the database class encapsulates all knowledge about sqlite,
+// the schema, and all SQL statements used to access the schema.
+//
+// see file schema.sql for the text of the schema.
+
+using boost::shared_ptr;
+using boost::lexical_cast;
+using namespace std;
+
+int const one_row = 1;
+int const one_col = 1;
+int const any_rows = -1;
+int const any_cols = -1;
+
+extern "C" {
+ // strangely this isn't declared, even though it's present in my sqlite.
+ char *sqlite_vmprintf(const char *zFormat, va_list);
+}
+
+database::database(fs::path const & fn) :
+ filename(fn),
+ // nb. update this if you change the schema. unfortunately we are not
+ // using self-digesting schemas due to comment irregularities and
+ // non-alphabetic ordering of tables in sql source files. we could create
+ // a temporary db, write our intended schema into it, and read it back,
+ // but this seems like it would be too rude. possibly revisit this issue.
+ schema("c1e86588e11ad07fa53e5d294edc043ce1d4005a"),
+ __sql(NULL),
+ transaction_level(0)
+{}
+
+void
+database::check_schema()
+{
+ string db_schema_id;
+ calculate_schema_id (__sql, db_schema_id);
+ N (schema == db_schema_id,
+ F("database schemas do not match: "
+ "wanted %s, got %s. try migrating database")
+ % schema % db_schema_id);
+}
+
+static void
+sqlite_unbase64_fn(sqlite_func *f, int nargs, char const ** args)
+{
+ if (nargs != 1)
+ {
+ sqlite_set_result_error(f, "need exactly 1 arg to unbase64()", -1);
+ return;
+ }
+ data decoded;
+ decode_base64(base64(string(args[0])), decoded);
+ sqlite_set_result_string(f, decoded().c_str(), decoded().size());
+}
+
+void
+database::set_app(app_state * app)
+{
+ __app = app;
+}
+
+struct sqlite *
+database::sql(bool init)
+{
+ if (! __sql)
+ {
+ if (! init)
+ {
+ if (filename.string() == "")
+ throw informative_failure(string("no database specified"));
+ else if (! fs::exists(filename))
+ throw informative_failure(string("database ") + filename.string() +
+ string(" does not exist"));
+ }
+ N(filename.string() != "",
+ F("need database name"));
+ char * errmsg = NULL;
+ __sql = sqlite_open(filename.string().c_str(), 0755, &errmsg);
+ if (! __sql)
+ throw oops(string("could not open database: ") + filename.string() +
+ (errmsg ? (": " + string(errmsg)) : ""));
+ if (init)
+ execute(schema_constant);
+
+ check_schema();
+ install_functions(__app);
+ install_views();
+ }
+ return __sql;
+}
+
+void
+database::initialize()
+{
+ if (__sql)
+ throw oops("cannot initialize database while it is open");
+
+ N(!fs::exists(filename),
+ F("could not initialize database: %s: already exists")
+ % filename.string());
+
+ fs::path journal = mkpath(filename.string() + "-journal");
+ N(!fs::exists(journal),
+ F("existing (possibly stale) journal file '%s' has same stem as new database '%s'")
+ % journal.string() % filename.string());
+
+ sqlite *s = sql(true);
+ I(s != NULL);
+}
+
+
+struct
+dump_request
+{
+ dump_request() {};
+ struct sqlite *sql;
+ string table_name;
+ ostream *out;
+};
+
+static int
+dump_row_cb(void *data, int n, char **vals, char **cols)
+{
+ dump_request *dump = reinterpret_cast(data);
+ I(dump != NULL);
+ I(vals != NULL);
+ I(dump->out != NULL);
+ *(dump->out) << F("INSERT INTO %s VALUES(") % dump->table_name;
+ for (int i = 0; i < n; ++i)
+ {
+ if (i != 0)
+ *(dump->out) << ',';
+
+ if (vals[i] == NULL)
+ *(dump->out) << "NULL";
+ else
+ {
+ *(dump->out) << "'";
+ for (char *cp = vals[i]; *cp; ++cp)
+ {
+ if (*cp == '\'')
+ *(dump->out) << "''";
+ else
+ *(dump->out) << *cp;
+ }
+ *(dump->out) << "'";
+ }
+ }
+ *(dump->out) << ");\n";
+ return 0;
+}
+
+static int
+dump_table_cb(void *data, int n, char **vals, char **cols)
+{
+ dump_request *dump = reinterpret_cast(data);
+ I(dump != NULL);
+ I(dump->sql != NULL);
+ I(vals != NULL);
+ I(vals[0] != NULL);
+ I(vals[1] != NULL);
+ I(vals[2] != NULL);
+ I(n == 3);
+ if (string(vals[1]) == "table")
+ {
+ *(dump->out) << vals[2] << ";\n";
+ dump->table_name = string(vals[0]);
+ sqlite_exec_printf(dump->sql, "SELECT * FROM '%q'",
+ dump_row_cb, data, NULL, vals[0]);
+ }
+ return 0;
+}
+
+void
+database::dump(ostream & out)
+{
+ dump_request req;
+ req.out = &out;
+ req.sql = sql();
+ out << "BEGIN TRANSACTION;\n";
+ int res = sqlite_exec(req.sql,
+ "SELECT name, type, sql FROM sqlite_master "
+ "WHERE type='table' AND sql NOT NULL "
+ "ORDER BY substr(type,2,1), name",
+ dump_table_cb, &req, NULL);
+ I(res == SQLITE_OK);
+ out << "COMMIT;\n";
+}
+
+void
+database::load(istream & in)
+{
+ char buf[constants::bufsz];
+ string tmp;
+
+ N(filename.string() != "",
+ F("need database name"));
+ char * errmsg = NULL;
+ __sql = sqlite_open(filename.string().c_str(), 0755, &errmsg);
+ if (! __sql)
+ throw oops(string("could not open database: ") + filename.string() +
+ (errmsg ? (": " + string(errmsg)) : ""));
+
+ while(in)
+ {
+ in.read(buf, constants::bufsz);
+ tmp.append(buf, in.gcount());
+ }
+
+ execute(tmp.c_str());
+}
+
+
+void
+database::debug(string const & sql, ostream & out)
+{
+ results res;
+ fetch(res, any_cols, any_rows, sql.c_str());
+ out << "'" << sql << "' -> " << res.size() << " rows\n" << endl;
+ for (size_t i = 0; i < res.size(); ++i)
+ {
+ for (size_t j = 0; j < res[i].size(); ++j)
+ {
+ if (j != 0)
+ out << " | ";
+ out << res[i][j];
+ }
+ out << endl;
+ }
+}
+
+unsigned long
+database::get_statistic(string const & query)
+{
+ results res;
+ fetch(res, 1, 1, query.c_str());
+ return lexical_cast(res[0][0]);
+}
+
+void
+database::info(ostream & out)
+{
+ string id;
+ calculate_schema_id(sql(), id);
+ out << "schema version : " << id << endl;
+ out << "full manifests : " << get_statistic("SELECT COUNT(*) FROM manifests") << endl;
+ out << "manifest deltas : " << get_statistic("SELECT COUNT(*) FROM manifest_deltas") << endl;
+ out << "full files : " << get_statistic("SELECT COUNT(*) FROM files") << endl;
+ out << "file deltas : " << get_statistic("SELECT COUNT(*) FROM file_deltas") << endl;
+}
+
+void
+database::version(ostream & out)
+{
+ string id;
+ calculate_schema_id(sql(), id);
+ out << "database schema version: " << id << endl;
+}
+
+void
+database::migrate()
+{
+ N(filename.string() != "",
+ F("need database name"));
+ char * errmsg = NULL;
+ __sql = sqlite_open(filename.string().c_str(), 0755, &errmsg);
+ if (! __sql)
+ throw oops(string("could not open database: ") + filename.string() +
+ (errmsg ? (": " + string(errmsg)) : ""));
+ migrate_monotone_schema(__sql);
+ sqlite_close(__sql);
+}
+
+void
+database::rehash()
+{
+ transaction_guard guard(*this);
+ ticker mcerts("mcerts", "m", 1);
+ ticker fcerts("fcerts", "f", 1);
+ ticker pubkeys("pubkeys", "+", 1);
+ ticker privkeys("privkeys", "!", 1);
+
+ {
+ // rehash all mcerts
+ results res;
+ vector certs;
+ fetch(res, 5, any_rows,
+ "SELECT id, name, value, keypair, signature "
+ "FROM manifest_certs");
+ results_to_certs(res, certs);
+ execute("DELETE FROM manifest_certs");
+ for(vector::const_iterator i = certs.begin(); i != certs.end(); ++i)
+ {
+ put_cert(*i, "manifest_certs");
+ ++mcerts;
+ }
+ }
+
+ {
+ // rehash all fcerts
+ results res;
+ vector certs;
+ fetch(res, 5, any_rows,
+ "SELECT id, name, value, keypair, signature "
+ "FROM file_certs");
+ results_to_certs(res, certs);
+ execute("DELETE FROM file_certs");
+ for(vector::const_iterator i = certs.begin(); i != certs.end(); ++i)
+ {
+ put_cert(*i, "file_certs");
+ ++fcerts;
+ }
+ }
+
+
+ {
+ // rehash all pubkeys
+ results res;
+ fetch(res, 2, any_rows, "SELECT id, keydata FROM public_keys");
+ execute("DELETE FROM public_keys");
+ for (size_t i = 0; i < res.size(); ++i)
+ {
+ hexenc tmp;
+ key_hash_code(rsa_keypair_id(res[i][0]), base64(res[i][1]), tmp);
+ execute("INSERT INTO public_keys VALUES('%q', '%q', '%q')",
+ tmp().c_str(), res[i][0].c_str(), res[i][1].c_str());
+ ++pubkeys;
+ }
+ }
+
+{
+ // rehash all privkeys
+ results res;
+ fetch(res, 2, any_rows, "SELECT id, keydata FROM private_keys");
+ execute("DELETE FROM private_keys");
+ for (size_t i = 0; i < res.size(); ++i)
+ {
+ hexenc tmp;
+ key_hash_code(rsa_keypair_id(res[i][0]), base64< arc4 >(res[i][1]), tmp);
+ execute("INSERT INTO private_keys VALUES('%q', '%q', '%q')",
+ tmp().c_str(), res[i][0].c_str(), res[i][1].c_str());
+ ++privkeys;
+ }
+ }
+
+ guard.commit();
+}
+
+void
+database::ensure_open()
+{
+ sqlite *s = sql();
+ I(s != NULL);
+}
+
+database::~database()
+{
+ if (__sql)
+ {
+ sqlite_close(__sql);
+ __sql = 0;
+ }
+}
+
+static void
+assert_sqlite_ok(int res)
+{
+ switch (res)
+ {
+ case SQLITE_OK:
+ break;
+
+ case SQLITE_ERROR:
+ throw oops("SQL error or missing database");
+ break;
+
+ case SQLITE_INTERNAL:
+ throw oops("An internal logic error in SQLite");
+ break;
+
+ case SQLITE_PERM:
+ throw oops("Access permission denied");
+ break;
+
+ case SQLITE_ABORT:
+ throw oops("Callback routine requested an abort");
+ break;
+
+ case SQLITE_BUSY:
+ throw oops("The database file is locked");
+ break;
+
+ case SQLITE_LOCKED:
+ throw oops("A table in the database is locked");
+ break;
+
+ case SQLITE_NOMEM:
+ throw oops("A malloc() failed");
+ break;
+
+ case SQLITE_READONLY:
+ throw oops("Attempt to write a readonly database");
+ break;
+
+ case SQLITE_INTERRUPT:
+ throw oops("Operation terminated by sqlite_interrupt()");
+ break;
+
+ case SQLITE_IOERR:
+ throw oops("Some kind of disk I/O error occurred");
+ break;
+
+ case SQLITE_CORRUPT:
+ throw oops("The database disk image is malformed");
+ break;
+
+ case SQLITE_NOTFOUND:
+ throw oops("(Internal Only) Table or record not found");
+ break;
+
+ case SQLITE_FULL:
+ throw oops("Insertion failed because database is full");
+ break;
+
+ case SQLITE_CANTOPEN:
+ throw oops("Unable to open the database file");
+ break;
+
+ case SQLITE_PROTOCOL:
+ throw oops("database lock protocol error");
+ break;
+
+ case SQLITE_EMPTY:
+ throw oops("(Internal Only) database table is empty");
+ break;
+
+ case SQLITE_SCHEMA:
+ throw oops("The database schema changed");
+ break;
+
+ case SQLITE_TOOBIG:
+ throw oops("Too much data for one row of a table");
+ break;
+
+ case SQLITE_CONSTRAINT:
+ throw oops("Abort due to contraint violation");
+ break;
+
+ case SQLITE_MISMATCH:
+ throw oops("Data type mismatch");
+ break;
+
+ case SQLITE_MISUSE:
+ throw oops("Library used incorrectly");
+ break;
+
+ default:
+ throw oops(string("Unknown DB result code: ") + lexical_cast(res));
+ break;
+ }
+}
+
+void
+database::execute(char const * query, ...)
+{
+ va_list ap;
+ int res;
+ char * errmsg = NULL;
+
+ va_start(ap, query);
+
+ // log it
+ char * formatted = sqlite_vmprintf(query, ap);
+ string qq(formatted);
+ if (qq.size() > constants::db_log_line_sz)
+ qq = qq.substr(0, constants::db_log_line_sz) + string(" ...");
+ L(F("db.execute(\"%s\")\n") % qq);
+ sqlite_freemem(formatted);
+
+ va_end(ap);
+ va_start(ap, query);
+
+ // do it
+ res = sqlite_exec_vprintf(sql(), query, NULL, NULL, &errmsg, ap);
+
+ va_end(ap);
+
+ if (errmsg)
+ throw oops(string("sqlite exec error ") + errmsg);
+
+ assert_sqlite_ok(res);
+
+}
+
+void
+database::fetch(results & res,
+ int const want_cols,
+ int const want_rows,
+ char const * query, ...)
+{
+ char ** result = NULL;
+ int nrow;
+ int ncol;
+ char * errmsg = NULL;
+ int rescode;
+
+ va_list ap;
+ res.clear();
+ res.resize(0);
+ va_start(ap, query);
+
+ // log it
+ char * formatted = sqlite_vmprintf(query, ap);
+ string qq(formatted);
+ if (qq.size() > constants::log_line_sz)
+ qq = qq.substr(0, constants::log_line_sz) + string(" ...");
+ L(F("db.fetch(\"%s\")\n") % qq);
+ sqlite_freemem(formatted);
+
+ va_end(ap);
+ va_start(ap, query);
+
+ // do it
+ rescode = sqlite_get_table_vprintf(sql(), query, &result, &nrow, &ncol, &errmsg, ap);
+
+ va_end(ap);
+
+ cleanup_ptr
+ result_guard(result, &sqlite_free_table);
+
+ string ctx = string("db query [") + string(query) + "]: ";
+
+ if (errmsg)
+ throw oops(ctx + string("sqlite error ") + errmsg);
+ assert_sqlite_ok(rescode);
+
+ if (want_cols == 0 && ncol == 0) return;
+ if (want_rows == 0 && nrow == 0) return;
+ if (want_cols == any_rows && ncol == 0) return;
+ if (want_rows == any_rows && nrow == 0) return;
+
+ if (want_cols != any_cols &&
+ ncol != want_cols)
+ throw oops((F("%s wanted %d columns, got %s")
+ % ctx % want_cols % ncol).str());
+
+ if (want_rows != any_rows &&
+ nrow != want_rows)
+ throw oops((F("%s wanted %d rows, got %s")
+ % ctx % want_rows % nrow).str());
+
+ if (!result)
+ throw oops(ctx + "null result set");
+
+ for (int i = 0; i < ncol; ++i)
+ if (!result[i])
+ throw oops(ctx + "null column name");
+
+ for (int row = 0; row < nrow; ++row)
+ {
+ vector rowvec;
+ for (int col = 0; col < ncol; ++col)
+ {
+ int i = ((1 + row) * ncol) + col;
+ if (!result[i])
+ throw oops(ctx + "null result value");
+ else
+ rowvec.push_back(result[i]);
+ }
+ res.push_back(rowvec);
+ }
+}
+
+// general application-level logic
+
+void
+database::set_filename(fs::path const & file)
+{
+ if (__sql)
+ {
+ throw oops("cannot change filename to " + file.string() + " while db is open");
+ }
+ filename = file;
+}
+
+void
+database::begin_transaction()
+{
+ if (transaction_level == 0)
+ execute("BEGIN");
+ transaction_level++;
+}
+
+void
+database::commit_transaction()
+{
+ if (transaction_level == 1)
+ execute("COMMIT");
+ transaction_level--;
+}
+
+// Leave a transaction scope, discarding its work. ROLLBACK is issued
+// only at the outermost level (level 1).
+// NOTE(review): a rollback at an inner nesting level merely decrements
+// the counter without flagging the outer transaction — presumably
+// callers only roll back at the outermost scope; confirm.
+void
+database::rollback_transaction()
+{
+ if (transaction_level == 1)
+ execute("ROLLBACK");
+ transaction_level--;
+}
+
+
+// Test whether a full-data row with the given ident is present in
+// 'table'. The invariant check insists the query matched at most one
+// row.
+// NOTE(review): template arguments (e.g. hexenc<id>) appear to have
+// been stripped from this diff text — confirm against the original
+// source before applying.
+bool
+database::exists(hexenc const & ident,
+ string const & table)
+{
+ results res;
+ fetch(res, one_col, any_rows,
+ "SELECT id FROM '%q' WHERE id = '%q'",
+ table.c_str(), ident().c_str());
+ I((res.size() == 1) || (res.size() == 0));
+ return res.size() == 1;
+}
+
+
+// Test whether any delta row keyed by 'ident' is present in 'table'.
+// Unlike exists(), multiple matching rows are accepted (the id/base
+// overload below is the one-row variant): true iff at least one match.
+bool
+database::delta_exists(hexenc const & ident,
+ string const & table)
+{
+ results res;
+ fetch(res, one_col, any_rows,
+ "SELECT id FROM '%q' WHERE id = '%q'",
+ table.c_str(), ident().c_str());
+ return res.size() > 0;
+}
+
+// Test whether the specific delta (ident, base) is present in 'table'.
+// The (id, base) pair is expected to match at most one row, enforced
+// by the invariant check.
+bool
+database::delta_exists(hexenc const & ident,
+ hexenc const & base,
+ string const & table)
+{
+ results res;
+ fetch(res, one_col, any_rows,
+ "SELECT id FROM '%q' WHERE id = '%q' AND base = '%q'",
+ table.c_str(), ident().c_str(), base().c_str());
+ I((res.size() == 1) || (res.size() == 0));
+ return res.size() == 1;
+}
+
+// Return the number of rows in 'table' via SELECT COUNT(*).
+// NOTE(review): lexical_cast's template argument (presumably <int>)
+// appears to have been stripped in this diff text — confirm against
+// the original source.
+int
+database::count(string const & table)
+{
+ results res;
+ fetch(res, one_col, one_row,
+ "SELECT COUNT(*) FROM '%q'",
+ table.c_str());
+ return lexical_cast(res[0][0]);
+}
+
+// Fetch the full (base64'ed, gzipped) data blob stored under 'ident'
+// in 'table'. Before returning, recompute the content ident of the
+// fetched blob and insist it matches the requested one, guarding
+// against storage corruption.
+void
+database::get(hexenc const & ident,
+ base64< gzip > & dat,
+ string const & table)
+{
+ results res;
+ fetch(res, one_col, one_row,
+ "SELECT data FROM '%q' WHERE id = '%q'",
+ table.c_str(), ident().c_str());
+
+ // consistency check
+ base64 > rdata(res[0][0]);
+ hexenc tid;
+ calculate_ident(rdata, tid);
+ I(tid == ident);
+
+ dat = rdata;
+}
+
+// Fetch the delta transforming version 'base' into version 'ident'
+// from 'table'. Both idents must be non-empty. Note: unlike get(),
+// no ident recomputation is performed here — a delta cannot be
+// checksummed in isolation.
+void
+database::get_delta(hexenc const & ident,
+ hexenc const & base,
+ base64< gzip > & del,
+ string const & table)
+{
+ I(ident() != "");
+ I(base() != "");
+ results res;
+ fetch(res, one_col, one_row,
+ "SELECT delta FROM '%q' WHERE id = '%q' AND base = '%q'",
+ table.c_str(), ident().c_str(), base().c_str());
+ del = res[0][0];
+}
+
+// Store the full data blob 'dat' under 'ident' in 'table'. The ident
+// is recomputed from 'dat' and must agree with the caller-supplied
+// one before anything is written.
+void
+database::put(hexenc const & ident,
+ base64< gzip > const & dat,
+ string const & table)
+{
+ // consistency check
+ I(ident() != "");
+ hexenc tid;
+ calculate_ident(dat, tid);
+ I(tid == ident);
+
+ execute("INSERT INTO '%q' VALUES('%q', '%q')",
+ table.c_str(), ident().c_str(), dat().c_str());
+}
+
+
+// Store the delta taking 'base' to 'ident' in 'table'. Both idents
+// must be non-empty; no checksum is possible on the delta itself.
+void
+database::put_delta(hexenc const & ident,
+ hexenc const & base,
+ base64 > const & del,
+ string const & table)
+{
+ // nb: delta schema is (id, base, delta)
+ I(ident() != "");
+ I(base() != "");
+ execute("INSERT INTO '%q' VALUES('%q', '%q', '%q')",
+ table.c_str(),
+ ident().c_str(), base().c_str(), del().c_str());
+}
+
+// Reconstruct the full (packed) text of version 'ident' into 'dat':
+// either directly from data_table, or — when only deltas are stored —
+// by BFS through delta_table to the nearest full data object and then
+// replaying the discovered delta chain. The result's recomputed ident
+// must match 'ident' before returning.
+void
+database::get_version(hexenc const & ident,
+ base64< gzip > & dat,
+ string const & data_table,
+ string const & delta_table)
+{
+ I(ident() != "");
+ if (exists(ident, data_table))
+ {
+ // easy path
+ get(ident, dat, data_table);
+ }
+ else
+ {
+ // tricky path
+
+ // we start from the file we want to reconstruct and work *forwards*
+ // through the database, until we get to a full data object. we then
+ // trace back through the list of edges we followed to get to the data
+ // object, applying reverse deltas.
+ //
+ // the effect of this algorithm is breadth-first search, backwards
+ // through the storage graph, to discover a forwards shortest path, and
+ // then following that shortest path with delta application.
+ //
+ // we used to do this with the boost graph library, but it invovled
+ // loading too much of the storage graph into memory at any moment. this
+ // imperative version only loads the descendents of the reconstruction
+ // node, so it much cheaper in terms of memory.
+ //
+ // we also maintain a cycle-detecting set, just to be safe
+
+ L(F("reconstructing %s in %s\n") % ident % delta_table);
+ I(delta_exists(ident, delta_table));
+
+ // nb: an edge map goes in the direction of the
+ // delta, *not* the direction we discover things in,
+ // i.e. each map is of the form [newid] -> [oldid]
+
+ typedef map< hexenc, hexenc > edgemap;
+ list< shared_ptr > paths;
+
+ set< hexenc > frontier, cycles;
+ frontier.insert(ident);
+
+ bool found_root = false;
+ hexenc root("");
+
+ // BFS loop: each iteration expands the frontier one delta-hop and
+ // records that hop's edges in a fresh edgemap pushed onto 'paths'.
+ while (! found_root)
+ {
+ set< hexenc > next_frontier;
+ shared_ptr frontier_map(new edgemap());
+
+ I(!frontier.empty());
+
+ for (set< hexenc >::const_iterator i = frontier.begin();
+ i != frontier.end(); ++i)
+ {
+ if (exists(*i, data_table))
+ {
+ root = *i;
+ found_root = true;
+ break;
+ }
+ else
+ {
+ cycles.insert(*i);
+ results res;
+ fetch(res, one_col, any_rows, "SELECT base from '%q' WHERE id = '%q'",
+ delta_table.c_str(), (*i)().c_str());
+ for (size_t k = 0; k < res.size(); ++k)
+ {
+ hexenc const nxt(res[k][0]);
+
+ if (cycles.find(nxt) != cycles.end())
+ throw oops("cycle in table '" + delta_table + "', at node "
+ + (*i)() + " <- " + nxt());
+
+ next_frontier.insert(nxt);
+
+ if (frontier_map->find(nxt) == frontier_map->end())
+ {
+ L(F("inserting edge: %s <- %s\n") % (*i) % nxt);
+ frontier_map->insert(make_pair(nxt, *i));
+ }
+ else
+ L(F("skipping merge edge %s <- %s\n") % (*i) % nxt);
+ }
+ }
+ }
+ if (!found_root)
+ {
+ frontier = next_frontier;
+ paths.push_front(frontier_map);
+ }
+ }
+
+ // path built, now all we need to do is follow it back
+
+ I(found_root);
+ I(root() != "");
+ base64< gzip > begin_packed;
+ data begin;
+ get(root, begin_packed, data_table);
+ unpack(begin_packed, begin);
+ hexenc curr = root;
+
+ boost::shared_ptr app = new_piecewise_applicator();
+ app->begin(begin());
+
+ // replay: walk the recorded edge maps from 'root' back toward
+ // 'ident', applying each delta in sequence via the applicator.
+ for (list< shared_ptr >::const_iterator p = paths.begin();
+ p != paths.end(); ++p)
+ {
+ shared_ptr i = *p;
+ I(i->find(curr) != i->end());
+ hexenc const nxt = i->find(curr)->second;
+
+ L(F("following delta %s -> %s\n") % curr % nxt);
+ base64< gzip > del_packed;
+ get_delta(nxt, curr, del_packed, delta_table);
+ delta del;
+ unpack(del_packed, del);
+ apply_delta (app, del());
+ app->next();
+ curr = nxt;
+ }
+
+ string tmp;
+ app->finish(tmp);
+ data end(tmp);
+
+ hexenc final;
+ calculate_ident(end, final);
+ I(final == ident);
+ pack(end, dat);
+ }
+}
+
+
+void
+database::drop(hexenc