# # # patch "database.cc" # from [40ff00daadefb82b98ce4e263753f5e8130910af] # to [ca8dca6588274c6f6b60f304de7d138eb0ab0472] # # patch "schema.sql" # from [cf9587d6168507c6fe801b0948b3d1117741f59b] # to [2ce5689c47a0528abceb3813f431fb94b89e62ca] # # patch "schema_migration.cc" # from [d29a88209eb9605e602510c255ad7cf9f4095386] # to [b09bce5e9684764dc58c0897b82ae416531472f2] # ============================================================ --- database.cc 40ff00daadefb82b98ce4e263753f5e8130910af +++ database.cc ca8dca6588274c6f6b60f304de7d138eb0ab0472 @@ -72,7 +72,7 @@ // non-alphabetic ordering of tables in sql source files. we could create // a temporary db, write our intended schema into it, and read it back, // but this seems like it would be too rude. possibly revisit this issue. - schema("bd86f9a90b5d552f0be1fa9aee847ea0f317778b"), + schema("acf96bb0bd230523fe5fa7621864fa252c3cf11c"), __sql(NULL), transaction_level(0) {} @@ -783,9 +783,16 @@ fetch(res, one_col, one_row, query.c_str(), ident().c_str()); // consistency check + data rdata_unpacked; + if (table=="files") + { gzip<data> rdata(res[0][0]); + decode_gzip(rdata,rdata_unpacked); + } + else + { base64< gzip<data> > rdata(res[0][0]); - data rdata_unpacked; unpack(rdata, rdata_unpacked); + } hexenc<id> tid; calculate_ident(rdata_unpacked, tid); @@ -807,8 +814,15 @@ fetch(res, one_col, one_row, query.c_str(), ident().c_str(), base().c_str()); + if (table=="file_deltas") + { gzip<delta> del_packed(res[0][0]); + decode_gzip(del_packed, del); + } + else + { base64< gzip<delta> > del_packed = res[0][0]; unpack(del_packed, del); + } } void ============================================================ --- schema.sql cf9587d6168507c6fe801b0948b3d1117741f59b +++ schema.sql 2ce5689c47a0528abceb3813f431fb94b89e62ca @@ -22,14 +22,14 @@ CREATE TABLE files ( id primary key, -- strong hash of file contents - data not null -- compressed, encoded contents of a file + data not null -- compressed contents of a file ); CREATE TABLE file_deltas ( id not null, -- strong hash 
of file contents base not null, -- joins with files.id or file_deltas.id - delta not null, -- rdiff to construct current from base + delta not null, -- compressed rdiff to construct current from base unique(id, base) ); ============================================================ --- schema_migration.cc d29a88209eb9605e602510c255ad7cf9f4095386 +++ schema_migration.cc b09bce5e9684764dc58c0897b82ae416531472f2 @@ -854,6 +854,96 @@ return true; } +// I hate to duplicate this from database.cc but install_functions is private +// and gets called too late +#include <sqlite3.h> + +static void +sqlite3_unbase64_fn(sqlite3_context *f, int nargs, sqlite3_value ** args) +{ + if (nargs != 1) + { + sqlite3_result_error(f, "need exactly 1 arg to unbase64()", -1); + return; + } + data decoded; + decode_base64(base64<data>(string(sqlite3_value_text_s(args[0]))), decoded); + sqlite3_result_blob(f, decoded().c_str(), decoded().size(), SQLITE_TRANSIENT); +} + +static bool +migrate_files_BLOB(sqlite3 * sql, + char ** errmsg, + app_state *app) +{ + int res; +// app->db.install_functions(app); + I(sqlite3_create_function(sql, "unbase64", -1, + SQLITE_UTF8, NULL, + &sqlite3_unbase64_fn, + NULL, NULL) == 0); + // change the encoding of file(_delta)s + if (!move_table(sql, errmsg, + "files", + "tmp", + "(" + "id primary key," + "data not null" + ")")) + return false; + + res = logged_sqlite3_exec(sql, "CREATE TABLE files\n" + "(\n" + "id primary key, -- strong hash of file contents\n" + "data not null -- compressed contents of a file\n" + ")", NULL, NULL, errmsg); + if (res != SQLITE_OK) + return false; + + res = logged_sqlite3_exec(sql, "INSERT INTO files " + "SELECT id, unbase64(data) " + "FROM tmp", NULL, NULL, errmsg); + if (res != SQLITE_OK) + return false; + + res = logged_sqlite3_exec(sql, "DROP TABLE tmp", NULL, NULL, errmsg); + if (res != SQLITE_OK) + return false; + + if (!move_table(sql, errmsg, + "file_deltas", + "tmp", + "(" + "id not null," + "base not null," + "delta not null" + ")")) + return 
false; + + res = logged_sqlite3_exec(sql, "CREATE TABLE file_deltas\n" + "(\n" + "id not null, -- strong hash of file contents\n" + "base not null, -- joins with files.id or file_deltas.id\n" + "delta not null, -- compressed rdiff to construct current from base\n" + "unique(id, base)\n" + ")", NULL, NULL, errmsg); + if (res != SQLITE_OK) + return false; + + res = logged_sqlite3_exec(sql, "INSERT INTO file_deltas " + "SELECT id, base, unbase64(delta) " + "FROM tmp", NULL, NULL, errmsg); + if (res != SQLITE_OK) + return false; + + res = logged_sqlite3_exec(sql, "DROP TABLE tmp", NULL, NULL, errmsg); + if (res != SQLITE_OK) + return false; + + // change comment + return true; +} + void migrate_monotone_schema(sqlite3 *sql, app_state *app) { @@ -881,10 +971,13 @@ m.add("1509fd75019aebef5ac3da3a5edf1312393b70e9", &migrate_client_to_external_privkeys); + + m.add("bd86f9a90b5d552f0be1fa9aee847ea0f317778b", + &migrate_files_BLOB); // IMPORTANT: whenever you modify this to add a new schema version, you must // also add a new migration test for the new schema version. See // tests/t_migrate_schema.at for details. - m.migrate(sql, "bd86f9a90b5d552f0be1fa9aee847ea0f317778b"); + m.migrate(sql, "acf96bb0bd230523fe5fa7621864fa252c3cf11c"); }