# # # patch "rcs_import.cc" # from [d97f64868212c2af20979793fd24b5490a98d5c1] # to [342d7f55dfb1f9fb03174fcc5ab97a6a695ecaed] # ============================================================ --- rcs_import.cc d97f64868212c2af20979793fd24b5490a98d5c1 +++ rcs_import.cc 342d7f55dfb1f9fb03174fcc5ab97a6a695ecaed @@ -1069,103 +1069,8 @@ public: }; -class revision_iterator -{ -private: - cvs_blob_index current_blob; -public: - revision_iterator(void) - : current_blob(0) - {} - - revision_iterator(const revision_iterator & ri) - : current_blob(ri.current_blob) { } - - revision_iterator & operator * (void) - { - return *this; - }; - - revision_iterator & operator = (cvs_blob_index i) - { - L(FL("assigned a value: %d") % i); - current_blob = i; - return *this; - } - - revision_iterator & operator ++ (void) - { - return *this; - } - - revision_iterator & operator ++ (int i) - { - return *this; - }; -}; - // -// After stuffing all cvs_events into blobs of events with the same -// author and changelog, we have to make sure their dependencies are -// respected. -// -void -resolve_blob_dependencies(cvs_history &cvs, - string const & branchname, - shared_ptr const & branch, - ticker & n_blobs) -{ - L(FL("branch %s currently has %d blobs.") % branchname % branch->blobs.size()); - - typedef pair< cvs_blob_index, cvs_blob_index > Edge; - typedef boost::adjacency_list< boost::vecS, boost::vecS, - boost::directedS > Graph; - - Graph g(branch->blobs.size()); - - // first split blobs which have events for the same file (i.e. intra-blob - // dependencies) - for (cvs_blob_index i = 0; i < branch->blobs.size(); ++i) - { - L(FL("blob %d contains %d events:") % i % branch->blobs[i].size()); - - set files; - - typedef vector< shared_ptr< cvs_event> >::const_iterator ity; - for(ity j = branch->blobs[i].begin(); j != branch->blobs[i].end(); ++j) - { - shared_ptr event = *j; - - if (files.find(event->path) != files.end()) - { - throw oops("splitting blobs not implemented, yet."); - } - files.insert(event->path); - - if (event->dependency) - { - // we can still use get_blob here, as there is only one blob - // per digest - blob_index_iterator k = - branch->get_blob(event->dependency->get_digest(), false); - L(FL("blob %d depends on blob %d") % i % k->second); - - add_edge(i, k->second, g); - } - } - } - - // start the topological sort, which calls our revision - // iterator to insert the revisions into our database. - revision_iterator ri; - topological_sort(g, ri); -} - - - - -// // our task here is to produce a sequence of revision descriptions // from the per-file commit records we have. we do this by rolling // forwards through the temporally sorted file-commit list @@ -1320,7 +1225,100 @@ cluster_set; typedef set cluster_set; + +class revision_iterator +{ +private: + cvs_blob_index current_blob; + +public: + revision_iterator(void) + : current_blob(0) + {} + + revision_iterator(const revision_iterator & ri) + : current_blob(ri.current_blob) { } + + revision_iterator & operator * (void) + { + return *this; + }; + + revision_iterator & operator = (cvs_blob_index i) + { + L(FL("assigned a value: %d") % i); + current_blob = i; + return *this; + } + + revision_iterator & operator ++ (void) + { + return *this; + } + + revision_iterator & operator ++ (int i) + { + return *this; + }; +}; + +// +// After stuffing all cvs_events into blobs of events with the same +// author and changelog, we have to make sure their dependencies are +// respected. 
+// void +resolve_blob_dependencies(cvs_history &cvs, + string const & branchname, + shared_ptr const & branch, + ticker & n_blobs) +{ + L(FL("branch %s currently has %d blobs.") % branchname % branch->blobs.size()); + + typedef pair< cvs_blob_index, cvs_blob_index > Edge; + typedef boost::adjacency_list< boost::vecS, boost::vecS, + boost::directedS > Graph; + + Graph g(branch->blobs.size()); + + // first split blobs which have events for the same file (i.e. intra-blob + // dependencies) + for (cvs_blob_index i = 0; i < branch->blobs.size(); ++i) + { + set files; + + typedef vector< shared_ptr< cvs_event> >::const_iterator ity; + for(ity j = branch->blobs[i].begin(); j != branch->blobs[i].end(); ++j) + { + shared_ptr event = *j; + + if (files.find(event->path) != files.end()) + { + throw oops("splitting blobs not implemented, yet."); + } + files.insert(event->path); + + if (event->dependency) + { + // we can still use get_blob here, as there is only one blob + // per digest + blob_index_iterator k = + branch->get_blob(event->dependency->get_digest(), false); + L(FL("blob %d depends on blob %d") % i % k->second); + + add_edge(i, k->second, g); + } + } + } + + // start the topological sort, which calls our revision + // iterator to insert the revisions into our database. + revision_iterator ri; + topological_sort(g, ri); +} + + +void import_branch(cvs_history & cvs, app_state & app, string const & branchname,
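
Note on the pattern used above (not part of the patch): resolve_blob_dependencies()
drives boost::topological_sort() with a hand-rolled output iterator, so every blob
index is handled in operator= the moment the sort emits it, instead of first being
collected into a container. The sketch below shows that pattern in isolation,
assuming only Boost.Graph is available; the blob_sort_output name, the example
graph and the printed message are illustrative stand-ins, not monotone code.

#include <cstddef>
#include <iostream>
#include <iterator>
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/topological_sort.hpp>

typedef boost::adjacency_list<boost::vecS, boost::vecS,
                              boost::directedS> Graph;
typedef std::size_t blob_index;

// Output-iterator shim: every vertex emitted by topological_sort() lands in
// operator=, where a real importer would assemble and commit a revision.
struct blob_sort_output
{
  typedef std::output_iterator_tag iterator_category;
  typedef void value_type;
  typedef void difference_type;
  typedef void pointer;
  typedef void reference;

  blob_sort_output & operator * ()     { return *this; }
  blob_sort_output & operator ++ ()    { return *this; }
  blob_sort_output & operator ++ (int) { return *this; }
  blob_sort_output & operator = (blob_index i)
  {
    // stand-in for "write this blob out as the next revision"
    std::cout << "ready to commit blob " << i << "\n";
    return *this;
  }
};

int main()
{
  // one vertex per blob; an edge i -> k means "blob i depends on blob k",
  // matching add_edge(i, k->second, g) in the patch
  Graph g(4);
  add_edge(1, 0, g);
  add_edge(2, 1, g);
  add_edge(3, 1, g);

  // boost::topological_sort() emits vertices in *reverse* topological order,
  // so with this edge direction the dependencies (blob 0 here) come out
  // before the blobs that need them.
  topological_sort(g, blob_sort_output());
  return 0;
}

Streaming the result through an output iterator is what lets each blob be
processed as soon as the sort emits it; the same call with
std::back_inserter(some_vector) would merely record the order for a later pass.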