Commit
Attempt to delay FindMissingBlocklistHashes call when we know that we need all the volumes anyway.

This is an experiment related to #2050.
kenkendk committed Oct 27, 2016
1 parent 5cb0b75 · commit 309e8fe
Showing 1 changed file with 37 additions and 33 deletions.
Duplicati/Library/Main/Operation/RecreateDatabaseHandler.cs (70 changes: 37 additions & 33 deletions)
@@ -410,42 +410,46 @@ internal void DoRun(LocalDatabase dbparent, bool updating, Library.Utility.IFilt
         }

         var progress = 0;
-        foreach(var sf in new AsyncDownloader(lst, backend))
-            using(var tmpfile = sf.TempFile)
-            using(var rd = new BlockVolumeReader(RestoreHandler.GetCompressionModule(sf.Name), tmpfile, m_options))
-            using(var tr = restoredb.BeginTransaction())
-            {
-                if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
-                {
-                    backend.WaitForComplete(restoredb, null);
-                    return;
-                }
-
-                progress++;
-                m_result.OperationProgressUpdater.UpdateProgress((((float)progress / lst.Count) * 0.1f) + 0.7f + (i * 0.1f));
-
-                var volumeid = restoredb.GetRemoteVolumeID(sf.Name);
-
-                restoredb.UpdateRemoteVolume(sf.Name, RemoteVolumeState.Uploaded, sf.Size, sf.Hash, tr);
-
-                // Update the block table so we know about the block/volume map
-                foreach(var h in rd.Blocks)
-                    restoredb.UpdateBlock(h.Key, h.Value, volumeid, tr);
-
-                // Grab all known blocklists from the volume
-                foreach(var blocklisthash in restoredb.GetBlockLists(volumeid))
-                    restoredb.UpdateBlockset(blocklisthash, rd.ReadBlocklist(blocklisthash, hashsize), tr);
-
-                // Update tables so we know if we are done
-                restoredb.FindMissingBlocklistHashes(hashsize, m_options.Blocksize, tr);
-
-                using(new Logging.Timer("CommitRestoredBlocklist"))
-                    tr.Commit();
-
-                //At this point we can patch files with data from the block volume
-                if (blockprocessor != null)
-                    blockprocessor(sf.Name, rd);
-            }
+        using (var tr = restoredb.BeginTransaction())
+        {
+            foreach (var sf in new AsyncDownloader(lst, backend))
+            {
+                using (var tmpfile = sf.TempFile)
+                using (var rd = new BlockVolumeReader(RestoreHandler.GetCompressionModule(sf.Name), tmpfile, m_options))
+                {
+                    if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
+                    {
+                        backend.WaitForComplete(restoredb, null);
+                        return;
+                    }
+
+                    progress++;
+                    m_result.OperationProgressUpdater.UpdateProgress((((float)progress / lst.Count) * 0.1f) + 0.7f + (i * 0.1f));
+
+                    var volumeid = restoredb.GetRemoteVolumeID(sf.Name);
+
+                    restoredb.UpdateRemoteVolume(sf.Name, RemoteVolumeState.Uploaded, sf.Size, sf.Hash, tr);
+
+                    // Update the block table so we know about the block/volume map
+                    foreach (var h in rd.Blocks)
+                        restoredb.UpdateBlock(h.Key, h.Value, volumeid, tr);
+
+                    // Grab all known blocklists from the volume
+                    foreach (var blocklisthash in restoredb.GetBlockLists(volumeid))
+                        restoredb.UpdateBlockset(blocklisthash, rd.ReadBlocklist(blocklisthash, hashsize), tr);
+
+                    //At this point we can patch files with data from the block volume
+                    if (blockprocessor != null)
+                        blockprocessor(sf.Name, rd);
+                }
+            }
+
+            // Update lists after we are done
+            restoredb.FindMissingBlocklistHashes(hashsize, m_options.Blocksize, tr);
+
+            using (new Logging.Timer("CommitRestoredBlocklist"))
+                tr.Commit();
+        }
     }
 }
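
To make the intent of the diff easier to see at a glance, here is a minimal, self-contained sketch of the restructuring. The types and names below (FakeTransaction, OldShape, NewShape, the sample volume names) are hypothetical stand-ins, not Duplicati's real API; only the control flow mirrors the change in this commit: the expensive FindMissingBlocklistHashes-style reconciliation and the commit move from once per downloaded volume to once after the whole batch, inside a single transaction.

// Sketch only: hypothetical stand-ins, not Duplicati's real API.
using System;
using System.Collections.Generic;

class FakeTransaction : IDisposable
{
    public void Commit() => Console.WriteLine("  commit");
    public void Dispose() { }   // a real transaction would roll back here if not committed
}

class Sketch
{
    // Stand-in for restoredb.FindMissingBlocklistHashes: an expensive reconciliation query.
    static void FindMissingBlocklistHashes() => Console.WriteLine("  reconcile blocklists (expensive)");

    // Old shape: reconciliation and commit happen once per downloaded volume.
    static void OldShape(IEnumerable<string> volumes)
    {
        foreach (var volume in volumes)
            using (var tr = new FakeTransaction())
            {
                Console.WriteLine("  index blocks of " + volume);
                FindMissingBlocklistHashes();   // N expensive calls
                tr.Commit();                    // N commits
            }
    }

    // New shape: one transaction around the loop; reconcile and commit once,
    // because all volumes are going to be processed anyway.
    static void NewShape(IEnumerable<string> volumes)
    {
        using (var tr = new FakeTransaction())
        {
            foreach (var volume in volumes)
                Console.WriteLine("  index blocks of " + volume);

            FindMissingBlocklistHashes();       // 1 expensive call
            tr.Commit();                        // 1 commit
        }
    }

    static void Main()
    {
        var volumes = new List<string> { "b1.dblock.zip", "b2.dblock.zip" };
        Console.WriteLine("old:");
        OldShape(volumes);
        Console.WriteLine("new:");
        NewShape(volumes);
    }
}

The visible tradeoff, consistent with the commit message calling this an experiment, is that progress is no longer committed after each volume; all per-volume updates land in a single commit at the end.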

