Skip to content
This repository has been archived by the owner on Feb 2, 2021. It is now read-only.

Commit

Permalink
Resolve a UI sync bug that could sometimes occur when a project was c…
Browse files Browse the repository at this point in the history
…laimed and a backers app was running at the same time.
  • Loading branch information
mikehearn committed Jan 9, 2015
1 parent e4eb05d commit 7b5d04f
Show file tree
Hide file tree
Showing 2 changed files with 97 additions and 34 deletions.
49 changes: 44 additions & 5 deletions client/src/test/java/lighthouse/model/LighthouseBackendTest.java
Original file line number Diff line number Diff line change
Expand Up @@ -153,9 +153,9 @@ private void sendServerStatus(HttpExchange exchange, LHProtos.Pledge... scrubbed
exchange.close();
}

private LHProtos.Pledge makeScrubbedPledge() {
private LHProtos.Pledge makeScrubbedPledge(Coin pledgedCoin) {
final LHProtos.Pledge pledge = LHProtos.Pledge.newBuilder()
.setTotalInputValue(Coin.COIN.value)
.setTotalInputValue(pledgedCoin.value)
.setProjectId(project.getID())
.setTimestamp(Utils.currentTimeSeconds())
.addTransactions(ByteString.copyFromUtf8("not a real tx"))
Expand Down Expand Up @@ -188,7 +188,7 @@ public void projectAddedWithServer() throws Exception {

projectModel.serverName.set("localhost");
project = projectModel.getProject();
final LHProtos.Pledge scrubbedPledge = makeScrubbedPledge();
final LHProtos.Pledge scrubbedPledge = makeScrubbedPledge(Coin.COIN);

ObservableList<Project> projects = backend.mirrorProjects(gate);
assertEquals(0, projects.size());
Expand Down Expand Up @@ -236,7 +236,7 @@ public void serverCheckStatus() throws Exception {
// Check that the server status map is updated correctly.
projectModel.serverName.set("localhost");
project = projectModel.getProject();
final LHProtos.Pledge scrubbedPledge = makeScrubbedPledge();
final LHProtos.Pledge scrubbedPledge = makeScrubbedPledge(Coin.COIN);
ObservableMap<Project, LighthouseBackend.CheckStatus> statuses = backend.mirrorCheckStatuses(gate);
assertEquals(0, statuses.size());
writeProjectToDisk();
Expand Down Expand Up @@ -736,24 +736,63 @@ protected ECDSASignature doSign(Sha256Hash input, BigInteger privateKeyForSignin

@Test
public void serverPledgeSync() throws Exception {
Utils.setMockClock();
// Test that the client backend stays in sync with the server as pledges are added and revoked.
projectModel.serverName.set("localhost");
project = projectModel.getProject();
ObservableSet<LHProtos.Pledge> openPledges = backend.mirrorOpenPledges(project, gate);
final LHProtos.Pledge scrubbedPledge = makeScrubbedPledge();
ObservableSet<LHProtos.Pledge> claimedPledges = backend.mirrorClaimedPledges(project, gate);
final LHProtos.Pledge scrubbedPledge = makeScrubbedPledge(Coin.COIN);
writeProjectToDisk();
gate.waitAndRun();

// Is now loaded from disk and doing request to server.
HttpExchange exchange = httpReqs.take();
sendServerStatus(exchange, scrubbedPledge);
gate.waitAndRun();
assertEquals(1, openPledges.size());
assertEquals(0, claimedPledges.size());

// Pledge gets revoked.
backend.refreshProjectStatusFromServer(project);
gate.waitAndRun();
sendServerStatus(httpReqs.take());
gate.waitAndRun();
assertEquals(0, openPledges.size());
assertEquals(0, claimedPledges.size());

// New pledges are made.
Utils.rollMockClock(60);
LHProtos.Pledge scrubbedPledge2 = makeScrubbedPledge(Coin.COIN.divide(2));
Utils.rollMockClock(60);
LHProtos.Pledge scrubbedPledge3 = makeScrubbedPledge(Coin.COIN.divide(2));
backend.refreshProjectStatusFromServer(project);
gate.waitAndRun();
sendServerStatus(httpReqs.take(), scrubbedPledge2, scrubbedPledge3);
gate.waitAndRun();
gate.waitAndRun();
assertEquals(2, openPledges.size());
assertEquals(0, claimedPledges.size());

// And now the project is claimed.
backend.refreshProjectStatusFromServer(project);
gate.waitAndRun();
LHProtos.ProjectStatus.Builder status = LHProtos.ProjectStatus.newBuilder();
status.setId(project.getID());
status.setTimestamp(Instant.now().getEpochSecond());
status.setValuePledgedSoFar(Coin.COIN.value);
status.addPledges(scrubbedPledge2);
status.addPledges(scrubbedPledge3);
status.setClaimedBy(ByteString.copyFrom(Sha256Hash.ZERO_HASH.getBytes()));
byte[] bits = status.build().toByteArray();
exchange = httpReqs.take();
exchange.sendResponseHeaders(HTTP_OK, bits.length);
exchange.getResponseBody().write(bits);
exchange.close();
// Must do this four times because observable sets don't collapse notifications :(
for (int i = 0; i < 4; i++) gate.waitAndRun();
assertEquals(0, openPledges.size());
assertEquals(2, claimedPledges.size());
}

private LHProtos.Pledge.Builder makeSimpleHalfPledge(ECKey signingKey, TransactionOutput output) {
Expand Down
82 changes: 53 additions & 29 deletions common/src/main/java/lighthouse/LighthouseBackend.java
Original file line number Diff line number Diff line change
Expand Up @@ -235,6 +235,7 @@ private void checkPossibleClaimTX(Transaction tx) {
}

private boolean checkClaimConfidence(Transaction transaction, TransactionConfidence conf, Project project) {
executor.checkOnThread();
switch (conf.getConfidenceType()) {
case PENDING:
int seenBy = conf.numBroadcastPeers();
Expand All @@ -245,12 +246,18 @@ private boolean checkClaimConfidence(Transaction transaction, TransactionConfide
case BUILDING:
if (conf.getDepthInBlocks() > 3)
return true; // Don't care about watching this anymore.
log.info("Claim propagated or mined");
if (project.getPaymentURL() == null || mode == Mode.SERVER)
movePledgesFromOpenToClaimed(transaction, project);
else
refreshProjectStatusFromServer(project);
diskManager.setProjectState(project, new ProjectStateInfo(ProjectState.CLAIMED, transaction.getHash()));
if (diskManager.getProjectState(project).state != ProjectState.CLAIMED) {
log.info("Claim propagated or mined");
diskManager.setProjectState(project, new ProjectStateInfo(ProjectState.CLAIMED, transaction.getHash()));
if (project.getPaymentURL() == null || mode == Mode.SERVER) {
// We have pledge data in this case, so we can consult the claim tx to see which were used.
movePledgesFromOpenToClaimed(transaction, project);
} else {
// We (probably) don't have pledge data in this case, so put us on the regular server update
// code path, where the pledges will all be marked as claimed.
jitteredServerRequery(project); // Allow the server time to notice the claim tx as well.
}
}
break;
case DEAD:
log.warn("Claim double spent! Overridden by {}", conf.getOverridingTransaction());
Expand All @@ -263,6 +270,7 @@ private boolean checkClaimConfidence(Transaction transaction, TransactionConfide
}

private void movePledgesFromOpenToClaimed(Transaction claim, Project project) {
// This only works if we have the actual pledge data.
executor.checkOnThread();
List<LHProtos.Pledge> taken = new ArrayList<>();
for (LHProtos.Pledge pledge : getOpenPledgesFor(project)) {
Expand Down Expand Up @@ -347,6 +355,7 @@ private void diskPledgesChanged(SetChangeListener.Change<? extends LHProtos.Pled
log.info("New pledge found on disk for {}", project);
// Jitter to give the dependency txns time to propagate in case somehow our source of pledges
// is faster than the P2P network (e.g. local network drive or in regtesting mode).
// TODO: This delay is pointless when we reach here during startup.
jitteredExecute(() -> checkPledgeAgainstP2PNetwork(project, added), TX_PROPAGATION_TIME_SECS);
}
}
Expand All @@ -366,16 +375,9 @@ private boolean isPledgeKnown(LHProtos.Pledge pledge) {
return false;
}

// Completes with either the given pledge, or with null if it failed verification (reason not available here).
private CompletableFuture<LHProtos.Pledge> checkPledgeAgainstP2PNetwork(Project project, LHProtos.Pledge pledge) {
private void checkPledgeAgainstP2PNetwork(Project project, LHProtos.Pledge pledge) {
// Can be on any thread.
return checkPledgesAgainstP2PNetwork(project, FXCollections.observableSet(pledge), false).thenApply(results ->
{
if (results.isEmpty())
return null;
else
return results.iterator().next();
});
checkPledgesAgainstP2PNetwork(project, FXCollections.observableSet(pledge), false);
}

// Completes with the set of pledges that passed verification.
Expand Down Expand Up @@ -416,10 +418,6 @@ private CompletableFuture<Set<LHProtos.Pledge>> checkPledgesAgainstP2PNetwork(Pr
@Override
public void onSuccess(@Nullable List<Peer> peers) {
log.info("Peers available: {}", peers);
// On backend thread here. We must be because we need to ensure only a single UTXO query is in flight
// on the P2P network at once and only triggering such queries from the backend thread is a simple
// way to achieve that. It means we block the backend thread until the query completes or times out
// but that's OK - the other tasks can wait.
executor.checkOnThread();
checkNotNull(peers);
// Do a fast delete of any peers that claim they don't support NODE_GETUTXOS. We ensure we always
Expand Down Expand Up @@ -471,7 +469,7 @@ private void doUTXOLookupsForPledges(Project project, ObservableSet<LHProtos.Ple
executor.checkOnThread();
try {
// The multiplexor issues the same query to multiple peers and verifies they're all consistent.
log.info("Querying {} peers", peers.size());
log.info("Querying {} peers {}", peers.size(), checkingAllPledges ? "(checking all pledges)" : "");
PeerUTXOMultiplexor multiplexor = new PeerUTXOMultiplexor(peers);
// The batcher queues up queries from project.verifyPledge and combines them into a single query, to
// speed things up and minimise network traffic.
Expand Down Expand Up @@ -558,20 +556,41 @@ public CompletableFuture<LHProtos.ProjectStatus> refreshProjectStatusFromServer(
future.completeExceptionally(ex);
} else {
try {
// Status contains a new list of pledges. We should update our own observable list by touching it
// as little as possible. This ensures that as updates flow through to the UI any animations look
// good (as opposed to total replacement which would animate poorly).
log.info("Processing project status:\n{}", status);
syncPledges(project, new HashSet<>(status.getPledgesList()), status.getPledgesList());
executor.checkOnThread();
// Server's view of the truth overrides our own for UI purposes, as we might have failed to
// observe the contract/claim tx if the user imported the project post-claim.
if (status.hasClaimedBy() && diskManager.getProjectState(project).state != ProjectState.CLAIMED) {
diskManager.setProjectState(project, new ProjectStateInfo(ProjectState.CLAIMED,
new Sha256Hash(status.getClaimedBy().toByteArray())));
//
// WARNING! During app startup we can end up with the p2p network racing with the server to
// tell us that the project was claimed. This is inherent - we're catching up with the block
// chain and are about to see the claim, whilst simultaneously we're asking the server for
// the status (because we don't want to wait for the block chain to sync before showing the
// user existing pledges). The code paths are slightly different because when we see the claim
// tx from the p2p network we only mark the pledges that appear in that tx as claimed, whereas
// here we mark all of them. This is due to the difference between serverless and server-assisted
// projects (serverless can have open pledges even after a claim).
if (status.hasClaimedBy()) {
if (diskManager.getProjectState(project).state != ProjectState.CLAIMED) {
diskManager.setProjectState(project, new ProjectStateInfo(ProjectState.CLAIMED,
new Sha256Hash(status.getClaimedBy().toByteArray())));
}
ObservableSet<LHProtos.Pledge> curOpenPledges = getOpenPledgesFor(project);
ObservableSet<LHProtos.Pledge> curClaimedPledges = getClaimedPledgesFor(project);
log.info("Project is claimed ({}/{})", curOpenPledges.size(), curClaimedPledges.size());
curClaimedPledges.clear();
curClaimedPledges.addAll(status.getPledgesList());
curOpenPledges.clear();
} else {
// Status contains a new list of pledges. We should update our own observable list by touching it
// as little as possible. This ensures that as updates flow through to the UI any animations look
// good (as opposed to total replacement which would animate poorly).
syncPledges(project, new HashSet<>(status.getPledgesList()), status.getPledgesList());
}
markAsCheckDone(project);
future.complete(status);
log.info("Processing of project status from server complete");
} catch (Throwable t) {
log.error("Caught exception whilst processing status", t);
future.completeExceptionally(t);
}
}
Expand All @@ -581,6 +600,9 @@ public CompletableFuture<LHProtos.ProjectStatus> refreshProjectStatusFromServer(
}

private void syncPledges(Project forProject, Set<LHProtos.Pledge> testedPledges, List<LHProtos.Pledge> verifiedPledges) {
// TODO: This whole function is too complicated and fragile. It should probably be split up for different server
// vs client vs serverless codepaths.

executor.checkOnThread();
final ObservableSet<LHProtos.Pledge> curOpenPledges = getOpenPledgesFor(forProject);

Expand All @@ -595,8 +617,7 @@ private void syncPledges(Project forProject, Set<LHProtos.Pledge> testedPledges,
newlyOpen.removeAll(curOpenPledges);
if (mode == Mode.CLIENT) {
// Servers should of course ideally not give us revoked pledges, but it may take a bit of time for the
// server to notice, especially because currently we wait for a block confirmation before noticing the
// double spends. So there can be a window of time in which we know we revoked our own pledge, but the
// server to notice. So there can be a window of time in which we know we revoked our own pledge, but the
// server keeps sending it to us.
newlyOpen.removeIf(wallet::wasPledgeRevoked);
// Remove if this is a scrubbed version of a pledge we already have i.e. because we created it, uploaded it
Expand Down Expand Up @@ -777,6 +798,9 @@ public void notifyNewBestBlock(StoredBlock block) throws VerificationException {
// any pledges were revoked). This also ensures the project page can be left open and it'll update from
// time to time, which is nice if you just want it running in the corner of a room or on a projector,
// etc.

// TODO: Get rid of this and just use a scheduled job (issue 110).

if (mode == Mode.CLIENT) {
// Don't bother with pointless/noisy server requeries until we're caught up with the chain tip.
if (block.getHeight() > peerGroup.getMostCommonChainHeight() - 2) {
Expand Down

0 comments on commit 7b5d04f

Please sign in to comment.