diff --git a/js/activesync/folder.js b/js/activesync/folder.js index 370f877d..b4733099 100644 --- a/js/activesync/folder.js +++ b/js/activesync/folder.js @@ -3,6 +3,7 @@ define( 'rdcommon/log', '../date', '../syncbase', + '../allback', '../db/mail_rep', 'activesync/codepages/AirSync', 'activesync/codepages/AirSyncBase', @@ -19,6 +20,7 @@ define( $log, $date, $sync, + allback, mailRep, $AirSync, $AirSyncBase, @@ -844,44 +846,30 @@ ActiveSyncFolderConn.prototype = { if (this._account.conn.currentVersion.lt('12.0')) return this._syncBodies(headers, callback); - var anyErr, - pending = 1, - downloadsNeeded = 0, + var downloadsNeeded = 0, folderConn = this; - function next(err) { - if (err && !anyErr) - anyErr = err; - - if (!--pending) { - folderConn._storage.runAfterDeferredCalls(function() { - callback(anyErr, /* number downloaded */ downloadsNeeded - pending); - }); - } - } - + var latch = allback.latch(); for (var i = 0; i < headers.length; i++) { + var header = headers[i]; // We obviously can't do anything with null header references. // To avoid redundant work, we also don't want to do any fetching if we // already have a snippet. This could happen because of the extreme // potential for a caller to spam multiple requests at us before we // service any of them. (Callers should only have one or two outstanding // jobs of this and do their own suppression tracking, but bugs happen.) - if (!headers[i] || headers[i].snippet !== null) { + if (!header || header.snippet !== null) { continue; } - pending++; // This isn't absolutely guaranteed to be 100% correct, but is good enough // for indicating to the caller that we did some work. downloadsNeeded++; - this.downloadBodyReps(headers[i], options, next); + this.downloadBodyReps(header, options, latch.defer(header.suid)); } - - // by having one pending item always this handles the case of not having any - // snippets needing a download and also returning in the next tick of the - // event loop. 
- window.setZeroTimeout(next); + latch.then(function(results) { + callback(allback.extractErrFromCallbackArgs(results), downloadsNeeded); + }); }, downloadBodyReps: lazyConnection(1, function(header, options, callback) { @@ -1024,20 +1012,8 @@ ActiveSyncFolderConn.prototype = { return; } - var status, anyErr, - i = 0, - pending = 1; - - function next(err) { - if (err && !anyErr) - anyErr = err; - - if (!--pending) { - folderConn._storage.runAfterDeferredCalls(function() { - callback(anyErr); - }); - } - } + var latch = allback.latch(); + var iHeader = 0; var e = new $wbxml.EventParser(); var base = [as.Sync, as.Collections, as.Collection]; @@ -1045,27 +1021,30 @@ ActiveSyncFolderConn.prototype = { folderConn.syncKey = node.children[0].textContent; }); e.addEventListener(base.concat(as.Status), function(node) { - status = node.children[0].textContent; + var status = node.children[0].textContent; + if (status !== asEnum.Status.Success) { + latch.defer('status')('unknown'); + } }); e.addEventListener(base.concat(as.Responses, as.Fetch, as.ApplicationData, em.Body), function(node) { // We assume the response is in the same order as the request! 
- var header = headers[i++]; + var header = headers[iHeader++]; var bodyContent = node.children[0].textContent; + var latchCallback = latch.defer(header.suid); - pending++; folderConn._storage.getMessageBody(header.suid, header.date, function(body) { - folderConn._updateBody(header, body, bodyContent, false, next); + folderConn._updateBody(header, body, bodyContent, false, + latchCallback); }); }); e.run(aResponse); - if (status !== asEnum.Status.Success) - return next('unknown'); - - next(null); + latch.then(function(results) { + callback(allback.extractErrFromCallbackArgs(results)); + }); }); }, @@ -1105,10 +1084,12 @@ ActiveSyncFolderConn.prototype = { } }; + var latch = allback.latch(); this._storage.updateMessageHeader(header.date, header.id, false, header, - bodyInfo); - this._storage.updateMessageBody(header, bodyInfo, {}, event); - this._storage.runAfterDeferredCalls(callback.bind(null, null, bodyInfo)); + bodyInfo, latch.defer('header')); + this._storage.updateMessageBody(header, bodyInfo, {}, event, + latch.defer('body')); + latch.then(callback.bind(null, null, bodyInfo, /* flushed */ false)); }, sync: lazyConnection(1, function asfc_sync(accuracyStamp, doneCallback, @@ -1140,6 +1121,7 @@ ActiveSyncFolderConn.prototype = { return; } + var latch = allback.latch(); for (var iter in Iterator(added)) { var message = iter[1]; // If we already have this message, it's probably because we moved it as @@ -1148,8 +1130,8 @@ ActiveSyncFolderConn.prototype = { if (storage.hasMessageWithServerId(message.header.srvid)) continue; - storage.addMessageHeader(message.header, message.body); - storage.addMessageBody(message.header, message.body); + storage.addMessageHeader(message.header, message.body, latch.defer()); + storage.addMessageBody(message.header, message.body, latch.defer()); addedMessages++; } @@ -1175,7 +1157,7 @@ ActiveSyncFolderConn.prototype = { // Previously, this callback was called without safeguards in place // to prevent issues caused by the message 
variable changing, // so it is now bound to the function. - }.bind(null, message), /* body hint */ null); + }.bind(null, message), /* body hint */ null, latch.defer()); changedMessages++; // XXX: update bodies } @@ -1187,7 +1169,7 @@ ActiveSyncFolderConn.prototype = { if (!storage.hasMessageWithServerId(messageGuid)) continue; - storage.deleteMessageByServerId(messageGuid); + storage.deleteMessageByServerId(messageGuid, latch.defer()); deletedMessages++; } @@ -1195,9 +1177,9 @@ ActiveSyncFolderConn.prototype = { var messagesSeen = addedMessages + changedMessages + deletedMessages; // Do not report completion of sync until all of our operations have - // been persisted to our in-memory database. (We do not wait for - // things to hit the disk.) - storage.runAfterDeferredCalls(function() { + // been persisted to our in-memory database. We tell this via their + // callbacks having completed. + latch.then(function() { // Note: For the second argument here, we report the number of // messages we saw that *changed*. This differs from IMAP, which // reports the number of messages it *saw*. diff --git a/js/allback.js b/js/allback.js index 729b0c2f..7c218a32 100644 --- a/js/allback.js +++ b/js/allback.js @@ -26,7 +26,8 @@ define(['exports'], function(exports) { * potential memory leaks is not currently provided, but could be. 
*/ exports.allbackMaker = function allbackMaker(names, allDoneCallback) { - var aggrData = {}, callbacks = {}, waitingFor = names.concat(); + var aggrData = Object.create(null), callbacks = {}, + waitingFor = names.concat(); names.forEach(function(name) { // (build a consistent shape for aggrData regardless of callback ordering) @@ -92,7 +93,8 @@ exports.allbackMaker = function allbackMaker(names, allDoneCallback) { exports.latch = function() { var ready = false; var deferred = {}; - var results = {}; + // Avoid Object.prototype and any for-enumerations getting tripped up + var results = Object.create(null); var count = 0; deferred.promise = new Promise(function(resolve, reject) { @@ -139,9 +141,37 @@ exports.latch = function() { }; }; +/** + * Given the results object from an allback.latch() where named callbacks were + * used (or else we won't save the result!) and the callbacks use the form of + * callback(errIfAny, ...), find and return the first error object, or return + * null if none was found. + * + * Important notes: + * - Use this for callback-based idioms in the node style + * - You MUST use latch.defer(name), not latch.defer()! + * - Because of JS object property ordering, we actually will return the result + * of the first callback that fired with an error value, but you probably + * do not want to be depending on this too much. + */ +exports.extractErrFromCallbackArgs = function(results) { + // If there are any errors, find and propagate. 
+ var anyErr = null; + for (var key in results) { + var args = results[key]; + var errIfAny = args[0]; + if (errIfAny) { + anyErr = errIfAny; + break; + } + } + return anyErr; +}; + exports.latchedWithRejections = function(namedPromises) { return new Promise(function(resolve, reject) { - var results = {}; + // Avoid Object.prototype + var results = Object.create(null); var pending = 0; Object.keys(namedPromises).forEach(function(name) { pending++; diff --git a/js/db/mail_rep.js b/js/db/mail_rep.js index f136a79a..ee4f663a 100644 --- a/js/db/mail_rep.js +++ b/js/db/mail_rep.js @@ -107,10 +107,6 @@ function makeHeaderInfo(raw) { * from the body without this. * } * @key[size Number] - * @key[to @listof[NameAddressPair]] - * @key[cc @listof[NameAddressPair]] - * @key[bcc @listof[NameAddressPair]] - * @key[replyTo NameAddressPair] * @key[attaching #:optional AttachmentInfo]{ * Because of memory limitations, we need to encode and attach attachments * in small pieces. An attachment in the process of being attached is diff --git a/js/ext/activesync-lib b/js/ext/activesync-lib index fbdc7279..e7fd6f3c 160000 --- a/js/ext/activesync-lib +++ b/js/ext/activesync-lib @@ -1 +1 @@ -Subproject commit fbdc727926a05901dfea6583cd84a4c1d5f5c371 +Subproject commit e7fd6f3c85101b272de089c4db97420c92511ad3 diff --git a/js/ext/rdcommon/log.js b/js/ext/rdcommon/log.js index 28f840f0..13dbec35 100644 --- a/js/ext/rdcommon/log.js +++ b/js/ext/rdcommon/log.js @@ -1795,14 +1795,13 @@ exports.DEBUG_realtimeLogEverything = function(dumpFunc) { reportNewLogger: function(logger, parentLogger) { logger._actor = { __loggerFired: function() { - // Destructively pop it off the end of the list; inductively this - // should always be index 0, but you never know! - var entry = logger._entries.pop(); + var entry = logger._entries[logger._entries.length - 1]; // Let's look like: LoggerType(semanticIdent)["name", ...] 
dumpFunc(logger.__defName + '(' + logger._ident + ')' + JSON.stringify(entry) + '\n'); } }; + return parentLogger; } }; UNDER_TEST_DEFAULT = EverythingTester; diff --git a/js/imap/folder.js b/js/imap/folder.js index 302f1543..de061a9e 100644 --- a/js/imap/folder.js +++ b/js/imap/folder.js @@ -561,8 +561,8 @@ console.log('BISECT CASE', serverUIDs.length, 'curDaysDelta', curDaysDelta); }, /** - * Initiates a request to download all body reps. If a snippet has not yet - * been generated this will also generate the snippet... + * Initiates a request to download all body reps for a single message. If a + * snippet has not yet been generated this will also generate the snippet... */ _lazyDownloadBodyReps: function(header, options, callback) { if (typeof(options) === 'function') { @@ -585,6 +585,7 @@ console.log('BISECT CASE', serverUIDs.length, 'curDaysDelta', curDaysDelta); // build the list of requests based on downloading required. var requests = []; + var latch = $allback.latch(); bodyInfo.bodyReps.forEach(function(rep, idx) { // attempt to be idempotent by only requesting the bytes we need if we // actually need them... @@ -595,7 +596,8 @@ console.log('BISECT CASE', serverUIDs.length, 'curDaysDelta', curDaysDelta); uid: header.srvid, partInfo: rep._partInfo, bodyRepIndex: idx, - createSnippet: idx === bodyRepIdx + createSnippet: idx === bodyRepIdx, + headerUpdatedCallback: latch.defer(header.srvid + '-' + rep._partInfo) }; // default to the entire remaining email. 
We use the estimate * largish @@ -655,67 +657,34 @@ console.log('BISECT CASE', serverUIDs.length, 'curDaysDelta', curDaysDelta); requests ); - self._handleBodyFetcher(fetch, header, bodyInfo, function(err) { - callback(err, bodyInfo); + self._handleBodyFetcher(fetch, header, bodyInfo, latch.defer('body')); + latch.then(function(results) { + callback($allback.extractErrFromCallbackArgs(results), bodyInfo); }); }; this._storage.getMessageBody(header.suid, header.date, gotBody); }, - /** - * Download a snippet and a portion of the bodyRep to go along with it... In - * all cases we expect the bodyReps to be completely empty as we also will - * generate the snippet in the case of completely downloading a snippet. - */ - _downloadSnippet: function(header, callback) { - var self = this; - this._storage.getMessageBody(header.suid, header.date, function(body) { - // attempt to find a rep - var bodyRepIndex = $imapchew.selectSnippetBodyRep(header, body); - - // no suitable snippet we are done. - if (bodyRepIndex === -1) - return callback(); - - var rep = body.bodyReps[bodyRepIndex]; - var requests = [{ - uid: header.srvid, - bodyRepIndex: bodyRepIndex, - partInfo: rep._partInfo, - bytes: [0, NUMBER_OF_SNIPPET_BYTES], - createSnippet: true - }]; - - var fetch = new $imapbodyfetcher.BodyFetcher( - self._conn, - $imapsnippetparser.SnippetParser, - requests - ); - - self._handleBodyFetcher( - fetch, - header, - body, - callback - ); - }); - }, - /** * Wrapper around common bodyRep updates... */ - _handleBodyFetcher: function(fetcher, header, body, callback) { + _handleBodyFetcher: function(fetcher, header, body, bodyUpdatedCallback) { var event = { changeDetails: { bodyReps: [] } }; - var self = this; + // This will be invoked once per body part that is successfully downloaded + // or fails to download. 
+ fetcher.onparsed = function(err, req, resp) { + if (err) { + req.headerUpdatedCallback(err); + return; + } - fetcher.onparsed = function(req, resp) { - $imapchew.updateMessageWithFetch(header, body, req, resp, self._LOG); + $imapchew.updateMessageWithFetch(header, body, req, resp, this._LOG); header.bytesToDownloadForBodyDisplay = $imapchew.calculateBytesToDownloadForImapBodyDisplay(body); @@ -723,31 +692,30 @@ console.log('BISECT CASE', serverUIDs.length, 'curDaysDelta', curDaysDelta); // Always update the header so that we can save // bytesToDownloadForBodyDisplay, which will tell the UI whether // or not we can show the message body right away. - self._storage.updateMessageHeader( + this._storage.updateMessageHeader( header.date, header.id, false, header, - body + body, + req.headerUpdatedCallback.bind(null, null) // no error ); event.changeDetails.bodyReps.push(req.bodyRepIndex); - }; - - fetcher.onerror = function(e) { - callback(e); - }; + }.bind(this); + // This will be invoked after all of the onparsed events have fired. fetcher.onend = function() { - self._storage.updateMessageBody( + // Since we no longer have any updates to make to the body, we want to + // finally update it now. + this._storage.updateMessageBody( header, body, {}, - event + event, + bodyUpdatedCallback.bind(null, null) // we do not/cannot error ); - - self._storage.runAfterDeferredCalls(callback); - }; + }.bind(this); }, /** @@ -755,21 +723,8 @@ console.log('BISECT CASE', serverUIDs.length, 'curDaysDelta', curDaysDelta); * module deps are loaded. 
*/ _lazyDownloadBodies: function(headers, options, callback) { - var pending = 1, downloadsNeeded = 0; - - var self = this; - var anyErr; - function next(err) { - if (err && !anyErr) - anyErr = err; - - if (!--pending) { - self._storage.runAfterDeferredCalls(function() { - callback(anyErr, /* number downloaded */ downloadsNeeded - pending); - }); - } - } - + var downloadsNeeded = 0; + var latch = $allback.latch(); for (var i = 0; i < headers.length; i++) { // We obviously can't do anything with null header references. // To avoid redundant work, we also don't want to do any fetching if we @@ -777,21 +732,19 @@ console.log('BISECT CASE', serverUIDs.length, 'curDaysDelta', curDaysDelta); // potential for a caller to spam multiple requests at us before we // service any of them. (Callers should only have one or two outstanding // jobs of this and do their own suppression tracking, but bugs happen.) - if (!headers[i] || headers[i].snippet !== null) { + var header = headers[i]; + if (!header || header.snippet !== null) { continue; } - pending++; // This isn't absolutely guaranteed to be 100% correct, but is good enough // for indicating to the caller that we did some work. downloadsNeeded++; - this.downloadBodyReps(headers[i], options, next); + this.downloadBodyReps(headers[i], options, latch.defer(header.suid)); } - - // by having one pending item always this handles the case of not having any - // snippets needing a download and also returning in the next tick of the - // event loop. - window.setZeroTimeout(next); + latch.then(function(results) { + callback($allback.extractErrFromCallbackArgs(results), downloadsNeeded); + }); }, /** diff --git a/js/imap/protocol/bodyfetcher.js b/js/imap/protocol/bodyfetcher.js index c7bc8b45..e1bcd2ee 100644 --- a/js/imap/protocol/bodyfetcher.js +++ b/js/imap/protocol/bodyfetcher.js @@ -17,9 +17,8 @@ define(function() { * // in all examples item is a single element in the * // array (third argument). 
* - * fetcher.onerror = function(err, item) {}; - * fetcher.ondata = function(parsed, item) {} - * fetcher.onend = function() {} + * fetcher.onparsed = function(err, item, parsed) {} + * fetcher.onend = function() {} * */ function BodyFetcher(connection, parserClass, list) { @@ -29,8 +28,7 @@ function BodyFetcher(connection, parserClass, list) { this.pending = list.length; - this.onerror = null; - this.ondata = null; + this.onparsed = null; this.onend = null; list.forEach(this._fetch, this); @@ -38,8 +36,6 @@ BodyFetcher.prototype = { _fetch: function(request) { - var self = this; - this.connection.listMessages( request.uid, [ @@ -51,9 +47,7 @@ BodyFetcher.prototype = { { byUid: true }, function (err, messages) { if (err) { - // if fetch provides an error we expect this request to be - // completed so we resolve here... - self._resolve(err, request); + this._resolve(err, request, null); } else { var parser = new this.parserClass(request.partInfo); var msg = messages[0]; @@ -66,32 +60,19 @@ BodyFetcher.prototype = { } if (!body) { - self.resolve('no body', request); + this._resolve('no body', request, null); } else { parser.parse(body); - self._resolve(null, request, parser.complete()); + this._resolve(null, request, parser.complete()); } } }.bind(this)); }, - _resolve: function() { - var args = Array.slice(arguments); - var err = args[0]; - - if (err) { - if (this.onerror) { - this.onerror.apply(this, args); - } - } else { - if (this.onparsed) { - // get rid of the error object - args.shift(); - - this.onparsed.apply(this, args); - } + _resolve: function(err, req, result) { + if (this.onparsed) { + this.onparsed(err, req, result); } - if (!--this.pending && this.onend) { this.onend(); } diff --git a/js/imap/protocol/sync.js b/js/imap/protocol/sync.js index 102aa2f2..58d8dd56 100644 --- a/js/imap/protocol/sync.js +++ b/js/imap/protocol/sync.js @@ -1,10 +1,12 @@ define( [ '../imapchew', + '../../allback', 'exports' ], 
function( $imapchew, + allback, exports ) { @@ -115,50 +117,30 @@ Sync.prototype = { }, _beginSync: function() { - // pending operations - var pending = 1; - var self = this; - - function next() { - if (!--pending) { - self.storage.runAfterDeferredCalls(function() { - if (!self.oncomplete) - return; - - // Need a timeout here because we batch slices in SliceBridgeProxy and - // only want to call oncomplete after all those slices have been sent - // to keep the order the same. - window.setZeroTimeout( - function() { - self.oncomplete( - self.newUIDs.length, - self.knownUIDs.length - ); - } - ); - }); - } - } + var latch = allback.latch(); if (this.newUIDs.length) { - pending++; - this._handleNewUids(next); + this._handleNewUids(latch.defer('new')); } if (this.knownUIDs.length) { - pending++; - this._handleKnownUids(next); + this._handleKnownUids(latch.defer('known')); } - window.setZeroTimeout(next); + latch.then(function() { + if (!this.oncomplete) { + return; + } + + this.oncomplete(this.newUIDs.length, this.knownUIDs.length); + }.bind(this)); }, _handleNewUids: function(callback) { var pendingSnippets = []; var self = this; - - this.connection.listMessages( + this.connection.listMessages( this.newUIDs, INITIAL_FETCH_PARAMS, { byUid: true }, @@ -172,6 +154,7 @@ Sync.prototype = { return; } + var latch = allback.latch(); messages.forEach(function(msg) { // Filter out the \Recent flag; our old imap library didn't // pass it along (complaining about it being useless), so we @@ -198,8 +181,10 @@ Sync.prototype = { // flush our body/header information ? should we do some sorting, // etc.. here or just let the UI update ASAP? 
- self.storage.addMessageHeader(chewRep.header, chewRep.bodyInfo); - self.storage.addMessageBody(chewRep.header, chewRep.bodyInfo); + self.storage.addMessageHeader( + chewRep.header, chewRep.bodyInfo, latch.defer()); + self.storage.addMessageBody( + chewRep.header, chewRep.bodyInfo, latch.defer()); } catch (ex) { // it's fine for us to not add bad messages to the database @@ -209,7 +194,7 @@ Sync.prototype = { } }.bind(this)); - callback(); + latch.then(callback); // our caller doesn't care about its args }.bind(this)); }, @@ -226,6 +211,7 @@ Sync.prototype = { return; } + var latch = allback.latch(); messages.forEach(function(msg, i) { console.log('FETCHED', i, 'known id', self.knownHeaders[i].id, 'known srvid', self.knownHeaders[i].srvid, @@ -270,8 +256,9 @@ Sync.prototype = { console.warn(' FLAGS: "' + header.flags.toString() + '" VS "' + msg.flags.toString() + '"'); header.flags = msg.flags; - self.storage.updateMessageHeader(header.date, header.id, true, - header, /* body hint */ null); + self.storage.updateMessageHeader( + header.date, header.id, true, header, /* body hint */ null, + latch.defer()); } else { self.storage.unchangedMessageHeader(header); @@ -282,7 +269,7 @@ Sync.prototype = { self._updateProgress(KNOWN_HEADERS_AGGR_COST + KNOWN_HEADERS_PER_COST * self.knownUIDs.length); - callback(); + latch.then(callback); // our caller doesn't care about its args }.bind(this)); } diff --git a/js/mailapi.js b/js/mailapi.js index 154d41a2..ffb5f18e 100644 --- a/js/mailapi.js +++ b/js/mailapi.js @@ -2633,7 +2633,7 @@ MailAPI.prototype = { this._liveBodies[msg.handle] = body; } - req.callback.call(null, body); + req.callback.call(null, body, req.suid); return true; }, diff --git a/js/mailslice.js b/js/mailslice.js index d5c45e03..bf211c73 100755 --- a/js/mailslice.js +++ b/js/mailslice.js @@ -1057,6 +1057,7 @@ FolderStorage.prototype = { headerBlocks: this._dirtyHeaderBlocks, bodyBlocks: this._dirtyBodyBlocks, }; + this._LOG.generatePersistenceInfo(pinfo); 
this._dirtyHeaderBlocks = {}; this._dirtyBodyBlocks = {}; this._dirty = false; @@ -2048,6 +2049,17 @@ FolderStorage.prototype = { date, uid), iInfo = infoTuple[0], info = infoTuple[1]; + // For database consistency reasons in the worst-case we want to make sure + // that we don't update the block info structure until we are also + // simultaneously updating the block itself. We use null for sentinel + // values. We only update when non-null. + var updateInfo = { + startTS: null, + startUID: null, + endTS: null, + endUID: null + }; + // -- not in a block, find or create one if (!info) { // - Create a block if no blocks exist at all. @@ -2065,12 +2077,12 @@ FolderStorage.prototype = { // We are chronologically/UID-ically more recent, so check the end range // for expansion needs. if (STRICTLY_AFTER(date, info.endTS)) { - info.endTS = date; - info.endUID = uid; + updateInfo.endTS = date; + updateInfo.endUID = uid; } else if (date === info.endTS && uid > info.endUID) { - info.endUID = uid; + updateInfo.endUID = uid; } } // - Is there a preceding/younger dude and we fit? @@ -2082,12 +2094,12 @@ FolderStorage.prototype = { // We are chronologically less recent, so check the start range for // expansion needs. if (BEFORE(date, info.startTS)) { - info.startTS = date; - info.startUID = uid; + updateInfo.startTS = date; + updateInfo.startUID = uid; } else if (date === info.startTS && uid < info.startUID) { - info.startUID = uid; + updateInfo.startUID = uid; } } // Any adjacent blocks at this point are overflowing, so it's now a @@ -2100,12 +2112,12 @@ FolderStorage.prototype = { // We are chronologically less recent, so check the start range for // expansion needs. 
if (BEFORE(date, info.startTS)) { - info.startTS = date; - info.startUID = uid; + updateInfo.startTS = date; + updateInfo.startUID = uid; } else if (date === info.startTS && uid < info.startUID) { - info.startUID = uid; + updateInfo.startUID = uid; } } // - It must be the trailing dude @@ -2114,12 +2126,12 @@ FolderStorage.prototype = { // We are chronologically/UID-ically more recent, so check the end range // for expansion needs. if (STRICTLY_AFTER(date, info.endTS)) { - info.endTS = date; - info.endUID = uid; + updateInfo.endTS = date; + updateInfo.endUID = uid; } else if (date === info.endTS && uid > info.endUID) { - info.endUID = uid; + updateInfo.endUID = uid; } } } @@ -2127,11 +2139,26 @@ FolderStorage.prototype = { function processBlock(block) { // 'this' gets explicitly bound // -- perform the insertion + // - update block info + if (updateInfo.startTS !== null) { + info.startTS = updateInfo.startTS; + } + if (updateInfo.startUID !== null) { + info.startUID = updateInfo.startUID; + } + if (updateInfo.endTS !== null) { + info.endTS = updateInfo.endTS; + } + if (updateInfo.endUID !== null) { + info.endUID = updateInfo.endUID; + } // We could do this after the split, but this makes things simpler if // we want to factor in the newly inserted thing's size in the // distribution of bytes. info.estSize += estSizeCost; info.count++; + + // - actual insertion insertInBlock(thing, uid, info, block); // -- split if necessary @@ -2177,26 +2204,6 @@ FolderStorage.prototype = { this._loadBlock(type, info, processBlock.bind(this)); }, - /** - * Run the given callback after all pending deferred calls have run. - * - * @param {Function} callback - * @param {Boolean} [alwaysDefer=false] - * Should we defer the callback to the next turn of the event loop even - * if there's no reason to wait? Arguably this is what we should always - * do (at least by default) for human sanity purposes, but existing code - * would need to be audited. 
- */ - runAfterDeferredCalls: function(callback, alwaysDefer) { - if (this._deferredCalls.length) { - this._deferredCalls.push(callback); - } else if (alwaysDefer) { - window.setZeroTimeout(callback); - } else { - callback(); - } - }, - /** * Run deferred calls until we run out of deferred calls or _pendingLoads goes * non-zero again. @@ -4082,10 +4089,11 @@ FolderStorage.prototype = { * Retrieve and update a header by locating it */ updateMessageHeaderByServerId: function(srvid, partOfSync, - headerOrMutationFunc, body) { + headerOrMutationFunc, body, + callback) { if (this._pendingLoads.length) { this._deferredCalls.push(this.updateMessageHeaderByServerId.bind( - this, srvid, partOfSync, headerOrMutationFunc)); + this, srvid, partOfSync, headerOrMutationFunc, body, callback)); return; } @@ -4103,7 +4111,8 @@ FolderStorage.prototype = { // future work: this method will duplicate some work to re-locate // the header; we could try and avoid doing that. this.updateMessageHeader( - header.date, header.id, partOfSync, headerOrMutationFunc, body); + header.date, header.id, partOfSync, headerOrMutationFunc, body, + callback); return; } } @@ -4225,12 +4234,13 @@ FolderStorage.prototype = { * Currently, the mapping is a naive, always-in-memory (at least as long as * the FolderStorage is in memory) map. 
*/ - deleteMessageByServerId: function(srvid) { + deleteMessageByServerId: function(srvid, callback) { if (!this._serverIdHeaderBlockMapping) throw new Error('Server ID mapping not supported for this storage!'); if (this._pendingLoads.length) { - this._deferredCalls.push(this.deleteMessageByServerId.bind(this, srvid)); + this._deferredCalls.push(this.deleteMessageByServerId.bind(this, srvid, + callback)); return; } @@ -4245,7 +4255,7 @@ FolderStorage.prototype = { for (var i = 0; i < headers.length; i++) { var header = headers[i]; if (header.srvid === srvid) { - this.deleteMessageHeaderAndBodyUsingHeader(header); + this.deleteMessageHeaderAndBodyUsingHeader(header, callback); return; } } @@ -4661,6 +4671,8 @@ var LOGFAB = exports.LOGFAB = $log.register(module, { updateMessageHeader: { date: false, id: false, srvid: false }, updateMessageBody: { date: false, id: false }, + generatePersistenceInfo: {}, + // For now, logging date and uid is useful because the general logging // level will show us if we are trying to redundantly delete things. // Also, date and uid are opaque identifiers with very little entropy @@ -4680,7 +4692,8 @@ var LOGFAB = exports.LOGFAB = $log.register(module, { syncedToDawnOfTime: {}, }, TEST_ONLY_events: { - addMessageBody: { body: false } + addMessageBody: { body: false }, + generatePersistenceInfo: { details: false } }, asyncJobs: { loadBlock: { type: false, blockId: false }, diff --git a/js/pop3/sync.js b/js/pop3/sync.js index 0be058b4..d6f9e115 100644 --- a/js/pop3/sync.js +++ b/js/pop3/sync.js @@ -107,9 +107,7 @@ Pop3FolderSyncer.prototype = { for (var k in results) { err = results[k][0]; } - storage.runAfterDeferredCalls(function() { - callback(err, headers.length); - }); + callback(err, headers.length); }); }), @@ -538,9 +536,7 @@ Pop3FolderSyncer.prototype = { checkpoint: function(next) { // Every N messages, wait for everything to be stored to // disk and saved in the database. Then proceed. 
- this.storage.runAfterDeferredCalls(function() { - this.account.__checkpointSyncCompleted(next, 'syncBatch'); - }.bind(this)); + this.account.__checkpointSyncCompleted(next, 'syncBatch'); }.bind(this), progress: function fetchProgress(evt) { // Store each message as it is retrieved. @@ -582,7 +578,7 @@ Pop3FolderSyncer.prototype = { // When all of the messages have been persisted to disk, indicate // that we've successfully synced. Refresh our view of the world. - this.storage.runAfterDeferredCalls(fetchDoneCb); + fetchDoneCb(); }.bind(this)); } diff --git a/js/searchfilter.js b/js/searchfilter.js index 8c354cfe..5584dad2 100755 --- a/js/searchfilter.js +++ b/js/searchfilter.js @@ -79,6 +79,7 @@ define( [ 'rdcommon/log', './util', + './allback', './syncbase', './date', './htmlchew', @@ -88,6 +89,7 @@ define( function( $log, $util, + allback, $syncbase, $date, htmlchew, @@ -545,16 +547,41 @@ console.log('sf: creating SearchSlice:', phrase); if (whatToSearch.subject) filters.push(new SubjectFilter( phrase, 1, CONTEXT_CHARS_BEFORE, CONTEXT_CHARS_AFTER)); - if (whatToSearch.body) + if (whatToSearch.body) { filters.push(new BodyFilter( phrase, whatToSearch.body === 'yes-quotes', 1, CONTEXT_CHARS_BEFORE, CONTEXT_CHARS_AFTER)); + // A latch for use to make sure that _gotMessages' checkHandle calls are + // sequential even when _gotMessages is invoked with no headers and + // !moreMessagesComing. + // + // (getBody calls are inherently ordered, but if we have no headers, then + // the function call that decides whether we fetch more messages needs some + // way to wait for the body loads to occur. Previously we used + // storage.runAfterDeferredCalls, but that's now removed because it was a + // footgun and its semantics were slightly broken to boot.) + // + // TODO: In the future, refactor this logic into a more reusable + // iterator/stream mechanism so that this class doesn't have to deal with + // it. 
+ // + // The usage pattern is this: + // - Whenever we have any bodies to fetch, we create a latch and assign it + // here. + // - Whenever we don't have any bodies to fetch, we use a .then() on the + // current value of the latch, if there is one. + // - We clear this in _gotMessages' checkHandle in the case we are calling + // reqGrow. This avoids the latch hanging around with potential GC + // implications and provides a nice invariant. + this._pendingBodyLoadLatch = null; + } this.filterer = new MessageFilterer(filters); this._bound_gotOlderMessages = this._gotMessages.bind(this, 1); this._bound_gotNewerMessages = this._gotMessages.bind(this, -1); + this.desiredHeaders = $syncbase.INITIAL_FILL_SIZE; this.reset(); } @@ -652,7 +679,17 @@ SearchSlice.prototype = { } } - var checkHandle = function checkHandle(headers, bodies) { + /** + * Called once we have all the data needed to actually check for matches. + * Specifically, we may have had to fetch the bodies. + * + * @param {MailHeader[]} headers + * @param {Object} [resolvedGetBodyCalls] + * The results of an allback.latch() resolved by getBody calls. The + * keys are the headers' suid's and the values are the gotBody argument + * callback list, which will look like [MailBody, header/message suid]. + */ + var checkHandle = function checkHandle(headers, resolvedGetBodyCalls) { if (!this._bridgeHandle) { return; } @@ -661,7 +698,8 @@ SearchSlice.prototype = { var matchPairs = []; for (i = 0; i < headers.length; i++) { var header = headers[i], - body = bodies ? bodies[i] : null; + body = resolvedGetBodyCalls ? 
resolvedGetBodyCalls[header.id][0] : + null; this._headersChecked++; var matchObj = this.filterer.testMessage(header, body); if (matchObj) @@ -718,6 +756,7 @@ SearchSlice.prototype = { if (wantMore) { console.log(logPrefix, 'requesting more because no matches but want more'); + this._pendingBodyLoadLatch = null; this.reqGrow(dir, false, true); } else { @@ -736,30 +775,25 @@ SearchSlice.prototype = { if (this.filterer.bodiesNeeded) { // To batch our updates to the UI, just get all the bodies then advance // to the next stage of processing. - var bodies = []; - var gotBody = function(body) { - if (!body) { - console.log(logPrefix, 'failed to get a body for: ', - headers[bodies.length].suid, - headers[bodies.length].subject); + + // See the docs in the constructor on _pendingBodyLoadLatch. + if (headers.length) { + var latch = this._pendingBodyLoadLatch = allback.latch(); + for (var i = 0; i < headers.length; i++) { + var header = headers[i]; + this._storage.getMessageBody( + header.suid, header.date, latch.defer(header.id)); } - bodies.push(body); - if (bodies.length === headers.length) { - checkHandle(headers, bodies); + latch.then(checkHandle.bind(null, headers)); + } else { + // note that we are explicitly binding things so the existing result + // from _pendingBodyLoadLatch will be positionally extra and unused. + var deferredCheck = checkHandle.bind(null, headers, null); + if (this._pendingBodyLoadLatch) { + this._pendingBodyLoadLatch.then(deferredCheck); + } else { + deferredCheck(); } - }; - for (var i = 0; i < headers.length; i++) { - var header = headers[i]; - this._storage.getMessageBody(header.suid, header.date, gotBody); - } - if (!headers.length) { - // To maintain consistent ordering for correctness we need to make sure - // we won't call checkHeaders (to trigger additional fetches if - // required) before any outstanding getMessageBody calls return. 
- // runAfterDeferredCalls guarantees us consistency with how - // getMessageBody operates in this scenario. - this._storage.runAfterDeferredCalls( - checkHandle.bind(null, headers, null)); } } else { diff --git a/package.json b/package.json index 20461969..134d4a1d 100644 --- a/package.json +++ b/package.json @@ -23,7 +23,7 @@ "url": "https://github.com/mozilla-b2g/gaia-email-libs-and-more/issues" }, "devDependencies": { - "mail-fakeservers": "~0.0.28", + "mail-fakeservers": "~0.0.29", "mozilla-download": "~0.4" }, "volo": { diff --git a/test/unit/resources/folder_storage_shared.js b/test/unit/resources/folder_storage_shared.js index 50cca0b6..31a3ce9e 100644 --- a/test/unit/resources/folder_storage_shared.js +++ b/test/unit/resources/folder_storage_shared.js @@ -2,6 +2,7 @@ define( [ 'module', 'exports', + './th_main', 'mailslice', 'syncbase', 'slice_bridge_proxy' @@ -9,6 +10,7 @@ define( function( $module, exports, + $th_main, $mailslice, $syncbase, $sliceBridgeProxy @@ -16,8 +18,25 @@ define( function MockDB() { + this.fakeUnloadedBodyBlocks = {}; + this._pendingLoad = null; } MockDB.prototype = { + loadBodyBlock: function(folderId, blockId, onLoaded) { + this._pendingLoad = function() { + onLoaded(this.fakeUnloadedBodyBlocks[blockId]); + }.bind(this); + }, + + get hasPendingLoad() { + return this._pendingLoad !== null; + }, + + releasePendingLoad: function() { + var pendingLoad = this._pendingLoad; + this._pendingLoad = null; + pendingLoad(); + } }; function MockAccount() { @@ -81,6 +100,7 @@ exports.makeMockishSlice = function makeMockishSlice(storage) { * Create the FolderStorage instance for a test run plus the required mocks. 
*/ exports.makeTestContext = function makeTestContext(account) { + $th_main.thunkConsoleForNonTestUniverse(); var db = new MockDB(); // some tests interact with account features like the universe so generally we @@ -141,6 +161,8 @@ exports.makeTestContext = function makeTestContext(account) { insertBody: function(date, uid, size, expectedBlockIndex) { var blockInfo = null; var bodyInfo = this.bodyFactory(date, size); + // snapshot the block info state prior to manipulation + var preBlockStateString = JSON.stringify(storage._bodyBlockInfos); storage._insertIntoBlockUsingDateAndUID( 'body', date, uid, 'S' + uid, size, bodyInfo, function blockPicked(info, block) { @@ -156,43 +178,139 @@ exports.makeTestContext = function makeTestContext(account) { if (!block.bodies.hasOwnProperty(uid)) do_throw('body was not inserted!'); }); + if (storage._imapDb.hasPendingLoad) { + // Make sure that the block info state did not change yet! + do_check_eq(JSON.stringify(storage._bodyBlockInfos), + preBlockStateString, + 'insert with a load should not have changed the state'); + // inserts can only trigger one load at a time; so just this one release + // is fine. + storage._imapDb.releasePendingLoad(); + } return bodyInfo; }, deleteBody: function(date, uid) { + // snapshot the block info state prior to manipulation + var preBlockStateString = JSON.stringify(storage._bodyBlockInfos); storage._deleteFromBlock('body', date, uid, function blockDeleted() { }); + if (storage._imapDb.hasPendingLoad) { + // Make sure that the block info state did not change yet! + do_check_eq(JSON.stringify(storage._bodyBlockInfos), + preBlockStateString, + 'delete with a load should not have changed the state'); + storage._imapDb.releasePendingLoad(); + } }, /** - * Clear the list of dirty blocks. + * Clear the list of dirty body blocks and transfer the blocks to be in a + * mock unloaded state. 
*/ resetDirtyBlocks: function() { + var fakeUnloadedBodyBlocks = storage._imapDb.fakeUnloadedBodyBlocks; + var dirtyBlocks = storage._dirtyBodyBlocks; + for (var blockId in dirtyBlocks) { + var blockValue = dirtyBlocks[blockId]; + // Actually delete from the map so we get undefined and are otherwise + // consistent with how the DB would handle this, etc. + if (blockValue === null) { + delete fakeUnloadedBodyBlocks[blockId]; + } else { + fakeUnloadedBodyBlocks[blockId] = blockValue; + } + delete storage._bodyBlocks[blockId]; + var idx = storage._loadedBodyBlockInfos.findIndex(function(bi) { + return bi.blockId === blockId; + }); + storage._loadedBodyBlockInfos.splice(idx, 1); + } storage._dirtyBodyBlocks = {}; }, /** - * Assert that all of the given body blocks are marked dirty. + * Assert that all of the given body blocks are marked dirty and that the + * given blocks were marked as nuked. + * + * @param {Number[]} [bodyIndices] + * The *INDICES* of the blocks in their ordered list that we expect to + * be dirty. This is different and has nothing to do with the block id! + * @param {BlockInfo[]} [nukedInfos] + * The BlockInfos of nuked blocks. 
*/ checkDirtyBodyBlocks: function(bodyIndices, nukedInfos) { var i, blockInfo; + var actualDirtyBlockIndices = [], actualNukedBlocks = []; + if (bodyIndices == null) bodyIndices = []; - for (i = 0; i < bodyIndices.length; i++) { - blockInfo = storage._bodyBlockInfos[bodyIndices[i]]; - do_check_true( - storage._dirtyBodyBlocks.hasOwnProperty(blockInfo.blockId)); - do_check_true( - storage._dirtyBodyBlocks[blockInfo.blockId] === - storage._bodyBlocks[blockInfo.blockId]); - } if (nukedInfos == null) nukedInfos = []; - for (i = 0; i < nukedInfos.length; i++) { - blockInfo = nukedInfos[blockInfo]; - do_check_true( - storage._dirtyBodyBlocks.hasOwnProperty(blockInfo.blockId)); - do_check_true( - storage._dirtyBodyBlocks[blockInfo.blockId] === null); + + // note: it's absolutely required that we snapshot the contents of the + // list even if we weren't trying to do sorting things/etc. + var sortedExpectedDirty = bodyIndices.concat(); + sortedExpectedDirty.sort(); + var sortedExpectedNuked = nukedInfos.map( + function(x) { return x.blockId; }); + sortedExpectedNuked.sort(); + exports.gLazyLogger.expect_namedValue( + 'dirtyBlockIndices', sortedExpectedDirty); + exports.gLazyLogger.expect_namedValue( + 'nukedBlockIds', sortedExpectedNuked); + + for (var key in storage._dirtyBodyBlocks) { + var dirtyBlock = storage._dirtyBodyBlocks[key]; + if (dirtyBlock === null) { + actualNukedBlocks.push(key); + if (storage._bodyBlocks.hasOwnProperty(key)) { + exports.gLazyLogger.error( + 'nuked block should no longer be present'); + } + } else { + // that dirty Block had better be the same reference as in our + // canonical dictionary too! 
+ if (dirtyBlock !== storage._bodyBlocks[key]) { + exports.gLazyLogger.error('dirty block identity mismatch!'); + } + var dirtyBlockIndex = storage._bodyBlockInfos.findIndex(function(x) { + return x.blockId === key; + }); + actualDirtyBlockIndices.push(dirtyBlockIndex); + } + } + actualDirtyBlockIndices.sort(); + actualNukedBlocks.sort(); + + exports.gLazyLogger.namedValue( + 'dirtyBlockIndices', actualDirtyBlockIndices); + exports.gLazyLogger.namedValue( + 'nukedBlockIds', actualNukedBlocks); + }, + + checkBodyBlockContents: function(bodyIndex, ids, bodies) { + var blockInfo = storage._bodyBlockInfos[bodyIndex]; + var bodyBlock = storage._bodyBlocks[blockInfo.blockId]; + // Because of our hack where we fake write-outs/discarding, we need to + // check the written-to-disk store when the current state isn't covered + // by the body map or the dirty state map (which includes deletions) + var blockId = blockInfo.blockId; + if (!storage._bodyBlocks.hasOwnProperty(blockId) && + !storage._dirtyBodyBlocks.hasOwnProperty(blockId) && + storage._imapDb.fakeUnloadedBodyBlocks.hasOwnProperty(blockId)) { + bodyBlock = storage._imapDb.fakeUnloadedBodyBlocks[blockId]; + } + do_check_neq(bodyBlock, undefined); + do_check_eq(ids.length, bodyBlock.ids.length); + for (var i = 0; i < ids.length; i++){ + do_check_eq(ids[i], bodyBlock.ids[i]); + do_check_eq(bodies[i], bodyBlock.bodies[ids[i]]); } + for (var key in bodyBlock.bodies) { + if (ids.indexOf(parseInt(key, 10)) === -1) { + do_throw('Body block contains body it should not: ' + key); + } + }; }, + /** * Create a new header; no expectations, this is just setup logic. 
*/ diff --git a/test/unit/resources/th_main.js b/test/unit/resources/th_main.js index cceb046f..04eb3cbb 100644 --- a/test/unit/resources/th_main.js +++ b/test/unit/resources/th_main.js @@ -1787,6 +1787,7 @@ var TestCommonAccountMixins = { storageActor.ignore_updateMessageHeader(); storageActor.ignore_updateMessageBody(); storageActor.ignore_deleteFromBlock(); + storageActor.ignore_generatePersistenceInfo(); }, /** diff --git a/test/unit/test_allback_latch.js b/test/unit/test_allback_latch.js index 3267a69d..6d87d367 100644 --- a/test/unit/test_allback_latch.js +++ b/test/unit/test_allback_latch.js @@ -25,6 +25,46 @@ TD.commonSimple('basic latch', function(eLazy) { }); }); +TD.commonSimple('extract error results', function(eLazy) { + eLazy.expect_namedValueD('no errors', null); + eLazy.expect_namedValueD('one error', 'justme'); + eLazy.expect_namedValueD('first error of two', 'one'); + + var latchNoErrors = allback.latch(); + latchNoErrors.defer('one')(null); + latchNoErrors.defer('two')(null); + + var latchOneError = allback.latch(); + latchOneError.defer('one')(null); + latchOneError.defer('two')('justme'); + + var latchTwoErrors = allback.latch(); + latchTwoErrors.defer('one')('one'); + latchTwoErrors.defer('two')('two'); + + latchNoErrors.then(function(results) { + eLazy.namedValueD('no errors', + allback.extractErrFromCallbackArgs(results), + results); + }); + + latchOneError.then(function(results) { + eLazy.namedValueD('one error', + allback.extractErrFromCallbackArgs(results), + results); + }); + + // Note that objects maintain their ordering although the caller arguably + // should probably avoid depending on this a bit. 
+ latchTwoErrors.then(function(results) { + eLazy.namedValueD('first error of two', + allback.extractErrFromCallbackArgs(results), + results); + }); + +}); + + TD.commonSimple('latchedWithRejections', function(eLazy) { eLazy.expect_namedValue( diff --git a/test/unit/test_folder_storage.js b/test/unit/test_folder_storage.js index e34ae43e..466c17d6 100644 --- a/test/unit/test_folder_storage.js +++ b/test/unit/test_folder_storage.js @@ -465,7 +465,8 @@ TD.commonSimple('accuracy refresh check', /** * Helper to check the values in a block info structure. */ -function check_block(blockInfo, count, size, startTS, startUID, endTS, endUID) { +function check_block(blockInfo, count, size, startTS, startUID, endTS, endUID, + uidsAsIds) { do_check_eq(blockInfo.count, count); do_check_eq(blockInfo.startTS, startTS); do_check_eq(blockInfo.startUID, startUID); @@ -474,15 +475,6 @@ function check_block(blockInfo, count, size, startTS, startUID, endTS, endUID) { do_check_eq(blockInfo.estSize, size); } -function check_body_block_contents(bodyBlock, ids, bodies) { - do_check_neq(bodyBlock, undefined); - do_check_eq(ids.length, bodyBlock.ids.length); - for (var i = 0; i < ids.length; i++){ - do_check_eq(ids[i], bodyBlock.ids[i]); - do_check_eq(bodies[i], bodyBlock.bodies[ids[i]]); - } -} - /** * Base case: there are no blocks yet! */ @@ -511,6 +503,13 @@ TD.commonSimple('insertion: no existing blocks', * use the block, checking directional preferences. The directional preferences * test requires us to artificially inject an additional block since we aren't * triggering deletion for these tests. + * + * Specifically: + * - We have a directional preference of preferring older blocks over newer + * blocks as long as we fit. + * - We create a (real) body block 0 that entirely takes place on d5 + * - We create a (fake) body block 1 that is newer and allegedly covers d8/d9, + * it ends up at index 0. 
*/ TD.commonSimple('insertion: adjacent simple', function test_insertion_adjacent_simple(eLazy) { @@ -534,32 +533,42 @@ TD.commonSimple('insertion: adjacent simple', // base case ctx.insertBody(d5, uid2, BS, 0); + ctx.checkDirtyBodyBlocks([0]); + ctx.resetDirtyBlocks(); + // - uid growth cases // numerically greater UID ctx.insertBody(d5, uid3, BS, 0); do_check_eq(bodyBlocks.length, 1); check_block(bodyBlocks[0], 2, 2 * BS, d5, uid2, d5, uid3); + ctx.checkDirtyBodyBlocks([0]); + ctx.resetDirtyBlocks(); // numerically lesser UID ctx.insertBody(d5, uid1, BS, 0); do_check_eq(bodyBlocks.length, 1); check_block(bodyBlocks[0], 3, 3 * BS, d5, uid1, d5, uid3); - ctx.checkDirtyBodyBlocks([0]); + ctx.resetDirtyBlocks(); // - directional preferences (after injecting more recent block) - // inject the block that shouldn't be there... + // inject the block that shouldn't be there. It will have var synInfo = ctx.storage._makeBodyBlock(d8, uid4, d9, uid5); synInfo.count = 2; synInfo.estSize = 2 * BS; bodyBlocks.splice(0, 0, synInfo); - // inject one in between, it should favor the older block + // the newly inserted block is dirty and at index 0 + ctx.checkDirtyBodyBlocks([0]); + ctx.resetDirtyBlocks(); + + // inject one in between, it should favor the older block (at index 1) ctx.insertBody(d7, uid6, BS, 1); check_block(bodyBlocks[0], 2, 2 * BS, d8, uid4, d9, uid5); check_block(bodyBlocks[1], 4, 4 * BS, d5, uid1, d7, uid6); + ctx.checkDirtyBodyBlocks([1]); }); /** @@ -580,13 +589,23 @@ TD.commonSimple('insertion in existing block', bodyBlocks = ctx.storage._bodyBlockInfos; ctx.insertBody(d5, uid1, BS, 0); + + ctx.checkDirtyBodyBlocks([0]); + ctx.resetDirtyBlocks(); + + ctx.insertBody(d7, uid2, BS, 0); check_block(bodyBlocks[0], 2, 2 * BS, d5, uid1, d7, uid2); + ctx.checkDirtyBodyBlocks([0]); + ctx.resetDirtyBlocks(); + ctx.insertBody(d6, uid3, BS, 0); do_check_eq(bodyBlocks.length, 1); check_block(bodyBlocks[0], 3, 3 * BS, d5, uid1, d7, uid2); + ctx.checkDirtyBodyBlocks([0]); + 
ctx.resetDirtyBlocks(); }); /** @@ -625,12 +644,20 @@ TD.commonSimple('inserting larger-than-block items', var b6 = ctx.insertBody(d6, uid6, TOOBIG, 0); do_check_eq(bodyBlockInfos.length, 1); check_block(bodyBlockInfos[0], 1, TOOBIG, d6, uid6, d6, uid6); + ctx.checkDirtyBodyBlocks([0]); + ctx.resetDirtyBlocks(); // - insert younger oversized var b8 = ctx.insertBody(d8, uid8, TOOBIG, 0); do_check_eq(bodyBlockInfos.length, 2); check_block(bodyBlockInfos[0], 1, TOOBIG, d8, uid8, d8, uid8); check_block(bodyBlockInfos[1], 1, TOOBIG, d6, uid6, d6, uid6); + // New block at index 0 was split out of the previous block at index 0, now + // index 1. (Yes, this is more dirtying than required in this test case, but + // our logic is going for a generic / balancing thing where bodies are + // potentially small enough to fit at least a few in.) + ctx.checkDirtyBodyBlocks([0, 1]); + ctx.resetDirtyBlocks(); // - insert older oversized var b4 = ctx.insertBody(d4, uid4, TOOBIG, 2); @@ -638,6 +665,8 @@ TD.commonSimple('inserting larger-than-block items', check_block(bodyBlockInfos[0], 1, TOOBIG, d8, uid8, d8, uid8); check_block(bodyBlockInfos[1], 1, TOOBIG, d6, uid6, d6, uid6); check_block(bodyBlockInfos[2], 1, TOOBIG, d4, uid4, d4, uid4); + ctx.checkDirtyBodyBlocks([1, 2]); // new block at index 2, split off of 1 + ctx.resetDirtyBlocks(); // - insert youngest smalls var b9 = ctx.insertBody(d9, uid9, size9, 0), @@ -647,6 +676,8 @@ TD.commonSimple('inserting larger-than-block items', check_block(bodyBlockInfos[1], 1, TOOBIG, d8, uid8, d8, uid8); check_block(bodyBlockInfos[2], 1, TOOBIG, d6, uid6, d6, uid6); check_block(bodyBlockInfos[3], 1, TOOBIG, d4, uid4, d4, uid4); + ctx.checkDirtyBodyBlocks([0, 1]); // new block at index 0, split from old0=1 + ctx.resetDirtyBlocks(); // - insert oldest smalls var b3 = ctx.insertBody(d3, uid3, size3, 4); @@ -656,6 +687,8 @@ TD.commonSimple('inserting larger-than-block items', check_block(bodyBlockInfos[2], 1, TOOBIG, d6, uid6, d6, uid6); 
check_block(bodyBlockInfos[3], 1, TOOBIG, d4, uid4, d4, uid4); check_block(bodyBlockInfos[4], 1, size3, d3, uid3, d3, uid3); + ctx.checkDirtyBodyBlocks([3, 4]); // new block at index 4, split off of 3 + ctx.resetDirtyBlocks(); // - insert small between bigs var b7 = ctx.insertBody(d7, uid7, size7, 2); @@ -666,7 +699,12 @@ TD.commonSimple('inserting larger-than-block items', check_block(bodyBlockInfos[3], 1, TOOBIG, d6, uid6, d6, uid6); check_block(bodyBlockInfos[4], 1, TOOBIG, d4, uid4, d4, uid4); check_block(bodyBlockInfos[5], 1, size3, d3, uid3, d3, uid3); - + // the insert logic scanned to index 2, saw it was less than 2.5 (list length + // was 5, which we divide by 2 to figure out how to move further from the + // center), so decided on using block 1. Which we then split, resulting in + // index 2 popping out (since d7 is older than d8). + ctx.checkDirtyBodyBlocks([1, 2]); + ctx.resetDirtyBlocks(); }); /** @@ -693,10 +731,7 @@ TD.commonSimple('insertion in block that will overflow', var b1 = ctx.insertBody(d5, uid1, size1, 0); var b2 = ctx.insertBody(d8, uid2, size2, 0); check_block(bodyBlockInfos[0], 2, size1 + size2, d5, uid1, d8, uid2); - check_body_block_contents( - bodyBlockMap[bodyBlockInfos[0].blockId], - [uid2, uid1], - [b2, b1]); + ctx.checkBodyBlockContents(0, [uid2, uid1], [b2, b1]); ctx.checkDirtyBodyBlocks([0]); ctx.resetDirtyBlocks(); @@ -706,14 +741,8 @@ TD.commonSimple('insertion in block that will overflow', do_check_eq(bodyBlockInfos.length, 2); check_block(bodyBlockInfos[0], 1, size2, d8, uid2, d8, uid2); - check_body_block_contents( - bodyBlockMap[bodyBlockInfos[0].blockId], - [uid2], - [b2]); - check_body_block_contents( - bodyBlockMap[bodyBlockInfos[1].blockId], - [uid3, uid1], - [b3, b1]); + ctx.checkBodyBlockContents(0, [uid2], [b2]); + ctx.checkBodyBlockContents(1, [uid3, uid1], [b3, b1]); check_block(bodyBlockInfos[1], 2, size3 + size1, d5, uid1, d7, uid3); ctx.checkDirtyBodyBlocks([0, 1]); @@ -725,20 +754,11 @@ 
TD.commonSimple('insertion in block that will overflow', do_check_eq(bodyBlockInfos.length, 3); check_block(bodyBlockInfos[0], 1, size2, d8, uid2, d8, uid2); - check_body_block_contents( - bodyBlockMap[bodyBlockInfos[0].blockId], - [uid2], - [b2]); + ctx.checkBodyBlockContents(0, [uid2], [b2]); check_block(bodyBlockInfos[1], 2, size3 + size4, d6, uid4, d7, uid3); - check_body_block_contents( - bodyBlockMap[bodyBlockInfos[1].blockId], - [uid3, uid4], - [b3, b4]); + ctx.checkBodyBlockContents(1, [uid3, uid4], [b3, b4]); check_block(bodyBlockInfos[2], 1, size1, d5, uid1, d5, uid1); - check_body_block_contents( - bodyBlockMap[bodyBlockInfos[2].blockId], - [uid1], - [b1]); + ctx.checkBodyBlockContents(2, [uid1], [b1]); ctx.checkDirtyBodyBlocks([1, 2]); }); @@ -980,43 +1000,71 @@ TD.commonSimple('body deletion', function test_body_deletion(eLazy) { bodyBlocks = ctx.storage._bodyBlockInfos; // - Setup: [1, 2] - ctx.insertBody(d5, uid1, BIG2, 0); - ctx.insertBody(d8, uid2, BIG2, 0); - ctx.insertBody(d7, uid3, BIG2, 1); + var b1 = ctx.insertBody(d5, uid1, BIG2, 0); + var b2 = ctx.insertBody(d8, uid2, BIG2, 0); + var b3 = ctx.insertBody(d7, uid3, BIG2, 1); do_check_eq(bodyBlocks.length, 2); check_block(bodyBlocks[0], 1, 1 * BIG2, d8, uid2, d8, uid2); check_block(bodyBlocks[1], 2, 2 * BIG2, d5, uid1, d7, uid3); + ctx.checkBodyBlockContents(0, [uid2], [b2]); + ctx.checkBodyBlockContents(1, [uid3, uid1], [b3, b1]); + + ctx.checkDirtyBodyBlocks([0, 1]); // (block 0 and 1 are both new) + ctx.resetDirtyBlocks(); // - Delete to [1, 1], end-side + var nuking = []; // no blocks will be deleted ctx.deleteBody(d7, uid3); do_check_eq(bodyBlocks.length, 2); check_block(bodyBlocks[0], 1, 1 * BIG2, d8, uid2, d8, uid2); check_block(bodyBlocks[1], 1, 1 * BIG2, d5, uid1, d5, uid1); + ctx.checkBodyBlockContents(0, [uid2], [b2]); + ctx.checkBodyBlockContents(1, [uid1], [b1]); + ctx.checkDirtyBodyBlocks([1], nuking); + ctx.resetDirtyBlocks(); // - Put it back in! 
- ctx.insertBody(d7, uid3, BIG2, 1); + b3 = ctx.insertBody(d7, uid3, BIG2, 1); do_check_eq(bodyBlocks.length, 2); check_block(bodyBlocks[0], 1, 1 * BIG2, d8, uid2, d8, uid2); check_block(bodyBlocks[1], 2, 2 * BIG2, d5, uid1, d7, uid3); + ctx.checkBodyBlockContents(0, [uid2], [b2]); + ctx.checkBodyBlockContents(1, [uid3, uid1], [b3, b1]); + ctx.checkDirtyBodyBlocks([1]); // it got put back in the same block + ctx.resetDirtyBlocks(); // - Delete to [1, 1], start-side + nuking = []; // no blocks will be deleted! ctx.deleteBody(d5, uid1); do_check_eq(bodyBlocks.length, 2); check_block(bodyBlocks[0], 1, 1 * BIG2, d8, uid2, d8, uid2); check_block(bodyBlocks[1], 1, 1 * BIG2, d7, uid3, d7, uid3); + ctx.checkBodyBlockContents(0, [uid2], [b2]); + ctx.checkBodyBlockContents(1, [uid3], [b3]); + ctx.checkDirtyBodyBlocks([1], nuking); + ctx.resetDirtyBlocks(); // - Delete the d8 block entirely + nuking = [bodyBlocks[0]]; // the first block is getting nuked ctx.deleteBody(d8, uid2); + do_check_eq(bodyBlocks.length, 1); check_block(bodyBlocks[0], 1, 1 * BIG2, d7, uid3, d7, uid3); + ctx.checkBodyBlockContents(0, [uid3], [b3]); + ctx.checkDirtyBodyBlocks([], nuking); + ctx.resetDirtyBlocks(); // - Delete the d7 block entirely + nuking = [bodyBlocks[0]]; ctx.deleteBody(d7, uid3); + do_check_eq(bodyBlocks.length, 0); + ctx.checkDirtyBodyBlocks([], nuking); + ctx.resetDirtyBlocks(); }); @@ -1099,24 +1147,33 @@ TD.commonSimple( do_check_eq(bodyBlocks.length, 2); check_block(bodyBlocks[0], 1, 1 * BIG2, d8, uid2, d8, uid2); check_block(bodyBlocks[1], 2, 2 * BIG2, d5, uid1, d7, uid3); + ctx.checkDirtyBodyBlocks([0, 1]); + ctx.resetDirtyBlocks(); + ctx.deleteBody(d7, uid3); do_check_eq(bodyBlocks.length, 2); check_block(bodyBlocks[0], 1, 1 * BIG2, d8, uid2, d8, uid2); check_block(bodyBlocks[1], 1, 1 * BIG2, d5, uid1, d5, uid1); + ctx.checkDirtyBodyBlocks([1]); + ctx.resetDirtyBlocks(); // - Insert d6, it picks the older one because it's not overflowing ctx.insertBody(d6, uid4, BIG2, 1); 
do_check_eq(bodyBlocks.length, 2); check_block(bodyBlocks[0], 1, 1 * BIG2, d8, uid2, d8, uid2); check_block(bodyBlocks[1], 2, 2 * BIG2, d5, uid1, d6, uid4); + ctx.checkDirtyBodyBlocks([1]); + ctx.resetDirtyBlocks(); // - Insert d7, it picks the newer one because the older one is overflowing ctx.insertBody(d7, uid3, BIG2, 0); do_check_eq(bodyBlocks.length, 2); check_block(bodyBlocks[0], 2, 2 * BIG2, d7, uid3, d8, uid2); check_block(bodyBlocks[1], 2, 2 * BIG2, d5, uid1, d6, uid4); + ctx.checkDirtyBodyBlocks([0]); + ctx.resetDirtyBlocks(); // - Insert another d7 with lower UID so it is 'outside', picks older ctx.insertBody(d7, uid0, BIG2, 1); @@ -1124,6 +1181,8 @@ TD.commonSimple( check_block(bodyBlocks[0], 2, 2 * BIG2, d7, uid3, d8, uid2); check_block(bodyBlocks[1], 2, 2 * BIG2, d6, uid4, d7, uid0); check_block(bodyBlocks[2], 1, 1 * BIG2, d5, uid1, d5, uid1); + ctx.checkDirtyBodyBlocks([1, 2]); // it picks 1 which gets split into 1,2 + ctx.resetDirtyBlocks(); }); /** @@ -1148,32 +1207,54 @@ TD.commonSimple('insertion differing only by UIDs', ctx.insertBody(d5, uid5, BIG3, 0); do_check_eq(bodyBlocks.length, 1); check_block(bodyBlocks[0], 2, 2 * BIG3, d5, uid2, d5, uid5); + ctx.checkDirtyBodyBlocks([0]); + ctx.resetDirtyBlocks(); ctx.insertBody(d5, uid4, BIG3, 0); do_check_eq(bodyBlocks.length, 1); check_block(bodyBlocks[0], 3, 3 * BIG3, d5, uid2, d5, uid5); + ctx.checkDirtyBodyBlocks([0]); + ctx.resetDirtyBlocks(); ctx.insertBody(d5, uid3, BIG3, 1); do_check_eq(bodyBlocks.length, 2); check_block(bodyBlocks[0], 2, 2 * BIG3, d5, uid4, d5, uid5); check_block(bodyBlocks[1], 2, 2 * BIG3, d5, uid2, d5, uid3); + ctx.checkDirtyBodyBlocks([0, 1]); + ctx.resetDirtyBlocks(); ctx.insertBody(d5, uid1, BIG3, 1); + check_block(bodyBlocks[0], 2, 2 * BIG3, d5, uid4, d5, uid5); + check_block(bodyBlocks[1], 3, 3 * BIG3, d5, uid1, d5, uid3); + ctx.checkDirtyBodyBlocks([1]); + ctx.resetDirtyBlocks(); + ctx.insertBody(d5, uid6, BIG3, 0); do_check_eq(bodyBlocks.length, 2); 
check_block(bodyBlocks[0], 3, 3 * BIG3, d5, uid4, d5, uid6); check_block(bodyBlocks[1], 3, 3 * BIG3, d5, uid1, d5, uid3); + ctx.checkDirtyBodyBlocks([0]); + ctx.resetDirtyBlocks(); ctx.deleteBody(d5, uid4); + check_block(bodyBlocks[0], 2, 2 * BIG3, d5, uid5, d5, uid6); + check_block(bodyBlocks[1], 3, 3 * BIG3, d5, uid1, d5, uid3); + ctx.checkDirtyBodyBlocks([0]); + ctx.resetDirtyBlocks(); + ctx.deleteBody(d5, uid3); do_check_eq(bodyBlocks.length, 2); check_block(bodyBlocks[0], 2, 2 * BIG3, d5, uid5, d5, uid6); check_block(bodyBlocks[1], 2, 2 * BIG3, d5, uid1, d5, uid2); + ctx.checkDirtyBodyBlocks([1]); + ctx.resetDirtyBlocks(); ctx.insertBody(d5, uid3, BIG3, 1); do_check_eq(bodyBlocks.length, 2); check_block(bodyBlocks[0], 2, 2 * BIG3, d5, uid5, d5, uid6); check_block(bodyBlocks[1], 3, 3 * BIG3, d5, uid1, d5, uid3); + ctx.checkDirtyBodyBlocks([1]); + ctx.resetDirtyBlocks(); }); // Expect, where 'first' is first reported, and 'last' is last reported, diff --git a/test/unit/test_search_slice.js b/test/unit/test_search_slice.js index a0d11017..3f47a3b0 100644 --- a/test/unit/test_search_slice.js +++ b/test/unit/test_search_slice.js @@ -99,7 +99,7 @@ TD.commonCase('stop searching when killed', function(T, RT) { backendSlice.die(); eLazy.namedValue('slices.length post-die', folderStorage._slices.length); - folderStorage.runAfterDeferredCalls(function() { + folderStorage._deferredCalls.push(function() { eLazy.event('all blocks loaded'); eLazy.namedValue('messages checked', backendSlice.filterer.messagesChecked);