/**
 * Tests that write operations are accepted and result in correct indexing behavior for each phase
 * of hybrid unique index builds. This test introduces a duplicate document at different phases of
 * an index build to confirm the expected outcome: the build fails if the duplicate key remains,
 * and succeeds if the duplicate is resolved before the build completes.
 *
 * @tags: [requires_document_locking]
 */
(function() {
"use strict";
load("jstests/libs/check_log.js");
let conn = MongoRunner.runMongod();
let testDB = conn.getDB('test');
// Run 'func' while failpoint is enabled.
let doDuringFailpoint = function(failPointName, logMessage, func, i) {
clearRawMongoProgramOutput();
assert.commandWorked(testDB.adminCommand(
{configureFailPoint: failPointName, mode: "alwaysOn", data: {"i": i}}));
assert.soon(() => rawMongoProgramOutput().indexOf(logMessage) >= 0);
func();
assert.commandWorked(testDB.adminCommand({configureFailPoint: failPointName, mode: "off"}));
};
const docsToInsert = 10;
let setUp = function(coll) {
coll.drop();
let bulk = coll.initializeUnorderedBulkOp();
for (let i = 0; i < docsToInsert; i++) {
bulk.insert({i: i});
}
assert.commandWorked(bulk.execute());
};
let buildIndexInBackground = function(expectDuplicateKeyError) {
if (expectDuplicateKeyError) {
return startParallelShell(function() {
assert.commandFailedWithCode(
db.hybrid.createIndex({i: 1}, {background: true, unique: true}),
ErrorCodes.DuplicateKey);
}, conn.port);
}
return startParallelShell(function() {
assert.commandWorked(db.hybrid.createIndex({i: 1}, {background: true, unique: true}));
}, conn.port);
};
/**
 * Runs a background index build on a unique index under different configurations. Introduces
 * duplicate keys on the index that may cause it to fail or succeed, depending on the following
 * optional parameters:
 * {
 *     // Which operation is used to introduce a duplicate key.
 *     operation {string}: "insert", "update"
 *
 *     // Whether or not to resolve the duplicate key before completing the build.
 *     resolve {bool}
 *
 *     // Which phase of the index build to introduce the duplicate key in.
 *     phase {number}: 0-4
 * }
 */
let runTest = function(config) {
jsTestLog("running test with config: " + tojson(config));
setUp(testDB.hybrid);
// Expect the build to fail with a duplicate key error if we insert a duplicate key and
// don't resolve it.
let expectDuplicate = config.resolve === false;
let awaitBuild = buildIndexInBackground(expectDuplicate);
// Introduce a duplicate key, either from an insert or update. Optionally, follow-up with an
// operation that will resolve the duplicate by removing it or updating it.
const dup = {i: 0};
let doOperation = function() {
if ("insert" == config.operation) {
assert.commandWorked(testDB.hybrid.insert(dup));
if (config.resolve) {
assert.commandWorked(testDB.hybrid.deleteOne(dup));
}
} else if ("update" == config.operation) {
assert.commandWorked(testDB.hybrid.update(dup, {i: 1}));
if (config.resolve) {
assert.commandWorked(testDB.hybrid.update({i: 1}, dup));
}
}
};
const stopKey = 0;
switch (config.phase) {
// Don't hang the build.
case undefined:
break;
// Hang before scanning the first document.
case 0:
doDuringFailpoint("hangBeforeIndexBuildOf",
"Hanging before index build of i=" + stopKey,
doOperation,
stopKey);
break;
// Hang after scanning the first document.
case 1:
doDuringFailpoint("hangAfterIndexBuildOf",
"Hanging after index build of i=" + stopKey,
doOperation,
stopKey);
break;
// Hang before the first drain and after dumping the keys from the external sorter into
// the index.
case 2:
doDuringFailpoint("hangAfterIndexBuildDumpsInsertsFromBulk",
"Hanging after dumping inserts from bulk builder",
doOperation);
break;
// Hang before the second drain.
case 3:
doDuringFailpoint("hangAfterIndexBuildFirstDrain",
"Hanging after index build first drain",
doOperation);
break;
// Hang before the final drain and commit.
case 4:
doDuringFailpoint("hangAfterIndexBuildSecondDrain",
"Hanging after index build second drain",
doOperation);
break;
default:
assert(false, "Invalid phase: " + config.phase);
}
awaitBuild();
let expectedDocs = docsToInsert;
expectedDocs += (config.operation == "insert" && config.resolve === false) ? 1 : 0;
assert.eq(expectedDocs, testDB.hybrid.count());
assert.eq(expectedDocs, testDB.hybrid.find().itcount());
assert.commandWorked(testDB.hybrid.validate({full: true}));
};
runTest({});
for (let i = 0; i <= 4; i++) {
runTest({operation: "insert", resolve: true, phase: i});
runTest({operation: "insert", resolve: false, phase: i});
runTest({operation: "update", resolve: true, phase: i});
runTest({operation: "update", resolve: false, phase: i});
}
MongoRunner.stopMongod(conn);
})();