Merge branch 'master' of github.com:mongodb/mongo

commit 0c260c00f30c63e9d55f2e8adddac5be0440e40a (2 parents: 532bc1a + ec6187a)
@dwight authored
Showing with 6,947 additions and 2,355 deletions.
  1. +5 −3 SConscript.smoke
  2. +12 −11 SConstruct
  3. +54 −0 buildscripts/aggregate_tracefiles.py
  4. +20 −1 buildscripts/s3del.py
  5. +26 −9 buildscripts/smoke.py
  6. +3 −2 jstests/aggregation/bugs/server6045.js
  7. +26 −0 jstests/aggregation/bugs/server6861.js
  8. +17 −0 jstests/cursorb.js
  9. +9 −2 jstests/distinct3.js
  10. +32 −9 jstests/evalb.js
  11. +21 −0 jstests/find_and_modify_server6909.js
  12. +9 −0 jstests/find_and_modify_server6993.js
  13. +2 −0  jstests/multiVersion/multi_version_sharding_passthrough.js
  14. +14 −0 jstests/queryoptimizerc.js
  15. +22 −0 jstests/regex_limit.js
  16. +2 −0  jstests/replsets/initial_sync3.js
  17. +13 −4 jstests/replsets/majority.js
  18. +4 −0 jstests/replsets/replset5.js
  19. +2 −0  jstests/replsets/tags.js
  20. +42 −0 jstests/sharding/delete_during_migrate.js
  21. +0 −43 jstests/sharding/deletion_range.js
  22. +4 −4 jstests/sharding/findandmodify2.js
  23. +11 −2 jstests/sharding/mongos_validate_backoff.js
  24. +74 −3 jstests/sharding/prefix_shard_key.js
  25. +3 −1 jstests/sharding/read_pref.js
  26. +1 −0  jstests/sharding/read_pref_rs_client.js
  27. +115 −0 jstests/sharding/writeback_bulk_insert.js
  28. +1 −1  jstests/slowNightly/balance_repl.js
  29. +6 −1 jstests/slowNightly/balance_tags1.js
  30. 0  jstests/{ → slowNightly}/memory.js
  31. +24 −0 jstests/splitvector.js
  32. +89 −0 jstests/tool/tool_replset.js
  33. +150 −156 rpm/mongo.spec
  34. +2 −2 site_scons/libdeps.py
  35. +19 −3 src/mongo/SConscript
  36. +18 −0 src/mongo/base/SConscript
  37. +33 −0 src/mongo/base/disallow_copying.h
  38. +91 −0 src/mongo/base/error_codes.h
  39. +149 −0 src/mongo/base/initializer_dependency_graph.cpp
  40. +118 −0 src/mongo/base/initializer_dependency_graph.h
  41. +273 −0 src/mongo/base/initializer_dependency_graph_test.cpp
  42. +34 −0 src/mongo/base/initializer_function.h
  43. +39 −0 src/mongo/base/make_string_vector.cpp
  44. +46 −0 src/mongo/base/make_string_vector.h
  45. +55 −0 src/mongo/base/owned_pointer_vector.h
  46. +81 −0 src/mongo/base/owned_pointer_vector_test.cpp
  47. +103 −0 src/mongo/base/status.cpp
  48. +132 −0 src/mongo/base/status.h
  49. +66 −0 src/mongo/base/status_test.cpp
  50. +6 −4 src/mongo/bson/bson-inl.h
  51. +2 −2 src/mongo/bson/bson.h
  52. +1 −1  src/mongo/bson/oid.cpp
  53. +1 −1  src/mongo/bson/oid.h
  54. +14 −15 src/mongo/client/connection_factory.cpp
  55. +1 −1  src/mongo/client/connpool.h
  56. +7 −0 src/mongo/client/dbclient.cpp
  57. +6 −0 src/mongo/client/dbclient_rs.h
  58. +3 −3 src/mongo/client/dbclientinterface.h
  59. +7 −2 src/mongo/client/distlock.cpp
  60. +1 −1  src/mongo/client/distlock.h
  61. +4 −4 src/mongo/client/gridfs.cpp
  62. +8 −8 src/mongo/client/syncclusterconnection.cpp
  63. +5 −2 src/mongo/client/syncclusterconnection.h
  64. +11 −17 src/mongo/db/btree.cpp
  65. +54 −16 src/mongo/db/client.cpp
  66. +4 −4 src/mongo/db/client.h
  67. +1 −1  src/mongo/db/clientcursor.cpp
  68. +3 −0  src/mongo/db/clientcursor.h
  69. +40 −6 src/mongo/db/commands/find_and_modify.cpp
  70. +11 −5 src/mongo/db/commands/group.cpp
  71. +3 −2 src/mongo/db/commands/mr.cpp
  72. +1 −1  src/mongo/db/commands/mr.h
  73. +16 −2 src/mongo/db/curop.h
  74. +23 −18 src/mongo/db/dbcommands.cpp
  75. +7 −7 src/mongo/db/dbcommands_admin.cpp
  76. +14 −15 src/mongo/db/dbmessage.cpp
  77. +4 −4 src/mongo/db/dbwebserver.cpp
  78. +1 −1  src/mongo/db/dbwebserver.h
  79. +37 −28 src/mongo/db/dur.cpp
  80. +3 −3 src/mongo/db/dur.h
  81. +1 −1  src/mongo/db/durop.cpp
  82. +2 −2 src/mongo/db/durop.h
  83. +1 −1  src/mongo/db/extsort.cpp
  84. +2 −2 src/mongo/db/extsort.h
  85. +43 −7 src/mongo/db/geo/2d.cpp
  86. +1 −1  src/mongo/db/geo/core.h
  87. +1 −1  src/mongo/db/helpers/dblogger.h
  88. +1 −1  src/mongo/db/index_update.cpp
  89. +5 −1 src/mongo/db/index_update.h
  90. +1 −1  src/mongo/db/instance.cpp
  91. +17 −8 src/mongo/db/introspect.cpp
  92. +14 −15 src/mongo/db/json.cpp
  93. +6 −0 src/mongo/db/matcher.cpp
  94. +7 −0 src/mongo/db/matcher.h
  95. +1 −1  src/mongo/db/memconcept.cpp
  96. +5 −1 src/mongo/db/mongod.vcxproj
  97. +13 −1 src/mongo/db/mongod.vcxproj.filters
  98. +3 −3 src/mongo/db/mongommf.cpp
  99. +3 −3 src/mongo/db/mongommf.h
  100. +32 −24 src/mongo/db/oplog.cpp
  101. +16 −3 src/mongo/db/oplog.h
  102. +1 −1  src/mongo/db/oplogreader.h
  103. +10 −9 src/mongo/db/ops/query.cpp
  104. +4 −4 src/mongo/db/ops/query.h
  105. +1 −1  src/mongo/db/pdfile.cpp
  106. +1 −1  src/mongo/db/pdfile.h
  107. +7 −6 src/mongo/db/pipeline/accumulator.cpp
  108. +2 −2 src/mongo/db/pipeline/accumulator.h
  109. +3 −3 src/mongo/db/pipeline/builder.cpp
  110. +4 −4 src/mongo/db/pipeline/builder.h
  111. +1 −1  src/mongo/db/pipeline/document_source.h
  112. +1 −1  src/mongo/db/pipeline/document_source_group.cpp
  113. +6 −6 src/mongo/db/pipeline/expression.cpp
  114. +7 −7 src/mongo/db/pipeline/expression.h
  115. +18 −30 src/mongo/db/pipeline/pipeline.cpp
  116. +1 −1  src/mongo/db/pipeline/value.cpp
  117. +1 −1  src/mongo/db/pipeline/value.h
  118. +2 −2 src/mongo/db/queryoptimizer.cpp
  119. +2 −2 src/mongo/db/queryoptimizer.h
  120. +4 −0 src/mongo/db/queryutil.cpp
  121. +3 −2 src/mongo/db/repl.cpp
  122. +1 −1  src/mongo/db/repl.h
  123. +24 −6 src/mongo/db/repl/connections.h
  124. +15 −0 src/mongo/db/repl/health.cpp
  125. +7 −1 src/mongo/db/repl/health.h
  126. +97 −9 src/mongo/db/repl/heartbeat.cpp
  127. +8 −6 src/mongo/db/repl/replset_commands.cpp
  128. +62 −53 src/mongo/db/repl/rs.cpp
  129. +18 −12 src/mongo/db/repl/rs.h
  130. +34 −6 src/mongo/db/repl/rs_config.cpp
  131. +23 −3 src/mongo/db/repl/rs_config.h
  132. +4 −2 src/mongo/db/repl/rs_initialsync.cpp
  133. +6 −5 src/mongo/db/repl/rs_initiate.cpp
  134. +9 −3 src/mongo/db/repl/rs_member.h
  135. +2 −12 src/mongo/db/repl/rs_rollback.cpp
  136. +29 −18 src/mongo/db/repl/rs_sync.cpp
  137. +6 −1 src/mongo/db/repl/rs_sync.h
  138. +16 −0 src/mongo/db/repl_block.cpp
  139. +2 −0  src/mongo/db/repl_block.h
  140. +11 −3 src/mongo/db/restapi.cpp
  141. +5 −1 src/mongo/db/ttl.cpp
  142. +137 −0 src/mongo/dbtests/mock/mock_dbclient_connection.cpp
  143. +99 −0 src/mongo/dbtests/mock/mock_dbclient_connection.h
  144. +203 −0 src/mongo/dbtests/mock/mock_remote_db_server.cpp
  145. +174 −0 src/mongo/dbtests/mock/mock_remote_db_server.h
  146. +278 −0 src/mongo/dbtests/mock/mock_replica_set.cpp
  147. +131 −0 src/mongo/dbtests/mock/mock_replica_set.h
  148. +315 −0 src/mongo/dbtests/mock_dbclient_conn_test.cpp
  149. +243 −0 src/mongo/dbtests/mock_replica_set_test.cpp
  150. +109 −0 src/mongo/dbtests/profile_test.cpp
  151. +5 −9 src/mongo/dbtests/queryoptimizercursortests.cpp
  152. +26 −23 src/mongo/dbtests/querytests.cpp
  153. +13 −0 src/mongo/dbtests/queryutiltests.cpp
  154. +917 −1,309 src/mongo/dbtests/replica_set_monitor_test.cpp
  155. +16 −7 src/mongo/dbtests/replsettests.cpp
  156. +206 −0 src/mongo/dbtests/repltests.cpp
  157. +7 −0 src/mongo/dbtests/test.vcxproj
  158. +21 −0 src/mongo/dbtests/test.vcxproj.filters
  159. +2 −0  src/mongo/platform/atomic_intrinsics_win32.h
  160. +44 −0 src/mongo/platform/unordered_map.h
  161. +44 −0 src/mongo/platform/unordered_set.h
  162. +3 −3 src/mongo/s/config.cpp
  163. +3 −3 src/mongo/s/config.h
  164. +12 −2 src/mongo/s/d_logic.cpp
  165. +52 −8 src/mongo/s/d_migrate.cpp
  166. +2 −2 src/mongo/s/grid.cpp
  167. +2 −2 src/mongo/s/grid.h
  168. +5 −1 src/mongo/s/mongos.vcxproj
  169. +13 −1 src/mongo/s/mongos.vcxproj.filters
  170. +10 −4 src/mongo/s/shard_version.cpp
  171. +17 −60 src/mongo/scripting/bench.cpp
  172. +2 −0  src/mongo/scripting/bson_template_evaluator.cpp
  173. +3 −3 src/mongo/scripting/engine.cpp
  174. +6 −2 src/mongo/scripting/engine_spidermonkey.cpp
  175. +5 −2 src/mongo/shell/db.js
  176. +4 −7 src/mongo/shell/dbshell.cpp
  177. +4 −0 src/mongo/shell/mongo.vcxproj
  178. +12 −0 src/mongo/shell/mongo.vcxproj.filters
  179. +7 −2 src/mongo/shell/servers_misc.js
  180. +4 −2 src/mongo/shell/shardingtest.js
  181. +1 −1  src/mongo/shell/utils.js
  182. +5 −4 src/mongo/shell/utils_sh.js
  183. +0 −1  src/mongo/tools/oplog.cpp
  184. +7 −7 src/mongo/tools/tool.cpp
  185. +5 −0 src/mongo/tools/top.cpp
  186. +2 −2 src/mongo/util/assert_util.cpp
  187. +2 −2 src/mongo/util/assert_util.h
  188. +1 −1  src/mongo/util/concurrency/msg.h
  189. +14 −15 src/mongo/util/concurrency/mutexdebugger.cpp
  190. +1 −1  src/mongo/util/concurrency/race.h
  191. +14 −15 src/mongo/util/concurrency/spin_lock.cpp
  192. +14 −15 src/mongo/util/concurrency/task.cpp
  193. +3 −1 src/mongo/util/goodies.h
  194. +2 −2 src/mongo/util/log.cpp
  195. +1 −1  src/mongo/util/log.h
  196. +2 −2 src/mongo/util/logfile.cpp
  197. +1 −1  src/mongo/util/logfile.h
  198. +3 −5 src/mongo/util/map_util.h
  199. +1 −1  src/mongo/util/md5.hpp
  200. +2 −2 src/mongo/util/mmap.cpp
  201. +3 −3 src/mongo/util/mmap.h
  202. +4 −1 src/mongo/util/mmap_win.cpp
  203. +14 −12 src/mongo/util/mongoutils/html.h
  204. +25 −25 src/mongo/util/mongoutils/str.h
  205. +3 −3 src/mongo/util/net/hostandport.h
  206. +2 −2 src/mongo/util/net/httpclient.cpp
  207. +2 −2 src/mongo/util/net/httpclient.h
  208. +1 −1  src/mongo/util/net/listen.h
  209. +4 −0 src/mongo/util/net/message_port.cpp
  210. +2 −0  src/mongo/util/net/message_port.h
  211. +1 −1  src/mongo/util/net/miniwebserver.cpp
  212. +2 −2 src/mongo/util/net/miniwebserver.h
  213. +1 −1  src/mongo/util/net/sock.h
  214. +2 −2 src/mongo/util/ntservice.cpp
  215. +2 −2 src/mongo/util/ntservice.h
  216. +1 −1  src/mongo/util/paths.h
  217. +18 −5 src/mongo/util/processinfo_darwin.cpp
  218. +1 −1  src/mongo/util/progress_meter.h
  219. +4 −4 src/mongo/util/ramlog.cpp
  220. +3 −3 src/mongo/util/ramlog.h
  221. +252 −0 src/mongo/util/safe_num.cpp
  222. +160 −0 src/mongo/util/safe_num.h
  223. +181 −0 src/mongo/util/safe_num_test.cpp
  224. +14 −15 src/mongo/util/stringutils.cpp
  225. +2 −2 src/mongo/util/text.cpp
  226. +2 −2 src/mongo/util/text.h
8 SConscript.smoke
@@ -53,7 +53,7 @@ def addSmoketest( name, deps, extraSmokeArgs=[] ):
smokeArgs = smokeFlags + [target] + extraSmokeArgs
addTest(name, deps, utils.run_smoke_command(*smokeArgs))
-def addSmokeSuite( name, suitefile ):
+def addSmokeSuite( name, suitefile, needMongod=False ):
# Add a smoketest target which invokes smoke.py with
# --from-file, and passes the named suitefile as the
# command line argument.
@@ -61,8 +61,10 @@ def addSmokeSuite( name, suitefile ):
# resolve an initial # in the suitefile
suitefile = str(env.File(suitefile))
- addTest(name, [suitefile],
- utils.run_smoke_command('--mode', 'files', '--from-file', suitefile))
+ smoke_args = ['--mode', 'files', '--from-file', suitefile]
+ if not needMongod:
+ smoke_args.append('--dont-start-mongod')
+ addTest(name, [suitefile], utils.run_smoke_command(*smoke_args))
addSmoketest( "smoke", [ add_exe( "test" ), add_exe( "mongod" ), add_exe( "mongo" ) ] )
addSmoketest( "smokePerf", [ add_exe("perftest") ] )
23 SConstruct
@@ -555,12 +555,12 @@ elif "win32" == os.sys.platform:
env.Append( CPPDEFINES=[ "MONGO_USE_SRW_ON_WINDOWS" ] )
for pathdir in env['ENV']['PATH'].split(os.pathsep):
- if os.path.exists(os.path.join(pathdir, 'cl.exe')):
+ if os.path.exists(os.path.join(pathdir, 'cl.exe')):
print( "found visual studio at " + pathdir )
- break
+ break
else:
- #use current environment
- env['ENV'] = dict(os.environ)
+ #use current environment
+ env['ENV'] = dict(os.environ)
env.Append( CPPDEFINES=[ "_UNICODE" ] )
env.Append( CPPDEFINES=[ "UNICODE" ] )
@@ -861,7 +861,7 @@ def doConfigure(myenv):
# discover modules (subdirectories of db/modules/), and
# load the (python) module for each module's build.py
- modules = moduleconfig.discover_modules('.')
+ modules = moduleconfig.discover_modules('src/mongo/')
# ask each module to configure itself, and return a
# dictionary of name => list_of_sources for each module.
@@ -927,7 +927,6 @@ env.Alias( "style" , [] , [ doStyling ] )
env.AlwaysBuild( "style" )
-
# ---- INSTALL -------
def getSystemInstallName():
@@ -936,19 +935,21 @@ def getSystemInstallName():
n += "-static"
if has_option("nostrip"):
n += "-debugsymbols"
- if nix and os.uname()[2].startswith( "8." ):
+ if nix and os.uname()[2].startswith("8."):
n += "-tiger"
+ if len(env.get("MONGO_MODULES", {})):
+ n += "-" + "-".join(env["MONGO_MODULES"].keys())
+
try:
findSettingsSetup()
import settings
- if "distmod" in dir( settings ):
- n = n + "-" + str( settings.distmod )
+ if "distmod" in dir(settings):
+ n = n + "-" + str(settings.distmod)
except:
pass
-
- dn = GetOption( "distmod" )
+ dn = GetOption("distmod")
if dn and len(dn) > 0:
n = n + "-" + dn
54 buildscripts/aggregate_tracefiles.py
@@ -0,0 +1,54 @@
+import subprocess
+import os
+import sys
+from optparse import OptionParser
+
+""" This script aggregates several tracefiles into one tracefile
+ All but the last argument are input tracefiles or .txt files which list tracefiles.
+ The last argument is the tracefile to which the output will be written
+"""
+def aggregate(inputs, output):
+ """Aggregates the tracefiles given in inputs to a tracefile given by output"""
+ args = ['lcov']
+
+ for name in inputs:
+ args += ['-a', name]
+
+ args += ['-o', output]
+
+ print ' '.join(args)
+
+ return subprocess.call(args)
+
+def getfilesize(path):
+ if not os.path.isfile(path):
+ return 0
+ return os.path.getsize(path)
+
+def main ():
+ inputs = []
+
+ usage = "usage: %prog input1.info input2.info ... output.info"
+ parser = OptionParser(usage=usage)
+
+ (options, args) = parser.parse_args()
+ if len(args) < 2:
+ return "must supply input files"
+
+ for path in args[:-1]:
+ name, ext = os.path.splitext(path)
+
+ if ext == '.info':
+ if getfilesize(path) > 0:
+ inputs.append(path)
+
+ elif ext == '.txt':
+ inputs += [line.strip() for line in open(path)
+ if getfilesize(line.strip()) > 0]
+ else:
+ return "unrecognized file type"
+
+ return aggregate(inputs, args[-1])
+
+if __name__ == '__main__':
+ sys.exit(main())
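
For reference, a hypothetical invocation equivalent to aggregate(['unit.info', 'integration.info'], 'combined.info') above (file names invented; lcov must be on PATH):

import subprocess

# Builds and runs: lcov -a unit.info -a integration.info -o combined.info
subprocess.call(['lcov', '-a', 'unit.info', '-a', 'integration.info',
                 '-o', 'combined.info'])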
21 buildscripts/s3del.py
@@ -16,11 +16,30 @@
def check_dir( bucket , prefix , todel ):
+ deleteAll = False
+
for ( key , modify , etag , size ) in bucket.listdir( prefix=prefix ):
if key.find( todel ) < 0:
continue
print( key )
- time.sleep( 2 )
+
+ if not deleteAll:
+
+ val = raw_input( "Delete (Y,y,n,N):" ).strip()
+
+ if val == "n":
+ print( "skipping this one" )
+ continue
+ elif val == "N":
+ break
+
+ if val == "Y":
+ val = "y"
+ deleteAll = True
+
+ if val != "y":
+ raise Exception( "invalid input :(" )
+
bucket.delete( key )
def clean( todel ):
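
The prompt protocol above is compact enough to misread; a standalone sketch of the same decision table (hypothetical helper, not part of the commit):

def confirm(val):
    # One prompt decision: returns (action, delete_all).
    # 'y' deletes this key; 'Y' deletes this and every remaining key;
    # 'n' skips this key; 'N' stops the scan; anything else is an error.
    if val == 'Y':
        return ('delete', True)
    if val == 'y':
        return ('delete', False)
    if val == 'n':
        return ('skip', False)
    if val == 'N':
        return ('stop', False)
    raise Exception('invalid input :(')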
35 buildscripts/smoke.py
@@ -67,6 +67,7 @@
shell_executable = None
continue_on_failure = None
file_of_commands_mode = False
+start_mongod = True
tests = []
winners = []
@@ -325,20 +326,29 @@ def ternary( b , l="true", r="false" ):
# Blech.
def skipTest(path):
basename = os.path.basename(path)
- parentDir = os.path.basename(os.path.dirname(path))
+ parentPath = os.path.dirname(path)
+ parentDir = os.path.basename(parentPath)
if small_oplog: # For tests running in parallel
if basename in ["cursor8.js", "indexh.js", "dropdb.js"]:
return True
if auth or keyFile: # For tests running with auth
# Skip any tests that run with auth explicitly
- if parentDir == "auth" or "auth" in basename or parentDir == "tool": # SERVER-6368
+ if parentDir == "auth" or "auth" in basename:
return True
# These tests don't pass with authentication due to limitations of the test infrastructure,
# not due to actual bugs.
+ if parentDir == "tool": # SERVER-6368
+ return True
+ if parentPath == mongo_repo: # Skip client tests
+ return True
+ # SERVER-6388
if os.path.join(parentDir,basename) in ["sharding/sync3.js", "sharding/sync6.js", "sharding/parallel.js", "jstests/bench_test1.js", "jstests/bench_test2.js", "jstests/bench_test3.js"]:
return True
+ # SERVER-6972
+ if os.path.join(parentDir,basename) == "sharding/read_pref_rs_client.js":
+ return True
# These tests fail due to bugs
- if os.path.join(parentDir,basename) in ["sharding/sync_conn_cmd.js"]:
+ if os.path.join(parentDir,basename) in ["sharding/sync_conn_cmd.js"]: # SERVER-6327
return True
return False
@@ -441,10 +451,11 @@ def runTest(test):
if r != 0:
raise TestExitFailure(path, r)
- try:
- c = Connection( "127.0.0.1" , int(mongod_port) )
- except Exception,e:
- raise TestServerFailure(path)
+ if start_mongod:
+ try:
+ c = Connection( "127.0.0.1" , int(mongod_port) )
+ except Exception,e:
+ raise TestServerFailure(path)
print ""
@@ -457,7 +468,10 @@ def run_tests(tests):
# The reason we want to use "with" is so that we get __exit__ semantics
# but "with" is only supported on Python 2.5+
- master = mongod(small_oplog_rs=small_oplog_rs,small_oplog=small_oplog,no_journal=no_journal,no_preallocj=no_preallocj,auth=auth).__enter__()
+ if start_mongod:
+ master = mongod(small_oplog_rs=small_oplog_rs,small_oplog=small_oplog,no_journal=no_journal,no_preallocj=no_preallocj,auth=auth).__enter__()
+ else:
+ master = Nothing()
try:
if small_oplog:
slave = mongod(slave=True).__enter__()
@@ -628,8 +642,9 @@ def add_exe(e):
return e
def set_globals(options, tests):
- global mongod_executable, mongod_port, shell_executable, continue_on_failure, small_oplog, small_oplog_rs, no_journal, no_preallocj, auth, keyFile, smoke_db_prefix, test_path
+ global mongod_executable, mongod_port, shell_executable, continue_on_failure, small_oplog, small_oplog_rs, no_journal, no_preallocj, auth, keyFile, smoke_db_prefix, test_path, start_mongod
global file_of_commands_mode
+ start_mongod = options.start_mongod
#Careful, this can be called multiple times
test_path = options.test_path
@@ -789,6 +804,8 @@ def main():
parser.add_option('--with-cleanbb', dest='with_cleanbb', default=False,
action="store_true",
help='Clear database files from previous smoke.py runs')
+ parser.add_option(
+ '--dont-start-mongod', dest='start_mongod', default=True, action='store_false')
# Buildlogger invocation from command line
parser.add_option('--buildlogger-builder', dest='buildlogger_builder', default=None,
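
run_tests() now substitutes a Nothing() object when no mongod is started; its definition is not shown in this diff. A minimal sketch, assuming it is a no-op context-manager stand-in so teardown can call __exit__ unconditionally:

class Nothing(object):
    # Hypothetical no-op stand-in for the mongod context manager.
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        return False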
5 jstests/aggregation/bugs/server6045.js
@@ -52,8 +52,9 @@ var s6045p4 = db.runCommand({aggregate:"aggtype", pipeline: [
]});
// Expected result
var a6045 = {
- "errmsg" : "Pipeline received empty document as argument",
- "ok" : 0
+ "errmsg" : "exception: A pipeline stage specification object must contain exactly one field.",
+ "code" : 16435,
+ "ok" : 0
};
// Asserts
26 jstests/aggregation/bugs/server6861.js
@@ -0,0 +1,26 @@
+// Providing the wrong number of fields in a pipeline stage specification triggers a parsing error.
+// SERVER-6861
+
+t = db.jstests_server6861;
+t.drop();
+
+t.save( { a:1 } );
+
+function assertCode( code, expression ) {
+ assert.eq( code, t.aggregate( expression ).code );
+}
+
+function assertResult( result, expression ) {
+ assert.eq( result, t.aggregate( expression ).result );
+}
+
+// Correct number of fields.
+assertResult( [ { a:1 } ], { $project:{ _id:0, a:1 } } );
+
+// Incorrect number of fields.
+assertCode( 16435, {} );
+assertCode( 16435, { $project:{ _id:0, a:1 }, $group:{ _id:0 } } );
+assertCode( 16435, { $project:{ _id:0, a:1 }, $group:{ _id:0 }, $sort:{ a:1 } } );
+
+// Invalid stage specification.
+assertCode( 16436, { $noSuchStage:{ a:1 } } );
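
The same rule checked from Python, as a hedged sketch (assumes pymongo and a local mongod of the vintage this commit targets; the 16435 code comes from the test above):

from pymongo import MongoClient
from pymongo.errors import OperationFailure

coll = MongoClient().test.jstests_server6861
coll.drop()
coll.insert_one({'a': 1})

# A pipeline stage specification object must contain exactly one field.
try:
    list(coll.aggregate([{'$project': {'_id': 0, 'a': 1},
                          '$group': {'_id': 0}}]))
    raise AssertionError('two-field stage specification was accepted')
except OperationFailure as exc:
    assert exc.code == 16435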
17 jstests/cursorb.js
@@ -0,0 +1,17 @@
+// The 'cursor not found in map -1' warning is not logged when get more exhausts a client cursor.
+// SERVER-6931
+
+t = db.jstests_cursorb;
+t.drop();
+
+// Exhaust a client cursor in get more.
+for( i = 0; i < 200; ++i ) {
+ t.save( { a:i } );
+}
+t.find().itcount();
+
+// Check that the 'cursor not found in map -1' message is not printed. This message indicates an
+// attempt to look up a cursor with an invalid id and should never appear in the log.
+log = db.adminCommand( { getLog:'global' } ).log
+log.forEach( function( line ) { assert( !line.match( /cursor not found in map -1 / ),
+ 'Cursor map lookup with id -1.' ); } );
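
The same log scan can be expressed from Python; a minimal sketch assuming pymongo and a local mongod:

from pymongo import MongoClient

log_lines = MongoClient().admin.command('getLog', 'global')['log']
assert not any('cursor not found in map -1 ' in line for line in log_lines)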
11 jstests/distinct3.js
@@ -16,8 +16,15 @@ for( i = 0; i < 1000; ++i ) {
}
db.getLastError();
-// The idea here is to try and remove the last match for the {a:1} index scan while distinct is yielding.
-p = startParallelShell( 'for( i = 0; i < 2500; ++i ) { db.jstests_distinct3.remove({a:49}); for( j = 0; j < 20; ++j ) { db.jstests_distinct3.save({a:49,c:49,d:j}) } }' );
+// Attempt to remove the last match for the {a:1} index scan while distinct is yielding.
+p = startParallelShell( 'for( i = 0; i < 2500; ++i ) { ' +
+ ' db.jstests_distinct3.remove( { a:49 } ); ' +
+ ' for( j = 0; j < 20; ++j ) { ' +
+ ' db.jstests_distinct3.save( { a:49, c:49, d:j } ); ' +
+ ' } ' +
+ '} ' +
+ '// Wait for the above writes to complete. ' +
+ 'db.getLastError(); ' );
for( i = 0; i < 100; ++i ) {
count = t.distinct( 'c', {$or:[{a:{$gte:0},d:0},{b:{$gte:0}}]} ).length;
41 jstests/evalb.js
@@ -1,17 +1,40 @@
+// Check the return value of a db.eval function running a database query, and ensure the function's
+// contents are logged in the profile log.
-t = db.evalb;
-t.drop();
+// Use a reserved database name to avoid a conflict in the parallel test suite.
+var stddb = db;
+var db = db.getSisterDB( 'evalb' );
-t.save( { x : 3 } );
+function profileCursor() {
+ return db.system.profile.find( { user:username } );
+}
-assert.eq( 3, db.eval( function(){ return db.evalb.findOne().x; } ) , "A" );
+function lastOp() {
+ return profileCursor().sort( { $natural:-1 } ).next();
+}
-db.setProfilingLevel( 2 );
+try {
-assert.eq( 3, db.eval( function(){ return db.evalb.findOne().x; } ) , "B" );
+ username = 'jstests_evalb_user';
+ db.addUser( username, 'password', false, 1 );
+ db.auth( username, 'password' );
-o = db.system.profile.find( { "command.$eval" : { $exists : true } } ).sort( { $natural : -1 } ).limit(1).next();
-assert( tojson(o).indexOf( "findOne().x" ) > 0 , "C : " + tojson( o ) )
+ t = db.evalb;
+ t.drop();
-db.setProfilingLevel( 0 );
+ t.save( { x:3 } );
+ assert.eq( 3, db.eval( function() { return db.evalb.findOne().x; } ), 'A' );
+
+ db.setProfilingLevel( 2 );
+
+ assert.eq( 3, db.eval( function() { return db.evalb.findOne().x; } ), 'B' );
+
+ o = lastOp();
+ assert( tojson( o ).indexOf( 'findOne().x' ) > 0, 'C : ' + tojson( o ) );
+}
+finally {
+
+ db.setProfilingLevel(0);
+ db = stddb;
+}
21 jstests/find_and_modify_server6909.js
@@ -0,0 +1,21 @@
+c = db.find_and_modify_server6906;
+
+
+c.drop();
+
+c.insert( { _id : 5 , a:{ b:1 } } );
+ret = c.findAndModify( { query:{ 'a.b':1 },
+ update:{ $set:{ 'a.b':2 } }, // Ensure the query on 'a.b' no longer matches.
+ new:true } );
+assert.eq( 5, ret._id );
+assert.eq( 2, ret.a.b );
+
+
+c.drop();
+
+c.insert( { _id : null , a:{ b:1 } } );
+ret = c.findAndModify( { query:{ 'a.b':1 },
+ update:{ $set:{ 'a.b':2 } }, // Ensure the query on 'a.b' no longer matches.
+ new:true } );
+assert.eq( 2, ret.a.b );
+
9 jstests/find_and_modify_server6993.js
@@ -0,0 +1,9 @@
+
+c = db.find_and_modify_server6993;
+c.drop();
+
+c.insert( { a:[ 1, 2 ] } );
+
+c.findAndModify( { query:{ a:1 }, update:{ $set:{ 'a.$':5 } } } );
+
+assert.eq( 5, c.findOne().a[ 0 ] );
2  jstests/multiVersion/multi_version_sharding_passthrough.js
@@ -20,6 +20,7 @@ var testsToIgnore = [ /dbadmin/,
/mr_replaceIntoDB/,
/mr_auth/,
/queryoptimizera/,
+ /regex_limit/, // Not compatible with mongod before 2.3
/features2/ ]
var testsThatAreBuggy = [ /apply_ops1/,
@@ -280,6 +281,7 @@ var v22Only = [ /^all3$/,
/^queryoptimizer8$/,
/^queryoptimizer9$/,
/^queryoptimizerb$/,
+ /^queryoptimizerc$/,
/^regex_util$/,
/^regexb$/,
/^remove10$/,
14 jstests/queryoptimizerc.js
@@ -0,0 +1,14 @@
+// Use of an $atomic match expression does not affect choice of index. SERVER-5354
+
+t = db.jstests_queryoptimizerc;
+t.drop();
+
+function checkExplainResults( explain ) {
+ assert.eq( 'BtreeCursor a_1', explain.cursor ); // a:1 index chosen.
+ assert.eq( 1, explain.allPlans.length ); // Only one (optimal) plan is attempted.
+}
+
+t.ensureIndex( { a:1 } );
+
+checkExplainResults( t.find( { a:1 } ).explain( true ) );
+checkExplainResults( t.find( { a:1, $atomic:1 } ).explain( true ) );
22 jstests/regex_limit.js
@@ -0,0 +1,22 @@
+var t = db.regex_limit;
+t.drop();
+
+var repeatStr = function(str, n){
+ return new Array(n + 1).join(str);
+};
+
+t.insert({ z: repeatStr('c', 100000) });
+
+var maxOkStrLen = repeatStr('c', 32764);
+var strTooLong = maxOkStrLen + 'c';
+
+assert(t.findOne({ z: { $regex: maxOkStrLen }}) != null);
+assert.throws(function() {
+ t.findOne({ z: { $regex: strTooLong }});
+});
+
+assert(t.findOne({ z: { $in: [ new RegExp(maxOkStrLen) ]}}) != null);
+assert.throws(function() {
+ t.findOne({ z: { $in: [ new RegExp(strTooLong) ]}});
+});
+
2  jstests/replsets/initial_sync3.js
@@ -40,6 +40,8 @@ assert(!result.secondary, tojson(result));
print("bring 0 back up");
replTest.restart(0);
+print("0 should become primary");
+master = replTest.getMaster();
print("now 1 should be able to initial sync");
assert.soon(function() {
17 jstests/replsets/majority.js
@@ -42,7 +42,10 @@ replTest.stop(5);
replTest.remove(5);
print("should still be able to write to a majority");
-assert.eq(testInsert().err, null);
+var result = testInsert();
+assert.eq(result.err, null);
+// majority should be primary + 2 secondaries
+assert.eq(result.replicatedTo.length, 2);
print("start up some of the arbiters again");
replTest.restart(3);
@@ -71,7 +74,9 @@ testInsert();
replTest.awaitReplication();
print("makes sure majority works");
-assert.eq(testInsert().err, null);
+result = testInsert();
+assert.eq(result.err, null);
+assert.eq(result.replicatedTo.length, 2);
print("setup: 0,1 | 2,3,4");
replTest.partition(0,2);
@@ -84,7 +89,9 @@ replTest.partition(1,4);
print("make sure majority doesn't work");
// primary should now be 2
master = replTest.getMaster();
-assert.eq(testInsert().err, "timeout");
+result = testInsert();
+assert.eq(result.err, "timeout");
+assert.eq(result.replicatedTo.length, 1);
print("bring set back together");
replTest.unPartition(0,2);
@@ -94,7 +101,9 @@ replTest.unPartition(1,4);
master = replTest.getMaster();
print("make sure majority works");
-assert.eq(testInsert().err, null);
+result = testInsert();
+assert.eq(result.err, null);
+assert.eq(result.replicatedTo.length, 2);
replTest.stopSet();
}
4 jstests/replsets/replset5.js
@@ -11,6 +11,7 @@ doTest = function (signal) {
var config = replTest.getReplSetConfig();
config.settings = {};
config.settings.getLastErrorDefaults = { 'w': 3, 'wtimeout': 20000 };
+ config.settings.heartbeatTimeoutSecs = 15;
replTest.initiate(config);
@@ -63,6 +64,9 @@ doTest = function (signal) {
print("replset5.js reconfigure with hidden=1");
config = master.getDB("local").system.replset.findOne();
+
+ assert.eq(15, config.settings.heartbeatTimeoutSecs);
+
config.version++;
config.members[2].priority = 0;
config.members[2].hidden = 1;
2  jstests/replsets/tags.js
@@ -26,6 +26,8 @@ replTest.initiate({_id : name, members :
}});
var master = replTest.getMaster();
+// make everyone catch up before reconfig
+replTest.awaitReplication();
var config = master.getDB("local").system.replset.findOne();
42 jstests/sharding/delete_during_migrate.js
@@ -0,0 +1,42 @@
+// Test migrating a big chunk while deletions are happening within that chunk.
+// Test is slightly non-deterministic, since removes could happen before migrate
+// starts. Protect against that by making the chunk very large.
+
+// start up a new sharded cluster
+var st = new ShardingTest({ shards : 2, mongos : 1 });
+
+// stop balancer since we want manual control for this
+st.stopBalancer();
+
+var dbname = "testDB";
+var coll = "foo";
+var ns = dbname + "." + coll;
+var s = st.s0;
+var t = s.getDB( dbname ).getCollection( coll );
+
+// Create fresh collection with lots of docs
+t.drop();
+for ( i=0; i<200000; i++ ){
+ t.insert( { a : i } );
+}
+
+// enable sharding of the collection. Only 1 chunk.
+t.ensureIndex( { a : 1 } );
+s.adminCommand( { enablesharding : dbname } );
+s.adminCommand( { shardcollection : ns , key: { a : 1 } } );
+
+// start a parallel shell that deletes things
+startMongoProgramNoConnect( "mongo" ,
+ "--host" , getHostName() ,
+ "--port" , st.s0.port ,
+ "--eval" , "db." + coll + ".remove({});" ,
+ dbname );
+
+// migrate while deletions are happening
+var moveResult = s.adminCommand( { moveChunk : ns ,
+ find : { a : 1 } ,
+ to : st.getOther( st.getServer( dbname ) ).name } );
+// check if migration worked
+assert( moveResult.ok , "migration didn't work while doing deletes" );
+
+st.stop();
43 jstests/sharding/deletion_range.js
@@ -1,43 +0,0 @@
-//
-// Tests deletion ranges for a sharded system when using prefix shard key
-//
-
-var st = new ShardingTest({ shards : 2, mongos : 2 });
-
-st.stopBalancer();
-
-var mongos = st.s0;
-var config = mongos.getDB( "config" );
-var admin = mongos.getDB( "admin" );
-var shards = config.shards.find().toArray();
-var shard0 = new Mongo( shards[0].host );
-var shard1 = new Mongo( shards[1].host );
-
-var coll = mongos.getCollection( "foo.bar" );
-
-printjson( admin.runCommand({ enableSharding : coll.getDB() + "" }) );
-printjson( admin.runCommand({ movePrimary : coll.getDB() + "", to : shards[0]._id }) );
-printjson( coll.ensureIndex({ skey : 1, extra : 1 }) );
-printjson( admin.runCommand({ shardCollection : coll + "", key : { skey : 1 } }) );
-
-for( var i = 0; i < 5; i++ ){
- coll.insert({ skey : 0, extra : i });
-}
-assert.eq( null, coll.getDB().getLastError() );
-
-printjson( admin.runCommand({ split : coll + "", middle : { skey : 0 } }) );
-printjson( admin.runCommand({ moveChunk : coll + "", find : { skey : 0 }, to : shards[1]._id }) );
-
-printjson( shard0.getCollection( coll + "" ).find().toArray() );
-printjson( shard1.getCollection( coll + "" ).find().toArray() );
-
-assert( coll.find().itcount() == 5 );
-
-printjson( admin.runCommand({ moveChunk : coll + "", find : { skey : -1 }, to : shards[1]._id }) );
-
-assert.eq( 0 , shard0.getCollection( coll + "" ).find().itcount() );
-assert.eq( 5 , shard1.getCollection( coll + "" ).find().itcount() );
-
-assert( coll.find().itcount() == 5 );
-
-st.stop()
8 jstests/sharding/findandmodify2.js
@@ -103,10 +103,10 @@ s.printChunks();
print("---------- Verifying that both codepaths resulted in splits...");
-assert.gt( s.config.chunks.count({ "ns": "test." + col_fam }), minChunks, "findAndModify update code path didn't result in splits" );
-assert.gt( s.config.chunks.count({ "ns": "test." + col_fam_upsert }), minChunks, "findAndModify upsert code path didn't result in splits" );
-assert.gt( s.config.chunks.count({ "ns": "test." + col_update }), minChunks, "update code path didn't result in splits" );
-assert.gt( s.config.chunks.count({ "ns": "test." + col_update_upsert }), minChunks, "upsert code path didn't result in splits" );
+assert.gte( s.config.chunks.count({ "ns": "test." + col_fam }), minChunks, "findAndModify update code path didn't result in splits" );
+assert.gte( s.config.chunks.count({ "ns": "test." + col_fam_upsert }), minChunks, "findAndModify upsert code path didn't result in splits" );
+assert.gte( s.config.chunks.count({ "ns": "test." + col_update }), minChunks, "update code path didn't result in splits" );
+assert.gte( s.config.chunks.count({ "ns": "test." + col_update_upsert }), minChunks, "upsert code path didn't result in splits" );
printjson( db[col_update].stats() );
13 jstests/sharding/mongos_validate_backoff.js
@@ -26,9 +26,13 @@ var timeBadInsert = function(){
return end - start
}
+// We need to succeed at least twice in order to check that the counter resets
+var successNeeded = 2;
+var success = 0;
+
// Loop over this test a few times, to ensure that the error counters get reset if we don't have
// bad inserts over a long enough time.
-for( var test = 0; test < 3; test++ ){
+for( var test = 0; test < 5; test++ ){
var firstWait = timeBadInsert()
var lastWait = 0
@@ -39,7 +43,12 @@ for( var test = 0; test < 3; test++ ){
// Kind of a heuristic test: we want to make sure that the error wait after sleeping is much less
// than the error wait after a lot of errors
- assert.gt( lastWait, firstWait * 2 * 2 )
+ if( lastWait > firstWait * 2 * 2 ) success++; // Success!
+
+ if( success >= successNeeded ) break;
+
+ // Abort if we've failed too many times
+ assert.lt( test, 4 );
// Sleeping for long enough to reset our exponential counter
sleep( 3000 )
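
The revised test replaces a single hard assertion with a succeed-twice-within-five-trials pattern; a standalone sketch of that pattern (hypothetical helper):

def eventually_succeeds(trial, trials=5, needed=2):
    # Run up to `trials` attempts and pass once `needed` succeed,
    # mirroring the success/successNeeded counters in the test above.
    successes = 0
    for _ in range(trials):
        if trial():
            successes += 1
            if successes >= needed:
                return True
    return False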
77 jstests/sharding/prefix_shard_key.js
@@ -1,14 +1,28 @@
-// Test that you can shard and move chunks around with a shard key that's
-// only a prefix of an existing index
+// Test that you can shard with shard key that's only a prefix of an existing index.
+//
+// Part 1: Shard new collection on {num : 1} with an index on {num : 1, x : 1}.
+// Test that you can split and move chunks around.
+// Part 2: Test that adding an array value for x makes it unusable. Deleting the
+// array value and re-indexing makes it usable again.
+// Part 3: Shard new collection on {skey : 1} but with a longer index.
+// Insert docs with same val for 'skey' but different vals for 'extra'.
+// Move chunks around and check that [min,max) chunk boundaries are properly obeyed.
var s = new ShardingTest({ name : jsTestName(), shards : 2 });
var db = s.getDB( "test" );
var admin = s.getDB( "admin" );
-var coll = db.foo;
+var config = s.getDB( "config" );
+var shards = config.shards.find().toArray();
+var shard0 = new Mongo( shards[0].host );
+var shard1 = new Mongo( shards[1].host );
s.adminCommand( { enablesharding : "test" } );
+//******************Part 1********************
+
+var coll = db.foo;
+
var longStr = 'a';
while ( longStr.length < 1024 * 128 ) { longStr += longStr; }
for( i=0 ; i<100; i++){
@@ -54,6 +68,8 @@ printjson( result3 );
assert.eq( 1, result3.ok , "moveChunk didn't succeed");
+//******************Part 2********************
+
// Test that inserting array values fails because we don't support multi-key indexes for the shard key
coll.save({ num : [1,2], x : 1});
err = db.getLastError();
@@ -76,5 +92,60 @@ var result4 = admin.runCommand( { movechunk : coll.getFullName() , find : { num
printjson( result4 );
assert.eq( 1, result4.ok , "moveChunk failed after rebuilding index");
+//******************Part 3********************
+
+// Check chunk boundaries obeyed when using prefix shard key.
+// This test repeats with shard key as the prefix of different longer indices.
+
+for( i=0; i < 3; i++ ){
+
+ // setup new collection on shard0
+ var coll2 = db.foo2;
+ coll2.drop();
+ var moveRes = admin.runCommand( { movePrimary : coll2.getDB() + "", to : shards[0]._id } );
+ assert.eq( moveRes.ok , 1 , "primary not moved correctly" );
+
+ // declare a longer index
+ if ( i == 0 ) {
+ coll2.ensureIndex( { skey : 1, extra : 1 } );
+ }
+ else if ( i == 1 ) {
+ coll2.ensureIndex( { skey : 1, extra : -1 } );
+ }
+ else if ( i == 2 ) {
+ coll2.ensureIndex( { skey : 1, extra : 1 , superfluous : -1 } );
+ }
+ db.getLastError();
+
+ // then shard collection on prefix
+ var shardRes = admin.runCommand( { shardCollection : coll2 + "", key : { skey : 1 } } );
+ assert.eq( shardRes.ok , 1 , "collection not sharded" );
+
+ // insert docs with same value for skey
+ for( var i = 0; i < 5; i++ ){
+ for( var j = 0; j < 5; j++ ){
+ coll2.insert( { skey : 0, extra : i , superfluous : j } );
+ }
+ }
+ assert.eq( null, coll2.getDB().getLastError() , "inserts didn't work" );
+
+ // split on that key, and check it makes 2 chunks
+ var splitRes = admin.runCommand( { split : coll2 + "", middle : { skey : 0 } } );
+ assert.eq( splitRes.ok , 1 , "split didn't work" );
+ assert.eq( config.chunks.find( { ns : coll2.getFullName() } ).count() , 2 );
+
+ // movechunk should move ALL docs since they have same value for skey
+ var moveRes = admin.runCommand( { moveChunk : coll2 + "", find : { skey : 0 }, to : shards[1]._id } );
+ assert.eq( moveRes.ok , 1 , "movechunk didn't work" );
+
+ // check no orphaned docs on the shards
+ assert.eq( 0 , shard0.getCollection( coll2 + "" ).find().itcount() );
+ assert.eq( 25 , shard1.getCollection( coll2 + "" ).find().itcount() );
+
+ // and check total
+ assert.eq( 25 , coll2.find().itcount() , "bad total number of docs after move" );
+
+ db.printShardingStatus();
+}
s.stop();
4 jstests/sharding/read_pref.js
@@ -112,12 +112,14 @@ assert.eq( primaryNode.name, explain.server );
assert.eq( 1, explain.n );
// Kill all members except one
+var stoppedNodes = [];
for ( var x = 0; x < NODES - 1; x++ ){
replTest.stop( x );
+ stoppedNodes.push( replTest.nodes[x] );
}
// Wait for ReplicaSetMonitor to realize nodes are down
-ReplSetTest.awaitRSClientHosts( conn, replTest.nodes[0], { ok: false }, replTest.name );
+ReplSetTest.awaitRSClientHosts( conn, stoppedNodes, { ok: false }, replTest.name );
// Wait for the last node to be in steady state -> secondary (not recovering)
var lastNode = replTest.nodes[NODES - 1];
1  jstests/sharding/read_pref_rs_client.js
@@ -2,6 +2,7 @@
* Testing read preference on DBClientReplicaSets, specifically on the auto-retry
* and automatic failover selection
*/
+// NOTE: this test is skipped when running smoke.py with --auth because of SERVER-6972
function basicTest() {
var replTest = new ReplSetTest({ name: 'basic', nodes: 2, useHostName: true });
115 jstests/sharding/writeback_bulk_insert.js
@@ -0,0 +1,115 @@
+//
+// Tests whether a writeback error during bulk insert hangs GLE
+//
+
+jsTest.log("Starting sharded cluster...")
+
+var st = new ShardingTest({shards : 1,
+ mongos : 3,
+ verbose : 2,
+ separateConfig : 1})
+
+st.stopBalancer()
+
+var mongosA = st.s0
+var mongosB = st.s1
+var mongosC = st.s2
+
+jsTest.log("Adding new collection...")
+
+var collA = mongosA.getCollection(jsTestName() + ".coll")
+collA.insert({hello : "world"})
+assert.eq(null, collA.getDB().getLastError())
+
+var collB = mongosB.getCollection("" + collA)
+collB.insert({hello : "world"})
+assert.eq(null, collB.getDB().getLastError())
+
+var collC = mongosC.getCollection("" + collA)
+collC.insert({hello : "world"})
+assert.eq(null, collC.getDB().getLastError())
+
+jsTest.log("Enabling sharding...")
+
+printjson(mongosA.getDB("admin").runCommand({enableSharding : collA.getDB()
+ + ""}))
+printjson(mongosA.getDB("admin").runCommand({shardCollection : collA + "",
+ key : {_id : 1}}))
+
+// MongoD doesn't know about the config shard version *until* MongoS tells it
+collA.findOne()
+
+// Preparing insert of exactly 16MB
+
+jsTest.log("Preparing bulk insert...")
+
+var data1MB = "x"
+while (data1MB.length < 1024 * 1024)
+ data1MB += data1MB;
+
+var data7MB = ""
+// Data now at 7MB
+for ( var i = 0; i < 7; i++)
+ data7MB += data1MB;
+
+print("7MB object size is : " + Object.bsonsize({_id : 0,
+ d : data7MB}))
+
+var dataCloseTo8MB = data7MB;
+// WARNING - MAGIC NUMBERS HERE
+// The idea is to exceed the 16MB limit by just enough so that the message gets passed in the
+// shell, but adding additional writeback information fails.
+for ( var i = 0; i < 1031 * 1024 + 862; i++) {
+ dataCloseTo8MB += "x"
+}
+
+print("Object size is: " + Object.bsonsize([{_id : 0,
+ d : dataCloseTo8MB},
+ {_id : 1,
+ d : dataCloseTo8MB}]))
+
+jsTest.log("Trigger wbl for mongosB...")
+
+collB.insert([{_id : 0,
+ d : dataCloseTo8MB},
+ {_id : 1,
+ d : dataCloseTo8MB}])
+
+// Will hang if overflow is not detected correctly
+jsTest.log("Waiting for GLE...")
+
+assert.neq(null, collB.getDB().getLastError())
+
+print("GLE correctly returned error...")
+
+assert.eq(3, collA.find().itcount())
+assert.eq(3, collB.find().itcount())
+
+var data8MB = data7MB; // grow the 7MB payload to a full 8MB below
+for ( var i = 0; i < 1024 * 1024; i++) {
+ data8MB += "x"
+}
+
+print("Object size is: " + Object.bsonsize([{_id : 0,
+ d : data8MB},
+ {_id : 1,
+ d : data8MB}]))
+
+jsTest.log("Trigger wbl for mongosC...")
+
+collC.insert([{_id : 0,
+ d : data8MB},
+ {_id : 1,
+ d : data8MB}])
+
+// Should succeed since our insert size is 16MB (plus very small overhead)
+jsTest.log("Waiting for GLE...")
+
+assert.eq(null, collC.getDB().getLastError())
+
+print("GLE Successful...")
+
+assert.eq(5, collA.find().itcount())
+assert.eq(5, collB.find().itcount())
+
+st.stop()
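
A quick arithmetic check of the magic numbers (the exact BSON document and message overhead is not computed here and accounts for the remaining slack):

one_mb = 1024 * 1024
close_to_8mb = 7 * one_mb + 1031 * 1024 + 862   # 8396638 bytes of 'x' data
print(2 * close_to_8mb)   # 16793276: roughly 16 KB over the 16 MB limit
print(16 * one_mb)        # 16777216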
2  jstests/slowNightly/balance_repl.js
@@ -1,7 +1,7 @@
s = new ShardingTest( "rs1" , 2 /* numShards */, 1 /* verboseLevel */, 1 /* numMongos */, { rs : true , numReplicas : 2 , chunksize : 1 , nopreallocj : true } )
-s.config.settings.update( { _id: "balancer" }, { $set : { stopped: true, _nosleep: true, replThrottle : true } } , true );
+s.config.settings.update( { _id: "balancer" }, { $set : { stopped: true, _nosleep: true, _secondaryThrottle : true } } , true );
db = s.getDB( "test" );
7 jstests/slowNightly/balance_tags1.js
@@ -1,5 +1,5 @@
-s = new ShardingTest( "balance_tags1" , 3 , 1 , 1 , { chunksize : 1 , nopreallocj : true } )
+s = new ShardingTest( "balance_tags1" , 3 , 1 , 1 , { sync:true, chunksize : 1 , nopreallocj : true } )
s.config.settings.update( { _id: "balancer" }, { $set : { stopped: false, _nosleep: true } } , true );
db = s.getDB( "test" );
@@ -11,9 +11,13 @@ db.getLastError();
s.adminCommand( { enablesharding : "test" } )
s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+s.stopBalancer();
+
for ( i=0; i<20; i++ )
s.adminCommand( { split : "test.foo" , middle : { _id : i } } );
+s.startBalancer();
+
sh.status( true )
assert.soon( function() {
counts = s.chunkCounts( "foo" );
@@ -42,6 +46,7 @@ assert.soon( function() {
return counts["shard0002"] == 0;
} , "balance 2 didn't happen" , 1000 * 60 * 10 , 1000 )
+printjson(sh.status());
s.stop();
0  jstests/memory.js → jstests/slowNightly/memory.js
File renamed without changes
24 jstests/splitvector.js
@@ -258,23 +258,47 @@ f.ensureIndex( { x: 1, y: 1 } );
case4();
f.drop();
+f.ensureIndex( { x: 1, y: -1 , z : 1 } );
+case4();
+
+f.drop();
f.ensureIndex( { x: 1, y: 1 } );
case5();
f.drop();
+f.ensureIndex( { x: 1, y: -1 , z : 1 } );
+case5();
+
+f.drop();
f.ensureIndex( { x: 1, y: 1 } );
case6();
f.drop();
+f.ensureIndex( { x: 1, y: -1 , z : 1 } );
+case6();
+
+f.drop();
f.ensureIndex( { x: 1, y: 1 } );
case7();
f.drop();
+f.ensureIndex( { x: 1, y: -1 , z : 1 } );
+case7();
+
+f.drop();
f.ensureIndex( { x: 1, y: 1 } );
case8();
f.drop();
+f.ensureIndex( { x: 1, y: -1 , z : 1 } );
+case8();
+
+f.drop();
f.ensureIndex( { x: 1, y: 1 } );
case9();
+f.drop();
+f.ensureIndex( { x: 1, y: -1 , z : 1 } );
+case9();
+
print("PASSED");
89 jstests/tool/tool_replset.js
@@ -0,0 +1,89 @@
+/*
+ * Test to ensure that (dump/restore/export/import/oplog) works with a replica set connection string
+ * 1. Start a replica set.
+ * 2. Add data to a collection.
+ * 3. Take a dump of the database.
+ * 4. Drop the db.
+ * 5. Restore the db.
+ * 6. Export a collection.
+ * 7. Drop the collection.
+ * 8. Import the collection.
+ * 9. Add data to the oplog.rs collection.
+ * 10. Ensure that the document doesn't exist yet.
+ * 11. Now play the mongooplog tool.
+ * 12. Make sure that the oplog was played
+*/
+
+// Load utility methods for replica set tests
+load("jstests/replsets/rslib.js");
+
+print("starting the replica set")
+
+var replTest = new ReplSetTest({ name: 'tool_replset', nodes: 2, oplogSize: 5 });
+var nodes = replTest.startSet();
+replTest.initiate();
+var master = replTest.getMaster();
+for (var i = 0; i < 100; i++) {
+ master.getDB("foo").bar.insert({ a: i });
+}
+replTest.awaitReplication();
+
+var replSetConnString = "tool_replset/127.0.0.1:" + replTest.ports[0] +
+ ",127.0.0.1:" + replTest.ports[1];
+
+// Test with mongodump/mongorestore
+print("dump the db");
+var data = "/data/db/tool_replset-dump1/";
+runMongoProgram("mongodump", "--host", replSetConnString, "--out", data);
+
+print("db successfully dumped, dropping now");
+master.getDB("foo").dropDatabase();
+replTest.awaitReplication();
+
+print("restore the db");
+runMongoProgram("mongorestore", "--host", replSetConnString, "--dir", data);
+
+print("db successfully restored, checking count")
+var x = master.getDB("foo").getCollection("bar").count();
+assert.eq(x, 100, "mongorestore should have successfully restored the collection");
+
+replTest.awaitReplication();
+
+// Test with mongoexport/mongoimport
+print("export the collection");
+var extFile = "/data/db/tool_replset/export";
+runMongoProgram("mongoexport", "--host", replSetConnString, "--out", extFile,
+ "-d", "foo", "-c", "bar");
+
+print("collection successfully exported, dropping now");
+master.getDB("foo").getCollection("bar").drop();
+replTest.awaitReplication();
+
+print("import the collection");
+runMongoProgram("mongoimport", "--host", replSetConnString, "--file", extFile,
+ "-d", "foo", "-c", "bar");
+
+var x = master.getDB("foo").getCollection("bar").count();
+assert.eq(x, 100, "mongoimport should have successfully imported the collection");
+
+// Test with mongooplog
+var doc = { _id : 5, x : 17 };
+master.getDB("local").oplog.rs.insert({ ts : new Timestamp(), "op" : "i", "ns" : "foo.bar",
+ "o" : doc });
+
+assert.eq(100, master.getDB("foo").getCollection("bar").count(), "count before running mongooplog " +
+ "was not 100 as expected");
+
+runMongoProgram("mongooplog" , "--from", "127.0.0.1:" + replTest.ports[0],
+ "--host", replSetConnString);
+
+print("running mongooplog to replay the oplog")
+
+assert.eq(101, master.getDB("foo").getCollection("bar").count(), "count after running mongooplog " +
+ "was not 101 as expected")
+
+print("all tests successful, stopping replica set")
+
+replTest.stopSet();
+
+print("replica set stopped, test complete")
306 rpm/mongo.spec 100644 → 100755
@@ -1,156 +1,150 @@
-Name: mongo-10gen
-Conflicts: mongo, mongo-10gen-unstable
-Obsoletes: mongo-stable
-Version: 2.1.2
-Release: mongodb_1%{?dist}
-Summary: mongodb client shell and tools
-License: AGPL 3.0
-URL: http://www.mongodb.org
-Group: Applications/Databases
-
-Source0: %{name}-%{version}.tar.gz
-BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root
-BuildRequires: js-devel, readline-devel, boost-devel, pcre-devel
-BuildRequires: gcc-c++, scons
-
-%description
-MongoDB (from "huMONGOus") is a schema-free document-oriented database.
-It features dynamic profileable queries, full indexing, replication
-and fail-over support, efficient storage of large binary data objects,
-and auto-sharding.
-
-This package provides the mongo shell, import/export tools, and other
-client utilities.
-
-%package server
-Summary: mongodb server, sharding server, and support scripts
-Group: Applications/Databases
-Requires: mongo
-Requires(pre): /usr/sbin/useradd
-Requires(pre): /usr/sbin/groupadd
-Requires(post): chkconfig
-Requires(preun): chkconfig
-
-%description server
-MongoDB (from "huMONGOus") is a schema-free document-oriented database.
-
-This package provides the mongo server software, mongo sharding server
-software, default configuration files, and init.d scripts.
-
-%package devel
-Summary: Headers and libraries for mongodb development.
-Group: Applications/Databases
-
-%description devel
-MongoDB (from "huMONGOus") is a schema-free document-oriented database.
-
-This package provides the mongo static library and header files needed
-to develop mongo client software.
-
-%prep
-%setup
-
-%build
-scons -%{?_smp_mflags} -prefix=$RPM_BUILD_ROOT/usr all
-# XXX really should have shared library here
-
-%install
-scons --prefix=$RPM_BUILD_ROOT/usr install
-mkdir -p $RPM_BUILD_ROOT/usr
-cp -rv BINARIES/usr/bin $RPM_BUILD_ROOT/usr
-mkdir -p $RPM_BUILD_ROOT/usr/share/man/man1
-cp debian/*.1 $RPM_BUILD_ROOT/usr/share/man/man1/
-# FIXME: remove this rm when mongosniff is back in the package
-rm -v $RPM_BUILD_ROOT/usr/share/man/man1/mongosniff.1*
-mkdir -p $RPM_BUILD_ROOT/etc/rc.d/init.d
-cp -v rpm/init.d-mongod $RPM_BUILD_ROOT/etc/rc.d/init.d/mongod
-chmod a+x $RPM_BUILD_ROOT/etc/rc.d/init.d/mongod
-mkdir -p $RPM_BUILD_ROOT/etc
-cp -v rpm/mongod.conf $RPM_BUILD_ROOT/etc/mongod.conf
-mkdir -p $RPM_BUILD_ROOT/etc/sysconfig
-cp -v rpm/mongod.sysconfig $RPM_BUILD_ROOT/etc/sysconfig/mongod
-mkdir -p $RPM_BUILD_ROOT/var/lib/mongo
-mkdir -p $RPM_BUILD_ROOT/var/log/mongo
-touch $RPM_BUILD_ROOT/var/log/mongo/mongod.log
-
-%clean
-scons -c
-rm -rf $RPM_BUILD_ROOT
-
-%pre server
-if ! /usr/bin/id -g mongod &>/dev/null; then
- /usr/sbin/groupadd -r mongod
-fi
-if ! /usr/bin/id mongod &>/dev/null; then
- /usr/sbin/useradd -M -r -g mongod -d /var/lib/mongo -s /bin/false \
- -c mongod mongod > /dev/null 2>&1
-fi
-
-%post server
-if test $1 = 1
-then
- /sbin/chkconfig --add mongod
-fi
-
-%preun server
-if test $1 = 0
-then
- /sbin/chkconfig --del mongod
-fi
-
-%postun server
-if test $1 -ge 1
-then
- /sbin/service mongod condrestart >/dev/null 2>&1 || :
-fi
-
-%files
-%defattr(-,root,root,-)
-%doc README GNU-AGPL-3.0.txt
-
-%{_bindir}/mongo
-%{_bindir}/mongodump
-%{_bindir}/mongoexport
-%{_bindir}/mongofiles
-%{_bindir}/mongoimport
-%{_bindir}/mongorestore
-%{_bindir}/mongosniff
-%{_bindir}/mongostat
-%{_bindir}/bsondump
-%{_bindir}/mongotop
-
-%{_mandir}/man1/mongo.1*
-%{_mandir}/man1/mongod.1*
-%{_mandir}/man1/mongodump.1*
-%{_mandir}/man1/mongoexport.1*
-%{_mandir}/man1/mongofiles.1*
-%{_mandir}/man1/mongoimport.1*
-%{_mandir}/man1/mongosniff.1*
-%{_mandir}/man1/mongostat.1*
-%{_mandir}/man1/mongorestore.1*
-%{_mandir}/man1/bsondump.1*
-
-%files server
-%defattr(-,root,root,-)
-%config(noreplace) /etc/mongod.conf
-%{_bindir}/mongod
-%{_bindir}/mongos
-#%{_mandir}/man1/mongod.1*
-%{_mandir}/man1/mongos.1*
-/etc/rc.d/init.d/mongod
-%config(noreplace) /etc/sysconfig/mongod
-#/etc/rc.d/init.d/mongos
-%attr(0755,mongod,mongod) %dir /var/lib/mongo
-%attr(0755,mongod,mongod) %dir /var/log/mongo
-%attr(0755,mongod,mongod) %dir /var/run/mongo
-%attr(0640,mongod,mongod) %config(noreplace) %verify(not md5 size mtime) /var/log/mongo/mongod.log
-
-%changelog
-* Fri Feb 17 2012 Michael A. Fiedler <michael@10gen.com>
-- Added proper pid file usage
-
-* Thu Jan 28 2010 Richard M Kreuter <richard@10gen.com>
-- Minor fixes.
-
-* Sat Oct 24 2009 Joe Miklojcik <jmiklojcik@shopwiki.com> -
-- Wrote mongo.spec.
+Name: mongo-10gen
+Conflicts: mongo, mongo-10gen-unstable
+Obsoletes: mongo-stable
+Version: 2.2.0
+Release: mongodb_1%{?dist}
+Summary: mongo client shell and tools
+License: AGPL 3.0
+URL: http://www.mongodb.org
+Group: Applications/Databases
+
+Source0: %{name}-%{version}.tar.gz
+BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root
+
+%description
+Mongo (from "huMONGOus") is a schema-free document-oriented database.
+It features dynamic profileable queries, full indexing, replication
+and fail-over support, efficient storage of large binary data objects,
+and auto-sharding.
+
+This package provides the mongo shell, import/export tools, and other
+client utilities.
+
+%package server
+Summary: mongo server, sharding server, and support scripts
+Group: Applications/Databases
+Requires: mongo-10gen
+
+%description server
+Mongo (from "huMONGOus") is a schema-free document-oriented database.
+
+This package provides the mongo server software, mongo sharding server
+software, default configuration files, and init.d scripts.
+
+%package devel
+Summary: Headers and libraries for mongo development.
+Group: Applications/Databases
+
+%description devel
+Mongo (from "huMONGOus") is a schema-free document-oriented database.
+
+This package provides the mongo static library and header files needed
+to develop mongo client software.
+
+%prep
+%setup
+
+%build
+#scons --prefix=$RPM_BUILD_ROOT/usr all
+# XXX really should have shared library here
+
+%install
+#scons --prefix=$RPM_BUILD_ROOT/usr install
+mkdir -p $RPM_BUILD_ROOT/usr
+cp -rv BINARIES/usr/bin $RPM_BUILD_ROOT/usr
+mkdir -p $RPM_BUILD_ROOT/usr/share/man/man1
+cp debian/*.1 $RPM_BUILD_ROOT/usr/share/man/man1/
+# FIXME: remove this rm when mongosniff is back in the package
+rm -v $RPM_BUILD_ROOT/usr/share/man/man1/mongosniff.1*
+mkdir -p $RPM_BUILD_ROOT/etc/rc.d/init.d
+cp -v rpm/init.d-mongod $RPM_BUILD_ROOT/etc/rc.d/init.d/mongod
+chmod a+x $RPM_BUILD_ROOT/etc/rc.d/init.d/mongod
+mkdir -p $RPM_BUILD_ROOT/etc
+cp -v rpm/mongod.conf $RPM_BUILD_ROOT/etc/mongod.conf
+mkdir -p $RPM_BUILD_ROOT/etc/sysconfig
+cp -v rpm/mongod.sysconfig $RPM_BUILD_ROOT/etc/sysconfig/mongod
+mkdir -p $RPM_BUILD_ROOT/var/lib/mongo
+mkdir -p $RPM_BUILD_ROOT/var/log/mongo
+touch $RPM_BUILD_ROOT/var/log/mongo/mongod.log
+
+%clean
+#scons -c
+rm -rf $RPM_BUILD_ROOT
+
+%pre server
+if ! /usr/bin/id -g mongod &>/dev/null; then
+ /usr/sbin/groupadd -r mongod
+fi
+if ! /usr/bin/id mongod &>/dev/null; then
+ /usr/sbin/useradd -M -r -g mongod -d /var/lib/mongo -s /bin/false -c mongod mongod > /dev/null 2>&1
+fi
+
+%post server
+if test $1 = 1
+then
+ /sbin/chkconfig --add mongod
+fi
+
+%preun server
+if test $1 = 0
+then
+ /sbin/chkconfig --del mongod
+fi
+
+%postun server
+if test $1 -ge 1
+then
+ /sbin/service mongod condrestart >/dev/null 2>&1 || :
+fi
+
+%files
+%defattr(-,root,root,-)
+#%doc README GNU-AGPL-3.0.txt
+
+%{_bindir}/bsondump
+%{_bindir}/mongo
+%{_bindir}/mongodump
+%{_bindir}/mongoexport
+%{_bindir}/mongofiles
+%{_bindir}/mongoimport
+%{_bindir}/mongooplog
+%{_bindir}/mongoperf
+%{_bindir}/mongorestore
+%{_bindir}/mongotop
+%{_bindir}/mongostat
+# FIXME: uncomment when mongosniff is back in the package
+#%{_bindir}/mongosniff
+
+# FIXME: uncomment this when there's a stable release whose source
+# tree contains a bsondump man page.
+%{_mandir}/man1/bsondump.1*
+%{_mandir}/man1/mongo.1*
+%{_mandir}/man1/mongodump.1*
+%{_mandir}/man1/mongoexport.1*
+%{_mandir}/man1/mongofiles.1*
+%{_mandir}/man1/mongoimport.1*
+%{_mandir}/man1/mongorestore.1*
+%{_mandir}/man1/mongostat.1*
+# FIXME: uncomment when mongosniff is back in the package
+#%{_mandir}/man1/mongosniff.1*
+
+%files server
+%defattr(-,root,root,-)
+%config(noreplace) /etc/mongod.conf
+%{_bindir}/mongod
+%{_bindir}/mongos
+%{_mandir}/man1/mongod.1*
+%{_mandir}/man1/mongos.1*
+/etc/rc.d/init.d/mongod
+/etc/sysconfig/mongod
+#/etc/rc.d/init.d/mongos
+%attr(0755,mongod,mongod) %dir /var/lib/mongo
+%attr(0755,mongod,mongod) %dir /var/log/mongo
+%attr(0640,mongod,mongod) %config(noreplace) %verify(not md5 size mtime) /var/log/mongo/mongod.log
+
+%changelog
+* Thu Jan 28 2010 Richard M Kreuter <richard@10gen.com>
+- Minor fixes.
+
+* Sat Oct 24 2009 Joe Miklojcik <jmiklojcik@shopwiki.com> -
+- Wrote mongo.spec.
4 site_scons/libdeps.py
@@ -138,13 +138,13 @@ def get_libdeps(source, target, env, for_signature):
"""
target = env.Flatten([target])
- return list(__get_libdeps(target[0], 'LIBDEPS'))
+ return sorted_by_str(__get_libdeps(target[0], 'LIBDEPS'))
def get_libdeps_objs(source, target, env, for_signature):
objs = set()
for lib in get_libdeps(source, target, env, for_signature):
objs.update(lib.sources_set)
- return list(objs)
+ return sorted_by_str(objs)
def get_libdeps_special_sun(source, target, env, for_signature):
x = get_libdeps(source, target, env, for_signature )
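
Both hunks now call sorted_by_str, whose definition is not shown in this diff. A plausible minimal implementation, assuming it orders SCons nodes by their string form so library and object lists no longer depend on set iteration order:

def sorted_by_str(iterable):
    # Hypothetical definition: deterministic ordering by node path.
    return sorted(iterable, key=str)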
22 src/mongo/SConscript
@@ -13,7 +13,8 @@ Import("usesm usev8")
Import("installSetup")
Import("darwin windows solaris linux nix")
-env.SConscript(['platform/SConscript',
+env.SConscript(['base/SConscript',
+ 'platform/SConscript',
'unittest/SConscript'])
def add_exe( v ):
@@ -42,6 +43,7 @@ env.StaticLibrary('md5', [
])
env.StaticLibrary('bson', [
+ 'util/safe_num.cpp',
'bson/oid.cpp',
'db/nonce.cpp',
'db/jsobj.cpp',
@@ -51,6 +53,9 @@ env.StaticLibrary('bson', [
'stringutils',
])
+env.CppUnitTest('safe_num_test', ['util/safe_num_test.cpp'],
+ LIBDEPS=['bson'])
+
commonFiles = [ "pch.cpp",
"buildinfo.cpp",
"db/hasher.cpp",
@@ -440,11 +445,22 @@ if has_option( "sharedclient" ):
# dbtests test binary
env.StaticLibrary('testframework', ['dbtests/framework.cpp'], LIBDEPS=['unittest/unittest'])
+env.StaticLibrary('mocklib', [f for f in Glob( "dbtests/mock/*.cpp" )],
+ LIBDEPS=['unittest/unittest', 'mongocommon'])
test = testEnv.Install(
'#/',
- testEnv.Program( "test", [ f for f in Glob( "dbtests/*.cpp" ) if not str( f ).endswith( 'framework.cpp' ) ],
- LIBDEPS=["mongocommon", "serveronly", "coreserver", "coredb", "testframework", "gridfs", "notmongodormongos" ] ) )
+ testEnv.Program("test",
+ [ f for f in Glob("dbtests/*.cpp") if not str(f).endswith('framework.cpp') ],
+ LIBDEPS = [
+ "mongocommon",
+ "serveronly",
+ "coreserver",
+ "coredb",
+ "testframework",
+ "gridfs",
+ "notmongodormongos",
+ "mocklib"]))
if len(testEnv.subst('$PROGSUFFIX')):
testEnv.Alias( "test", "#/${PROGPREFIX}test${PROGSUFFIX}" )
18 src/mongo/base/SConscript
@@ -0,0 +1,18 @@
+# -*- mode: python -*-
+
+Import("env")
+
+env.StaticLibrary('base', ['initializer_dependency_graph.cpp',
+ 'make_string_vector.cpp',
+ 'status.cpp'])
+
+env.CppUnitTest('initializer_dependency_graph_test',
+ ['initializer_dependency_graph_test.cpp'],
+ LIBDEPS=['base'])
+
+env.CppUnitTest('owned_pointer_vector_test',
+ ['owned_pointer_vector_test.cpp'],
+ LIBDEPS=['base'])
+
+env.CppUnitTest('status_test', 'status_test.cpp',
+ LIBDEPS=['base'])
33 src/mongo/base/disallow_copying.h
@@ -0,0 +1,33 @@
+/* Copyright 2012 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+/**
+ * Instructs the compiler not to generate the default copy constructor and assignment
+ * operator for class "CLASS". Must be the _first_ or _last_ line of the class
+ * declaration; prefer to use it as the first line.
+ *
+ * Usage:
+ * class Foo {
+ * MONGO_DISALLOW_COPYING(Foo);
+ * public:
+ * ...
+ * };
+ */
+#define MONGO_DISALLOW_COPYING(CLASS) \
+ private: \
+ CLASS(const CLASS&); \
+ CLASS& operator=(const CLASS&)
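A minimal usage sketch of the macro (the class name "Connection" below is hypothetical, not part of this commit). Copying fails at compile time because the copy constructor and assignment operator are declared private and never defined:

#include "mongo/base/disallow_copying.h"

class Connection {
    MONGO_DISALLOW_COPYING(Connection);  // must come first or last in the class body
public:
    Connection() {}
};

int main() {
    Connection a;
    Connection b;
    // Connection c(a);  // would not compile: copy constructor is private
    // b = a;            // would not compile: assignment operator is private
    return 0;
}

Note that the macro leaves the class in a "private:" access section, which is why the header comment recommends placing it on the first line, immediately before an explicit "public:".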
91 src/mongo/base/error_codes.h
@@ -0,0 +1,91 @@
+/* Copyright 2012 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <string>
+
+namespace mongo {
+
+ /**
+     * This is a generated class containing a table of error codes and their corresponding
+ * error strings. The class is derived from the definitions in the error_codes.err
+ * file.
+ *
+ * TODO: Do not update this file directly. Update error_codes.err instead.
+ *
+ * # Error table
+ * [OK, "ok"]
+ * [InternalError, 1]
+ * [BadValue, 2]
+ * ...
+ * [HostUnreachable, <nnnn>]
+ * [HostNotFound, <nnnn>]
+ *
+ * # Error classes
+ * [NetworkError, [HostUnreachable, HostNotFound]]
+ *
+ */
+ class ErrorCodes {
+ public:
+ enum Error {
+ OK = 0,
+ InternalError = 1,
+ BadValue = 2,
+ DuplicateKey = 3,
+ NoSuchKey = 4,
+ GraphContainsCycle = 5,
+ HostUnreachable = 6,
+ HostNotFound = 7,
+ MaxError
+ };
+
+ static const char* errorString(Error err) {
+ switch (err) {
+ case OK:
+ return "OK";
+ case InternalError:
+ return "InternalError";
+ case BadValue:
+ return "BadValue";
+ case NoSuchKey:
+ return "NoSuchKey";
+ case HostUnreachable:
+ return "HostUnreachable";
+ case HostNotFound:
+ return "HostNotFound";
+ case DuplicateKey:
+ return "DuplicateKey";
+ case GraphContainsCycle:
+ return "GraphContainsCycle";
+ default:
+ return "Unknown error code";
+ }
+ }
+
+ static bool isNetworkError(Error err) {
+ switch (err) {
+ case HostUnreachable:
+ case HostNotFound:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ };
+
+} // namespace mongo
+
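A short sketch of how the table above might be used (illustrative only, not part of the commit):

#include <iostream>

#include "mongo/base/error_codes.h"

int main() {
    const mongo::ErrorCodes::Error err = mongo::ErrorCodes::HostUnreachable;
    std::cout << mongo::ErrorCodes::errorString(err) << std::endl;    // "HostUnreachable"
    std::cout << mongo::ErrorCodes::isNetworkError(err) << std::endl; // 1 (true)
    return 0;
}

Because errorString() falls through to "Unknown error code" for unrecognized values, callers can safely print any Error value, including MaxError.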
149 src/mongo/base/initializer_dependency_graph.cpp
@@ -0,0 +1,149 @@
+/* Copyright 2012 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "mongo/base/initializer_dependency_graph.h"
+
+#include <algorithm>
+#include <iterator>
+
+namespace mongo {
+
+ InitializerDependencyGraph::InitializerDependencyGraph() {}
+ InitializerDependencyGraph::~InitializerDependencyGraph() {}
+
+ Status InitializerDependencyGraph::addInitializer(const std::string& name,
+ const InitializerFunction& fn,
+ const std::vector<std::string>& prerequisites,
+ const std::vector<std::string>& dependents) {
+ if (!fn)
+ return Status(ErrorCodes::BadValue, "Illegal to supply a NULL function");
+
+ NodeData& newNode = _nodes[name];
+ if (newNode.fn) {
+ return Status(ErrorCodes::DuplicateKey, name);
+ }
+
+ newNode.fn = fn;
+
+ for (size_t i = 0; i < prerequisites.size(); ++i) {
+ newNode.prerequisites.insert(prerequisites[i]);
+ }
+
+ for (size_t i = 0; i < dependents.size(); ++i) {
+ _nodes[dependents[i]].prerequisites.insert(name);
+ }
+
+ return Status::OK;
+ }
+
+ InitializerFunction InitializerDependencyGraph::getInitializerFunction(
+ const std::string& name) const {
+
+ NodeMap::const_iterator iter = _nodes.find(name);
+ if (iter == _nodes.end())
+ return InitializerFunction();
+ return iter->second.fn;
+ }
+
+ Status InitializerDependencyGraph::topSort(std::vector<std::string>* sortedNames) const {
+ /*
+ * This top-sort is implemented by performing a depth-first traversal of the dependency
+ * graph, once for each node. "visitedNodeNames" tracks the set of node names ever visited,
+ * and it is used to prune each DFS. A node that has been visited once on any DFS is never
+ * visited again. Complexity of this implementation is O(n+m) where "n" is the number of
+ * nodes and "m" is the number of prerequisite edges. Space complexity is O(n), in both
+ * stack space and size of the "visitedNodeNames" set.
+ *
+ * "inProgressNodeNames" is used to detect and report cycles.
+ */
+
+ std::vector<std::string> inProgressNodeNames;
+ unordered_set<std::string> visitedNodeNames;
+
+ sortedNames->clear();
+ for (NodeMap::const_iterator iter = _nodes.begin(), end = _nodes.end();
+ iter != end; ++iter) {
+
+ Status status = recursiveTopSort(_nodes,
+ *iter,
+ &inProgressNodeNames,
+ &visitedNodeNames,
+ sortedNames);
+ if (Status::OK != status)
+ return status;
+ }
+ return Status::OK;
+ }
+
+ Status InitializerDependencyGraph::recursiveTopSort(
+ const NodeMap& nodeMap,
+ const Node& currentNode,
+ std::vector<std::string>* inProgressNodeNames,
+ unordered_set<std::string>* visitedNodeNames,
+ std::vector<std::string>* sortedNames) {
+
+ /*
+ * The top sort is performed by depth-first traversal starting at each node in the
+ * dependency graph, short-circuited any time a node is seen that has already been visited
+ * in any traversal. "visitedNodeNames" is the set of nodes that have been successfully
+ * visited, while "inProgressNodeNames" are nodes currently in the exploration chain. This
+ * structure is kept explicitly to facilitate cycle detection.
+ *
+ * This function implements a depth-first traversal, and is called once for each node in the
+ * graph by topSort(), above.
+ */
+
+        if (visitedNodeNames->count(currentNode.first))
+ return Status::OK;
+
+ if (!currentNode.second.fn)
+ return Status(ErrorCodes::BadValue, currentNode.first);
+
+ inProgressNodeNames->push_back(currentNode.first);
+
+        std::vector<std::string>::iterator firstOccurrence = std::find(
+            inProgressNodeNames->begin(), inProgressNodeNames->end(), currentNode.first);
+        if (firstOccurrence + 1 != inProgressNodeNames->end()) {
+            sortedNames->clear();
+            std::copy(firstOccurrence, inProgressNodeNames->end(), std::back_inserter(*sortedNames));
+ return Status(ErrorCodes::GraphContainsCycle, "Cycle in dependency graph");
+ }
+
+ for (unordered_set<std::string>::const_iterator
+ iter = currentNode.second.prerequisites.begin(),
+ end = currentNode.second.prerequisites.end();
+ iter != end; ++iter) {
+
+ NodeMap::const_iterator nextNode = nodeMap.find(*iter);
+ if (nextNode == nodeMap.end())
+ return Status(ErrorCodes::BadValue, *iter);
+
+ Status status = recursiveTopSort(nodeMap,
+ *nextNode,
+ inProgressNodeNames,
+ visitedNodeNames,
+ sortedNames);
+ if (Status::OK != status)
+ return status;
+ }
+ sortedNames->push_back(currentNode.first);
+ if (inProgressNodeNames->back() != currentNode.first)
+ return Status(ErrorCodes::InternalError, "inProgressNodeNames stack corrupt");
+ inProgressNodeNames->pop_back();
+ visitedNodeNames->insert(currentNode.first);
+ return Status::OK;
+ }
+
+} // namespace mongo
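The comments in topSort() and recursiveTopSort() describe an O(n+m) depth-first top sort with cycle detection. Below is a self-contained sketch of the same idea, independent of the mongo types (all names are illustrative, and the real code's handling of missing nodes as errors is omitted):

#include <iostream>
#include <map>
#include <set>
#include <string>
#include <vector>

typedef std::map<std::string, std::set<std::string> > Graph;  // node -> its prerequisites

static bool dfs(const Graph& g,
                const std::string& node,
                std::set<std::string>* visited,
                std::set<std::string>* inProgress,
                std::vector<std::string>* sorted) {
    if (visited->count(node))
        return true;   // finished on an earlier traversal; nothing to do
    if (!inProgress->insert(node).second)
        return false;  // node re-entered while still open: dependency cycle
    Graph::const_iterator it = g.find(node);
    if (it != g.end()) {
        for (std::set<std::string>::const_iterator p = it->second.begin();
             p != it->second.end(); ++p) {
            if (!dfs(g, *p, visited, inProgress, sorted))
                return false;
        }
    }
    inProgress->erase(node);
    visited->insert(node);
    sorted->push_back(node);  // emitted only after all of its prerequisites
    return true;
}

int main() {
    Graph g;
    g["b"].insert("a");  // b depends on a
    g["c"].insert("b");  // c depends on b
    std::set<std::string> visited, inProgress;
    std::vector<std::string> sorted;
    for (Graph::const_iterator it = g.begin(); it != g.end(); ++it) {
        if (!dfs(g, it->first, &visited, &inProgress, &sorted)) {
            std::cout << "cycle detected" << std::endl;
            return 1;
        }
    }
    for (size_t i = 0; i < sorted.size(); ++i)
        std::cout << sorted[i] << std::endl;  // prints: a b c
    return 0;
}

As in the real implementation, a node is appended to the output only after all of its prerequisites, and revisiting a node that is still "in progress" signals a cycle.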
118 src/mongo/base/initializer_dependency_graph.h
@@ -0,0 +1,118 @@
+/* Copyright 2012 10gen Inc.
+ *