// d_migrate.cpp
/**
* Copyright (C) 2008 10gen Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
these are commands that live in mongod
mostly around shard management and checking
*/
#include "pch.h"
#include <map>
#include <string>
#include <algorithm>
#include "../db/commands.h"
#include "../db/jsobj.h"
#include "../db/dbmessage.h"
#include "../db/query.h"
#include "../db/cmdline.h"
#include "../db/queryoptimizer.h"
#include "../db/btree.h"
#include "../db/repl_block.h"
#include "../db/dur.h"
#include "../client/connpool.h"
#include "../client/distlock.h"
#include "../util/queue.h"
#include "../util/unittest.h"
#include "../util/processinfo.h"
#include "shard.h"
#include "d_logic.h"
#include "config.h"
#include "chunk.h"
using namespace std;
namespace mongo {
class MoveTimingHelper {
public:
MoveTimingHelper( const string& where , const string& ns , BSONObj min , BSONObj max , int total )
: _where( where ) , _ns( ns ) , _next( 0 ) , _total( total ) {
_nextNote = 0;
_b.append( "min" , min );
_b.append( "max" , max );
}
~MoveTimingHelper() {
// even if logChange doesn't throw, bson does
// sigh
try {
if ( _next != _total ) {
note( "aborted" );
}
configServer.logChange( (string)"moveChunk." + _where , _ns, _b.obj() );
}
catch ( const std::exception& e ) {
log( LL_WARNING ) << "couldn't record timing for moveChunk '" << _where << "': " << e.what() << endl;
}
}
void done( int step ) {
assert( step == ++_next );
assert( step <= _total );
stringstream ss;
ss << "step" << step;
string s = ss.str();
CurOp * op = cc().curop();
if ( op )
op->setMessage( s.c_str() );
else
log( LL_WARNING ) << "op is null in MoveTimingHelper::done" << endl;
_b.appendNumber( s , _t.millis() );
_t.reset();
#if 0
// debugging for memory leak?
ProcessInfo pi;
ss << " v:" << pi.getVirtualMemorySize()
<< " r:" << pi.getResidentSize();
log() << ss.str() << endl;
#endif
}
void note( const string& s ) {
string field = "note";
if ( _nextNote > 0 ) {
StringBuilder buf;
buf << "note" << _nextNote;
field = buf.str();
}
_nextNote++;
_b.append( field , s );
}
private:
Timer _t;
string _where;
string _ns;
int _next;
int _total; // expected # of steps
int _nextNote;
BSONObjBuilder _b;
};
struct OldDataCleanup {
static AtomicUInt _numThreads; // how many threads are doing async cleanup
string ns;
BSONObj min;
BSONObj max;
set<CursorId> initial;
OldDataCleanup(){
_numThreads++;
}
OldDataCleanup( const OldDataCleanup& other ) {
ns = other.ns;
min = other.min.getOwned();
max = other.max.getOwned();
initial = other.initial;
_numThreads++;
}
~OldDataCleanup(){
_numThreads--;
}
void doRemove() {
ShardForceVersionOkModeBlock sf;
writelock lk(ns);
RemoveSaver rs("moveChunk",ns,"post-cleanup");
long long num = Helpers::removeRange( ns , min , max , true , false , cmdLine.moveParanoia ? &rs : 0 );
log() << "moveChunk deleted: " << num << endl;
}
};
AtomicUInt OldDataCleanup::_numThreads = 0;
static const char * const cleanUpThreadName = "cleanupOldData";
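// Descriptive note (added for clarity, not in the original source): _cleanupOldData is the body of a
// background thread (see the cleanupOldData wrapper below). It waits, for up to 15 minutes, for the
// cursors that were open on this namespace when the migration committed to go away, then deletes the
// migrated [min, max) range via OldDataCleanup::doRemove().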
void _cleanupOldData( OldDataCleanup cleanup ) {
Client::initThread( cleanUpThreadName );
log() << " (start) waiting to cleanup " << cleanup.ns << " from " << cleanup.min << " -> " << cleanup.max << " # cursors:" << cleanup.initial.size() << endl;
int loops = 0;
Timer t;
while ( t.seconds() < 900 ) { // 15 minutes
assert( dbMutex.getState() == 0 );
sleepmillis( 20 );
set<CursorId> now;
ClientCursor::find( cleanup.ns , now );
set<CursorId> left;
for ( set<CursorId>::iterator i=cleanup.initial.begin(); i!=cleanup.initial.end(); ++i ) {
CursorId id = *i;
if ( now.count(id) )
left.insert( id );
}
if ( left.size() == 0 )
break;
cleanup.initial = left;
if ( ( loops++ % 200 ) == 0 ) {
log() << " (looping " << loops << ") waiting to cleanup " << cleanup.ns << " from " << cleanup.min << " -> " << cleanup.max << " # cursors:" << cleanup.initial.size() << endl;
stringstream ss;
for ( set<CursorId>::iterator i=cleanup.initial.begin(); i!=cleanup.initial.end(); ++i ) {
CursorId id = *i;
ss << id << " ";
}
log() << " cursors: " << ss.str() << endl;
}
}
cleanup.doRemove();
cc().shutdown();
}
void cleanupOldData( OldDataCleanup cleanup ) {
try {
_cleanupOldData( cleanup );
}
catch ( std::exception& e ) {
log() << " error cleaning old data:" << e.what() << endl;
}
catch ( ... ) {
log() << " unknown error cleaning old data" << endl;
}
}
class ChunkCommandHelper : public Command {
public:
ChunkCommandHelper( const char * name )
: Command( name ) {
}
virtual void help( stringstream& help ) const {
help << "internal - should not be called directly" << endl;
}
virtual bool slaveOk() const { return false; }
virtual bool adminOnly() const { return true; }
virtual LockType locktype() const { return NONE; }
};
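// Illustrative example (not from the original source): with a shard key { x: 1 },
// min = { x: 0 } and max = { x: 100 }, a document { _id: ..., x: 42, y: "..." } extracts to
// { x: 42 }, which compares >= min and < max, so isInRange returns true; a document with
// x: 100 is excluded because the max bound is exclusive.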
bool isInRange( const BSONObj& obj , const BSONObj& min , const BSONObj& max ) {
BSONObj k = obj.extractFields( min, true );
return k.woCompare( min ) >= 0 && k.woCompare( max ) < 0;
}
class MigrateFromStatus {
public:
MigrateFromStatus() : _m("MigrateFromStatus") {
_active = false;
_inCriticalSection = false;
_memoryUsed = 0;
}
void start( string ns , const BSONObj& min , const BSONObj& max ) {
scoped_lock l(_m); // reads and writes _active
assert( ! _active );
assert( ! min.isEmpty() );
assert( ! max.isEmpty() );
assert( ns.size() );
_ns = ns;
_min = min;
_max = max;
assert( _cloneLocs.size() == 0 );
assert( _deleted.size() == 0 );
assert( _reload.size() == 0 );
assert( _memoryUsed == 0 );
_active = true;
}
void done() {
readlock lk( _ns );
_deleted.clear();
_reload.clear();
_cloneLocs.clear();
_memoryUsed = 0;
scoped_lock l(_m);
_active = false;
_inCriticalSection = false;
}
void logOp( const char * opstr , const char * ns , const BSONObj& obj , BSONObj * patt ) {
if ( ! _getActive() )
return;
if ( _ns != ns )
return;
// no need to log if this is not an insertion, an update, or an actual deletion
// note: opstr 'db' isn't a deletion but a mention that a database exists (for replication
// machinery mostly)
char op = opstr[0];
if ( op == 'n' || op =='c' || ( op == 'd' && opstr[1] == 'b' ) )
return;
BSONElement ide;
if ( patt )
ide = patt->getField( "_id" );
else
ide = obj["_id"];
if ( ide.eoo() ) {
log( LL_WARNING ) << "logOpForSharding got mod with no _id, ignoring obj: " << obj << endl;
return;
}
BSONObj it;
switch ( opstr[0] ) {
case 'd': {
if ( getThreadName() == cleanUpThreadName ) {
// we don't want to xfer things we're cleaning
// as then they'll be deleted on TO
// which is bad
return;
}
// can't filter deletes :(
_deleted.push_back( ide.wrap() );
_memoryUsed += ide.size() + 5;
return;
}
case 'i':
it = obj;
break;
case 'u':
if ( ! Helpers::findById( cc() , _ns.c_str() , ide.wrap() , it ) ) {
log( LL_WARNING ) << "logOpForSharding couldn't find: " << ide << " even though should have" << endl;
return;
}
break;
}
if ( ! isInRange( it , _min , _max ) )
return;
_reload.push_back( ide.wrap() );
_memoryUsed += ide.size() + 5;
}
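/**
 * descriptive comment added for clarity (not in the original source):
 * helper for transferMods below - drains ids from the given list into the BSON array 'name',
 * stopping once roughly 1MB has been accumulated; when 'explode' is true each id is resolved
 * to the full current document via Helpers::findById (used for 'reload'), otherwise the raw
 * _id wrappers are appended as-is (used for 'deleted')
 */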
void xfer( list<BSONObj> * l , BSONObjBuilder& b , const char * name , long long& size , bool explode ) {
const long long maxSize = 1024 * 1024;
if ( l->size() == 0 || size > maxSize )
return;
BSONArrayBuilder arr(b.subarrayStart(name));
list<BSONObj>::iterator i = l->begin();
while ( i != l->end() && size < maxSize ) {
BSONObj t = *i;
if ( explode ) {
BSONObj it;
if ( Helpers::findById( cc() , _ns.c_str() , t, it ) ) {
arr.append( it );
size += it.objsize();
}
}
else {
arr.append( t );
}
i = l->erase( i );
size += t.objsize();
}
arr.done();
}
/**
* called from the dest of a migrate
* transfers mods from src to dest
*/
bool transferMods( string& errmsg , BSONObjBuilder& b ) {
if ( ! _getActive() ) {
errmsg = "no active migration!";
return false;
}
long long size = 0;
{
readlock rl( _ns );
Client::Context cx( _ns );
xfer( &_deleted , b , "deleted" , size , false );
xfer( &_reload , b , "reload" , size , true );
}
b.append( "size" , size );
return true;
}
/**
* Get the disklocs that belong to the chunk migrated and sort them in _cloneLocs (to avoid seeking disk later)
*
* @param maxChunkSize number of bytes beyond which a chunk's base data (no indices) is considered too large to move
* @param errmsg filled with textual description of error if this call returns false
* @return false if approximate chunk size is too big to move or true otherwise
*/
bool storeCurrentLocs( long long maxChunkSize , string& errmsg , BSONObjBuilder& result ) {
readlock l( _ns );
Client::Context ctx( _ns );
NamespaceDetails *d = nsdetails( _ns.c_str() );
if ( ! d ) {
errmsg = "ns not found, should be impossible";
return false;
}
BSONObj keyPattern;
// the copies are needed because the indexDetailsForRange destroys the input
BSONObj min = _min.copy();
BSONObj max = _max.copy();
IndexDetails *idx = indexDetailsForRange( _ns.c_str() , errmsg , min , max , keyPattern );
if ( idx == NULL ) {
errmsg = "can't find index in storeCurrentLocs";
return false;
}
scoped_ptr<ClientCursor> cc( new ClientCursor( QueryOption_NoCursorTimeout ,
shared_ptr<Cursor>( new BtreeCursor( d , d->idxNo(*idx) , *idx , min , max , false , 1 ) ) ,
_ns ) );
// use the average object size to estimate how many objects a full chunk would carry
// do that while traversing the chunk's range using the sharding index, below
// there's a fair amount of slack before we determine a chunk is too large because object sizes will vary
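// worked example (illustrative numbers, not from the original source): with maxChunkSize = 64MB and an
// average record size of 1KB, maxRecsWhenFull starts at 65536 records and the 30% slack below bumps it to ~85k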
unsigned long long maxRecsWhenFull;
long long avgRecSize;
const long long totalRecs = d->stats.nrecords;
if ( totalRecs > 0 ) {
avgRecSize = d->stats.datasize / totalRecs;
maxRecsWhenFull = maxChunkSize / avgRecSize;
maxRecsWhenFull = 130 * maxRecsWhenFull / 100; // slack
}
else {
avgRecSize = 0;
maxRecsWhenFull = numeric_limits<long long>::max();
}
// do a full traversal of the chunk and don't stop even if we think it is a large chunk
// we want the full number of records so we can report it in that case
bool isLargeChunk = false;
unsigned long long recCount = 0;
while ( cc->ok() ) {
DiskLoc dl = cc->currLoc();
if ( ! isLargeChunk ) {
_cloneLocs.insert( dl );
}
cc->advance();
// we can afford to yield here because any change to the base data that we might miss is already being
// queued and will be migrated in the 'transferMods' stage
if ( ! cc->yieldSometimes() ) {
break;
}
if ( ++recCount > maxRecsWhenFull ) {
isLargeChunk = true;
}
}
if ( isLargeChunk ) {
warning() << "can't move chunk of size (aprox) " << recCount * avgRecSize
<< " because maximum size allowed to move is " << maxChunkSize
<< " ns: " << _ns << " " << _min << " -> " << _max
<< endl;
result.appendBool( "chunkTooBig" , true );
result.appendNumber( "chunkSize" , (long long)(recCount * avgRecSize) );
errmsg = "chunk too big to move";
return false;
}
log() << "moveChunk number of documents: " << _cloneLocs.size() << endl;
return true;
}
bool clone( string& errmsg , BSONObjBuilder& result ) {
if ( ! _getActive() ) {
errmsg = "not active";
return false;
}
readlock l( _ns );
Client::Context ctx( _ns );
NamespaceDetails *d = nsdetails( _ns.c_str() );
assert( d );
BSONArrayBuilder a( std::min( BSONObjMaxUserSize , (int)( ( 12 + d->averageObjectSize() )* _cloneLocs.size() ) ) );
set<DiskLoc>::iterator i = _cloneLocs.begin();
for ( ; i!=_cloneLocs.end(); ++i ) {
DiskLoc dl = *i;
BSONObj o = dl.obj();
// use the builder size instead of accumulating 'o's size so that we take into consideration
// the overhead of BSONArray indices
if ( a.len() + o.objsize() + 1024 > BSONObjMaxUserSize ) {
break;
}
a.append( o );
}
result.appendArray( "objects" , a.arr() );
_cloneLocs.erase( _cloneLocs.begin() , i );
return true;
}
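// Descriptive note (added for clarity, not in the original source): called just before a document is
// deleted; its DiskLoc may be reused, so it must be dropped from the pending clone set. The deletion
// itself still reaches the TO-shard through logOp ('d') above, unless it comes from the cleanup thread.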
void aboutToDelete( const Database* db , const DiskLoc& dl ) {
dbMutex.assertWriteLocked();
if ( ! _getActive() )
return;
if ( ! db->ownsNS( _ns ) )
return;
_cloneLocs.erase( dl );
}
long long mbUsed() const { return _memoryUsed / ( 1024 * 1024 ); }
bool getInCriticalSection() const { scoped_lock l(_m); return _inCriticalSection; }
void setInCriticalSection( bool b ) { scoped_lock l(_m); _inCriticalSection = b; }
bool isActive() const { return _getActive(); }
private:
mutable mongo::mutex _m; // protect _inCriticalSection and _active
bool _inCriticalSection;
bool _active;
string _ns;
BSONObj _min;
BSONObj _max;
// disk locs yet to be transferred from here to the other side
// no locking needed because built by 1 thread in a read lock
// depleted by 1 thread in a read lock
// updates applied by 1 thread in a write lock
set<DiskLoc> _cloneLocs;
list<BSONObj> _reload; // objects that were modified that must be recloned
list<BSONObj> _deleted; // objects deleted during clone that should be deleted later
long long _memoryUsed; // bytes in _reload + _deleted
bool _getActive() const { scoped_lock l(_m); return _active; }
void _setActive( bool b ) { scoped_lock l(_m); _active = b; }
} migrateFromStatus;
struct MigrateStatusHolder {
MigrateStatusHolder( string ns , const BSONObj& min , const BSONObj& max ) {
migrateFromStatus.start( ns , min , max );
}
~MigrateStatusHolder() {
migrateFromStatus.done();
}
};
void logOpForSharding( const char * opstr , const char * ns , const BSONObj& obj , BSONObj * patt ) {
migrateFromStatus.logOp( opstr , ns , obj , patt );
}
void aboutToDeleteForSharding( const Database* db , const DiskLoc& dl ) {
migrateFromStatus.aboutToDelete( db , dl );
}
class TransferModsCommand : public ChunkCommandHelper {
public:
TransferModsCommand() : ChunkCommandHelper( "_transferMods" ) {}
bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
return migrateFromStatus.transferMods( errmsg, result );
}
} transferModsCommand;
class InitialCloneCommand : public ChunkCommandHelper {
public:
InitialCloneCommand() : ChunkCommandHelper( "_migrateClone" ) {}
bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
return migrateFromStatus.clone( errmsg, result );
}
} initialCloneCommand;
/**
* this is the main entry for moveChunk
* called to initiate a move
* usually by a mongos
* this is called on the "from" side
*/
class MoveChunkCommand : public Command {
public:
MoveChunkCommand() : Command( "moveChunk" ) {}
virtual void help( stringstream& help ) const {
help << "should not be calling this directly" << endl;
}
virtual bool slaveOk() const { return false; }
virtual bool adminOnly() const { return true; }
virtual LockType locktype() const { return NONE; }
bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
// 1. parse options
// 2. make sure my view is complete and lock
// 3. start migrate
// in a read lock, get all DiskLoc and sort so we can do as little seeking as possible
// tell to start transferring
// 4. pause till migrate caught up
// 5. LOCK
// a) update my config, essentially locking
// b) finish migrate
// c) update config server
// d) logChange to config server
// 6. wait for all current cursors to expire
// 7. remove data locally
// -------------------------------
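// an illustrative command document matching the fields parsed in step 1 (example values only):
// { moveChunk: "test.foo", from: "shard0000:27018", to: "shard0001:27018",
//   min: { x: 0 }, max: { x: 100 }, shardId: "test.foo-x_0",
//   maxChunkSizeBytes: 67108864, configdb: "cfghost:27019" /* only needed if sharding isn't enabled here yet */ }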
// 1.
string ns = cmdObj.firstElement().str();
string to = cmdObj["to"].str();
string from = cmdObj["from"].str(); // my public address, a tad redundant, but safe
BSONObj min = cmdObj["min"].Obj();
BSONObj max = cmdObj["max"].Obj();
BSONElement shardId = cmdObj["shardId"];
BSONElement maxSizeElem = cmdObj["maxChunkSizeBytes"];
if ( ns.empty() ) {
errmsg = "need to specify namespace in command";
return false;
}
if ( to.empty() ) {
errmsg = "need to specify server to move chunk to";
return false;
}
if ( from.empty() ) {
errmsg = "need to specify server to move chunk from";
return false;
}
if ( min.isEmpty() ) {
errmsg = "need to specify a min";
return false;
}
if ( max.isEmpty() ) {
errmsg = "need to specify a max";
return false;
}
if ( shardId.eoo() ) {
errmsg = "need shardId";
return false;
}
if ( maxSizeElem.eoo() || ! maxSizeElem.isNumber() ) {
errmsg = "need to specify maxChunkSizeBytes";
return false;
}
const long long maxChunkSize = maxSizeElem.numberLong(); // in bytes
if ( ! shardingState.enabled() ) {
if ( cmdObj["configdb"].type() != String ) {
errmsg = "sharding not enabled";
return false;
}
string configdb = cmdObj["configdb"].String();
shardingState.enable( configdb );
configServer.init( configdb );
}
MoveTimingHelper timing( "from" , ns , min , max , 6 /* steps */);
Shard fromShard( from );
Shard toShard( to );
log() << "received moveChunk request: " << cmdObj << endl;
timing.done(1);
// 2.
DistributedLock lockSetup( ConnectionString( shardingState.getConfigServer() , ConnectionString::SYNC ) , ns );
dist_lock_try dlk( &lockSetup , (string)"migrate-" + min.toString() );
if ( ! dlk.got() ) {
errmsg = "the collection's metadata lock is taken";
result.append( "who" , dlk.other() );
return false;
}
BSONObj chunkInfo = BSON("min" << min << "max" << max << "from" << fromShard.getName() << "to" << toShard.getName());
configServer.logChange( "moveChunk.start" , ns , chunkInfo );
ShardChunkVersion maxVersion;
string myOldShard;
{
ScopedDbConnection conn( shardingState.getConfigServer() );
BSONObj x = conn->findOne( ShardNS::chunk , Query( BSON( "ns" << ns ) ).sort( BSON( "lastmod" << -1 ) ) );
maxVersion = x["lastmod"];
BSONObj currChunk = conn->findOne( ShardNS::chunk , shardId.wrap( "_id" ) );
assert( currChunk["shard"].type() );
assert( currChunk["min"].type() );
assert( currChunk["max"].type() );
myOldShard = currChunk["shard"].String();
conn.done();
BSONObj currMin = currChunk["min"].Obj();
BSONObj currMax = currChunk["max"].Obj();
if ( currMin.woCompare( min ) || currMax.woCompare( max ) ) {
errmsg = "boundaries are outdated (likely a split occurred)";
result.append( "currMin" , currMin );
result.append( "currMax" , currMax );
result.append( "requestedMin" , min );
result.append( "requestedMax" , max );
log( LL_WARNING ) << "aborted moveChunk because" << errmsg << ": " << min << "->" << max
<< " is now " << currMin << "->" << currMax << endl;
return false;
}
if ( myOldShard != fromShard.getName() ) {
errmsg = "location is outdated (likely balance or migrate occurred)";
result.append( "from" , fromShard.getName() );
result.append( "official" , myOldShard );
log( LL_WARNING ) << "aborted moveChunk because " << errmsg << ": chunk is at " << myOldShard
<< " and not at " << fromShard.getName() << endl;
return false;
}
if ( maxVersion < shardingState.getVersion( ns ) ) {
errmsg = "official version less than mine?";
result.appendTimestamp( "officialVersion" , maxVersion );
result.appendTimestamp( "myVersion" , shardingState.getVersion( ns ) );
log( LL_WARNING ) << "aborted moveChunk because " << errmsg << ": official " << maxVersion
<< " mine: " << shardingState.getVersion(ns) << endl;
return false;
}
// since this could be the first call that enables sharding we also make sure to have the chunk manager up to date
shardingState.gotShardName( myOldShard );
ShardChunkVersion shardVersion;
shardingState.trySetVersion( ns , shardVersion /* will return updated */ );
log() << "moveChunk request accepted at version " << shardVersion << endl;
}
timing.done(2);
// 3.
MigrateStatusHolder statusHolder( ns , min , max );
{
// this gets a read lock, so we know we have a checkpoint for mods
if ( ! migrateFromStatus.storeCurrentLocs( maxChunkSize , errmsg , result ) )
return false;
ScopedDbConnection connTo( to );
BSONObj res;
bool ok = connTo->runCommand( "admin" ,
BSON( "_recvChunkStart" << ns <<
"from" << from <<
"min" << min <<
"max" << max <<
"configServer" << configServer.modelServer()
) ,
res );
connTo.done();
if ( ! ok ) {
errmsg = "moveChunk failed to engage TO-shard in the data transfer: ";
assert( res["errmsg"].type() );
errmsg += res["errmsg"].String();
result.append( "cause" , res );
return false;
}
}
timing.done( 3 );
// 4.
for ( int i=0; i<86400; i++ ) { // don't want a single chunk move to take more than a day
assert( dbMutex.getState() == 0 );
sleepsecs( 1 );
ScopedDbConnection conn( to );
BSONObj res;
bool ok = conn->runCommand( "admin" , BSON( "_recvChunkStatus" << 1 ) , res );
res = res.getOwned();
conn.done();
log(0) << "moveChunk data transfer progress: " << res << " my mem used: " << migrateFromStatus.mbUsed() << endl;
if ( ! ok || res["state"].String() == "fail" ) {
log( LL_WARNING ) << "moveChunk error transfering data caused migration abort: " << res << endl;
errmsg = "data transfer error";
result.append( "cause" , res );
return false;
}
if ( res["state"].String() == "steady" )
break;
if ( migrateFromStatus.mbUsed() > 500 ) { // mbUsed() returns megabytes, so this caps the mods buffer at ~500MB
// this is too much memory for us to use for this
// so we're going to abort the migrate
ScopedDbConnection conn( to );
BSONObj res;
conn->runCommand( "admin" , BSON( "_recvChunkAbort" << 1 ) , res );
res = res.getOwned();
conn.done();
error() << "aborting migrate because too much memory used res: " << res << endl;
errmsg = "aborting migrate because too much memory used";
result.appendBool( "split" , true );
return false;
}
killCurrentOp.checkForInterrupt();
}
timing.done(4);
// 5.
{
// 5.a
// we're under the collection lock here, so no other migrate can change maxVersion or ShardChunkManager state
migrateFromStatus.setInCriticalSection( true );
ShardChunkVersion currVersion = maxVersion;
ShardChunkVersion myVersion = currVersion;
myVersion.incMajor();
{
writelock lk( ns );
assert( myVersion > shardingState.getVersion( ns ) );
// bump the chunk manager's version up and "forget" about the chunk being moved
// this is not the commit point, but in practice the state in this shard won't change until the commit is done
shardingState.donateChunk( ns , min , max , myVersion );
}
log() << "moveChunk setting version to: " << myVersion << endl;
// 5.b
// we're under the collection lock here, too, so we can undo the chunk donation because no other state change
// could be ongoing
{
BSONObj res;
ScopedDbConnection connTo( to );
bool ok = connTo->runCommand( "admin" ,
BSON( "_recvChunkCommit" << 1 ) ,
res );
connTo.done();
if ( ! ok ) {
{
writelock lk( ns );
// revert the chunk manager back to the state before "forgetting" about the chunk
shardingState.undoDonateChunk( ns , min , max , currVersion );
}
log() << "movChunk migrate commit not accepted by TO-shard: " << res
<< " resetting shard version to: " << currVersion << endl;
errmsg = "_recvChunkCommit failed!";
result.append( "cause" , res );
return false;
}
log() << "moveChunk migrate commit accepted by TO-shard: " << res << endl;
}
// 5.c
// version at which the next highest lastmod will be set
// if the chunk being moved is the last in the shard, nextVersion is that chunk's lastmod
// otherwise the highest version is from the chunk being bumped on the FROM-shard
ShardChunkVersion nextVersion;
// we want to go only once to the configDB but perhaps change two chunks, the one being migrated and another
// local one (so to bump version for the entire shard)
// we use the 'applyOps' mechanism to group the two updates and make them safer
// TODO pull config update code to a module
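// shape of the command assembled below (illustrative, field values elided):
// { applyOps: [ { op: "u", b: false, ns: <config chunks collection>,
//                 o:  { _id: <chunk id>, lastmod: <myVersion>, ns: <ns>, min: ..., max: ..., shard: <to-shard> },
//                 o2: { _id: <chunk id> } }
//               /* plus a second update bumping one remaining local chunk, if any */ ],
//   preCondition: [ { ns: <config chunks collection>,
//                     q: { query: { ns: <ns> }, orderby: { lastmod: -1 } },
//                     res: { lastmod: <maxVersion> } } ] }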
BSONObjBuilder cmdBuilder;
BSONArrayBuilder updates( cmdBuilder.subarrayStart( "applyOps" ) );
{
// update for the chunk being moved
BSONObjBuilder op;
op.append( "op" , "u" );
op.appendBool( "b" , false /* no upserting */ );
op.append( "ns" , ShardNS::chunk );
BSONObjBuilder n( op.subobjStart( "o" ) );
n.append( "_id" , Chunk::genID( ns , min ) );
n.appendTimestamp( "lastmod" , myVersion /* same as used on donateChunk */ );
n.append( "ns" , ns );
n.append( "min" , min );
n.append( "max" , max );
n.append( "shard" , toShard.getName() );
n.done();
BSONObjBuilder q( op.subobjStart( "o2" ) );
q.append( "_id" , Chunk::genID( ns , min ) );
q.done();
updates.append( op.obj() );
}
nextVersion = myVersion;
// if we have chunks left on the FROM shard, update the version of one of them as well
// we can figure that out by grabbing the chunkManager installed in 5.a
// TODO expose that manager when installing it
ShardChunkManagerPtr chunkManager = shardingState.getShardChunkManager( ns );
if( chunkManager->getNumChunks() > 0 ) {
// get another chunk on that shard
BSONObj lookupKey;
BSONObj bumpMin, bumpMax;
do {
chunkManager->getNextChunk( lookupKey , &bumpMin , &bumpMax );
lookupKey = bumpMin;
}
while( bumpMin == min );
BSONObjBuilder op;
op.append( "op" , "u" );
op.appendBool( "b" , false );
op.append( "ns" , ShardNS::chunk );
nextVersion.incMinor(); // same as used on donateChunk
BSONObjBuilder n( op.subobjStart( "o" ) );
n.append( "_id" , Chunk::genID( ns , bumpMin ) );
n.appendTimestamp( "lastmod" , nextVersion );
n.append( "ns" , ns );
n.append( "min" , bumpMin );
n.append( "max" , bumpMax );
n.append( "shard" , fromShard.getName() );
n.done();
BSONObjBuilder q( op.subobjStart( "o2" ) );
q.append( "_id" , Chunk::genID( ns , bumpMin ) );
q.done();
updates.append( op.obj() );
log() << "moveChunk updating self version to: " << nextVersion << " through "
<< bumpMin << " -> " << bumpMax << " for collection '" << ns << "'" << endl;
}
else {
log() << "moveChunk moved last chunk out for collection '" << ns << "'" << endl;
}
updates.done();
BSONArrayBuilder preCond( cmdBuilder.subarrayStart( "preCondition" ) );
{
BSONObjBuilder b;
b.append( "ns" , ShardNS::chunk );
b.append( "q" , BSON( "query" << BSON( "ns" << ns ) << "orderby" << BSON( "lastmod" << -1 ) ) );
{
BSONObjBuilder bb( b.subobjStart( "res" ) );
bb.appendTimestamp( "lastmod" , maxVersion );
bb.done();
}
preCond.append( b.obj() );
}
preCond.done();
BSONObj cmd = cmdBuilder.obj();
log(7) << "moveChunk update: " << cmd << endl;
bool ok = false;
BSONObj cmdResult;
try {
ScopedDbConnection conn( shardingState.getConfigServer() );
ok = conn->runCommand( "config" , cmd , cmdResult );
conn.done();
}