/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.store;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterables;
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.index.*;
import org.apache.lucene.store.*;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.Version;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.lucene.Directories;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.lucene.store.InputStreamIndexInput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AbstractRefCounted;
import org.elasticsearch.common.util.concurrent.RefCounted;
import org.elasticsearch.env.ShardLock;
import org.elasticsearch.index.codec.CodecService;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.index.shard.AbstractIndexShardComponent;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.store.distributor.Distributor;
import java.io.*;
import java.nio.file.NoSuchFileException;
import java.util.*;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.zip.Adler32;
import java.util.zip.CRC32;
import java.util.zip.Checksum;
/**
 * A Store provides plain access to files written by an elasticsearch index shard. Each shard
 * has a dedicated store that is used to access Lucene's Directory, which represents the lowest level
 * of file abstraction in Lucene for reading and writing Lucene indices.
 * This class also provides access to metadata information like checksums for committed files. A committed
 * file is a file that belongs to a segment written by a Lucene commit. Files that have not been committed,
 * i.e. created during a merge or a shard refresh / NRT reopen, are not considered in the MetadataSnapshot.
 *
 * Note: If you use a store, its reference count should be increased before use by calling #incRef, and a
 * corresponding #decRef must be called in a try/finally block to release the store again, i.e.:
* <pre>
* store.incRef();
* try {
* // use the store...
*
* } finally {
* store.decRef();
* }
* </pre>
*/
public class Store extends AbstractIndexShardComponent implements Closeable, RefCounted {
private static final String CODEC = "store";
private static final int VERSION_STACK_TRACE = 1; // we write the stack trace too since 1.4.0
private static final int VERSION_START = 0;
private static final int VERSION = VERSION_STACK_TRACE;
private static final String CORRUPTED = "corrupted_";
private final AtomicBoolean isClosed = new AtomicBoolean(false);
private final CodecService codecService;
private final DirectoryService directoryService;
private final StoreDirectory directory;
private final ReentrantReadWriteLock metadataLock = new ReentrantReadWriteLock();
private final ShardLock shardLock;
private final AbstractRefCounted refCounter = new AbstractRefCounted("store") {
@Override
protected void closeInternal() {
// close us once we are done
Store.this.closeInternal();
}
};
private volatile OnCloseListener onClose;
@Inject
public Store(ShardId shardId, @IndexSettings Settings indexSettings, CodecService codecService, DirectoryService directoryService, Distributor distributor, ShardLock shardLock) throws IOException {
super(shardId, indexSettings);
this.codecService = codecService;
this.directoryService = directoryService;
this.directory = new StoreDirectory(directoryService.newFromDistributor(distributor));
this.shardLock = shardLock;
assert shardLock != null;
assert shardLock.getShardId().equals(shardId);
}
public Directory directory() {
ensureOpen();
return directory;
}
/**
* Returns the last committed segments info for this store
* @throws IOException if the index is corrupted or the segments file is not present
*/
public SegmentInfos readLastCommittedSegmentsInfo() throws IOException {
return readSegmentsInfo(null, directory());
}
/**
* Returns the segments info for the given commit or for the latest commit if the given commit is <code>null</code>
* @throws IOException if the index is corrupted or the segments file is not present
*/
private static SegmentInfos readSegmentsInfo(IndexCommit commit, Directory directory) throws IOException {
try {
return commit == null ? Lucene.readSegmentInfos(directory) : Lucene.readSegmentInfos(commit, directory);
} catch (EOFException eof) {
// TODO this should be caught by lucene - EOF is almost certainly an index corruption
throw new CorruptIndexException("Read past EOF while reading segment infos", "commit(" + commit + ")", eof);
} catch (IOException exception) {
throw exception; // IOExceptions like too many open files are not necessarily a corruption - just bubble it up
} catch (Exception ex) {
throw new CorruptIndexException("Hit unexpected exception while reading segment infos", "commit(" + commit + ")", ex);
}
}
final void ensureOpen() { // for testing
if (this.refCounter.refCount() <= 0) {
throw new AlreadyClosedException("store is already closed");
}
}
/**
* Returns a new MetadataSnapshot for the latest commit in this store or
* an empty snapshot if no index exists or can not be opened.
* @throws CorruptIndexException if the lucene index is corrupted. This can be caused by a checksum mismatch or an
     * unexpected exception when opening the index or reading the segments file.
*/
public MetadataSnapshot getMetadataOrEmpty() throws IOException {
try {
return getMetadata(null);
} catch (IndexNotFoundException ex) {
// that's fine - happens all the time no need to log
} catch (FileNotFoundException | NoSuchFileException ex) {
logger.info("Failed to open / find files while reading metadata snapshot");
}
return MetadataSnapshot.EMPTY;
}
/**
* Returns a new MetadataSnapshot for the latest commit in this store.
*
* @throws CorruptIndexException if the lucene index is corrupted. This can be caused by a checksum mismatch or an
     * unexpected exception when opening the index or reading the segments file.
* @throws FileNotFoundException if one or more files referenced by a commit are not present.
* @throws NoSuchFileException if one or more files referenced by a commit are not present.
* @throws IndexNotFoundException if no index / valid commit-point can be found in this store
*/
public MetadataSnapshot getMetadata() throws IOException {
return getMetadata(null);
}
/**
* Returns a new MetadataSnapshot for the given commit. If the given commit is <code>null</code>
* the latest commit point is used.
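     *
     * For example (an illustrative sketch):
     * <pre>
     * store.incRef();
     * try {
     *     MetadataSnapshot snapshot = store.getMetadata(null); // null == latest commit
     *     for (StoreFileMetaData md : snapshot) {
     *         // inspect md.name(), md.length(), md.checksum()
     *     }
     * } finally {
     *     store.decRef();
     * }
     * </pre>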
*
* @throws CorruptIndexException if the lucene index is corrupted. This can be caused by a checksum mismatch or an
     * unexpected exception when opening the index or reading the segments file.
* @throws FileNotFoundException if one or more files referenced by a commit are not present.
* @throws NoSuchFileException if one or more files referenced by a commit are not present.
* @throws IndexNotFoundException if the commit point can't be found in this store
*/
public MetadataSnapshot getMetadata(IndexCommit commit) throws IOException {
ensureOpen();
failIfCorrupted();
metadataLock.readLock().lock();
try {
return new MetadataSnapshot(commit, directory, logger);
} catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
markStoreCorrupted(ex);
throw ex;
} finally {
metadataLock.readLock().unlock();
}
}
/**
     * Renames all the given files from the key of the map to the
     * value of the map. All successfully renamed files are removed from the map in-place.
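     *
     * For example (an illustrative sketch; the temporary file names are hypothetical):
     * <pre>
     * Map&lt;String, String&gt; tempFileMap = new HashMap&lt;&gt;();
     * tempFileMap.put("recovery._0.cfs.tmp", "_0.cfs");
     * tempFileMap.put("recovery.segments_2.tmp", "segments_2"); // segments_N is renamed last
     * store.renameFilesSafe(tempFileMap);
     * </pre>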
*/
public void renameFilesSafe(Map<String, String> tempFileMap) throws IOException {
// this works just like a lucene commit - we rename all temp files and once we successfully
// renamed all the segments we rename the commit to ensure we don't leave half baked commits behind.
final Map.Entry<String, String>[] entries = tempFileMap.entrySet().toArray(new Map.Entry[tempFileMap.size()]);
ArrayUtil.timSort(entries, new Comparator<Map.Entry<String, String>>() {
@Override
public int compare(Map.Entry<String, String> o1, Map.Entry<String, String> o2) {
String left = o1.getValue();
String right = o2.getValue();
if (left.startsWith(IndexFileNames.SEGMENTS) || right.startsWith(IndexFileNames.SEGMENTS)) {
if (left.startsWith(IndexFileNames.SEGMENTS) == false) {
return -1;
} else if (right.startsWith(IndexFileNames.SEGMENTS) == false) {
return 1;
}
}
return left.compareTo(right);
}
});
metadataLock.writeLock().lock();
// we make sure that nobody fetches the metadata while we do this rename operation here to ensure we don't
// get exceptions if files are still open.
try {
for (Map.Entry<String, String> entry : entries) {
String tempFile = entry.getKey();
String origFile = entry.getValue();
// first, go and delete the existing ones
try {
directory.deleteFile(origFile);
                } catch (FileNotFoundException | NoSuchFileException e) {
                    // ignore - the file we tried to delete didn't exist in the first place
} catch (Throwable ex) {
logger.debug("failed to delete file [{}]", ex, origFile);
}
                // now, rename the files... and fail if it won't work
this.renameFile(tempFile, origFile);
final String remove = tempFileMap.remove(tempFile);
assert remove != null;
}
} finally {
metadataLock.writeLock().unlock();
}
}
/**
     * Deletes the content of a shard store. Be careful calling this!
*/
public void deleteContent() throws IOException {
ensureOpen();
final String[] files = directory.listAll();
final List<IOException> exceptions = new ArrayList<>();
for (String file : files) {
try {
directory.deleteFile(file);
} catch (NoSuchFileException | FileNotFoundException e) {
// ignore
} catch (IOException e) {
exceptions.add(e);
}
}
ExceptionsHelper.rethrowAndSuppress(exceptions);
}
public StoreStats stats() throws IOException {
ensureOpen();
return new StoreStats(Directories.estimateSize(directory), directoryService.throttleTimeInNanos());
}
public void renameFile(String from, String to) throws IOException {
ensureOpen();
directory.renameFile(from, to);
}
/**
     * Returns <tt>false</tt> by default.
*/
public boolean suggestUseCompoundFile() {
return false;
}
/**
* Increments the refCount of this Store instance. RefCounts are used to determine when a
* Store can be closed safely, i.e. as soon as there are no more references. Be sure to always call a
* corresponding {@link #decRef}, in a finally clause; otherwise the store may never be closed. Note that
* {@link #close} simply calls decRef(), which means that the Store will not really be closed until {@link
* #decRef} has been called for all outstanding references.
*
* Note: Close can safely be called multiple times.
* @see #decRef
* @see #tryIncRef()
* @throws AlreadyClosedException iff the reference counter can not be incremented.
*/
@Override
public final void incRef() {
refCounter.incRef();
}
/**
     * Tries to increment the refCount of this Store instance. This method will return <tt>true</tt> iff the refCount was
     * incremented successfully, <tt>false</tt> otherwise. RefCounts are used to determine when a
* Store can be closed safely, i.e. as soon as there are no more references. Be sure to always call a
* corresponding {@link #decRef}, in a finally clause; otherwise the store may never be closed. Note that
* {@link #close} simply calls decRef(), which means that the Store will not really be closed until {@link
* #decRef} has been called for all outstanding references.
*
* Note: Close can safely be called multiple times.
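     *
     * For example (an illustrative sketch):
     * <pre>
     * if (store.tryIncRef()) {
     *     try {
     *         // use the store...
     *     } finally {
     *         store.decRef();
     *     }
     * }
     * </pre>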
* @see #decRef()
* @see #incRef()
*/
@Override
public final boolean tryIncRef() {
return refCounter.tryIncRef();
}
/**
     * Decreases the refCount of this Store instance. If the refCount drops to 0, then this
* store is closed.
* @see #incRef
*/
@Override
public final void decRef() {
refCounter.decRef();
}
@Override
public void close() {
close(null);
}
/**
* Closes this store and installs the given {@link org.elasticsearch.index.store.Store.OnCloseListener}
* to be notified once all references to this store are released and the store is closed.
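     *
     * For example (an illustrative sketch, assuming OnCloseListener is a single-method interface as used below):
     * <pre>
     * store.close(new OnCloseListener() {
     *     public void onClose(ShardId shardId) {
     *         // release per-shard resources now that the last reference is gone
     *     }
     * });
     * </pre>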
*/
public void close(@Nullable OnCloseListener onClose) {
if (isClosed.compareAndSet(false, true)) {
assert this.onClose == null : "OnClose listener is already set";
this.onClose = onClose;
// only do this once!
decRef();
logger.debug("store reference count on close: " + refCounter.refCount());
}
}
private void closeInternal() {
final OnCloseListener listener = onClose;
onClose = null;
try {
directory.innerClose(); // this closes the distributorDirectory as well
} catch (IOException e) {
logger.debug("failed to close directory", e);
} finally {
try {
if (listener != null) {
listener.onClose(shardId);
}
} catch (Exception ex){
logger.debug("OnCloseListener threw an exception", ex);
} finally {
IOUtils.closeWhileHandlingException(shardLock);
}
}
}
/**
* Reads a MetadataSnapshot from the given index locations or returns an empty snapshot if it can't be read.
* @throws IOException if the index we try to read is corrupted
*/
public static MetadataSnapshot readMetadataSnapshot(File[] indexLocations, ESLogger logger) throws IOException {
final Directory[] dirs = new Directory[indexLocations.length];
try {
            for (int i = 0; i < indexLocations.length; i++) {
dirs[i] = new SimpleFSDirectory(indexLocations[i].toPath());
}
DistributorDirectory dir = new DistributorDirectory(dirs);
failIfCorrupted(dir, new ShardId("", 1));
return new MetadataSnapshot(null, dir, logger);
} catch (IndexNotFoundException ex) {
// that's fine - happens all the time no need to log
} catch (FileNotFoundException | NoSuchFileException ex) {
logger.info("Failed to open / find files while reading metadata snapshot");
} finally {
IOUtils.close(dirs);
}
return MetadataSnapshot.EMPTY;
}
/**
     * The returned IndexOutput might validate the file's checksum if the file has been written with a newer Lucene version
     * and the metadata holds the necessary information to detect that it was written by Lucene 4.8 or newer. If it only has
     * a legacy checksum, the returned IndexOutput will not verify the checksum.
     *
     * Note: Checksums are calculated regardless, since Lucene computes them by default as of version 4.8.0. This method only adds
     * verification against the checksum in the given metadata and does not add any significant overhead.
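     *
     * A usage sketch (illustrative; <code>fileName</code> and <code>metadata</code> are assumed to come from the caller):
     * <pre>
     * IndexOutput output = store.createVerifyingOutput(fileName, metadata, IOContext.DEFAULT);
     * try {
     *     // ... write the recovered bytes to output ...
     *     Store.verify(output); // throws CorruptIndexException on checksum mismatch
     * } finally {
     *     output.close();
     * }
     * </pre>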
*/
public IndexOutput createVerifyingOutput(String fileName, final StoreFileMetaData metadata, final IOContext context) throws IOException {
IndexOutput output = directory().createOutput(fileName, context);
boolean success = false;
try {
if (metadata.hasLegacyChecksum()) {
logger.debug("create legacy adler32 output for {}", fileName);
output = new LegacyVerification.Adler32VerifyingIndexOutput(output, metadata.checksum(), metadata.length());
} else if (metadata.checksum() == null) {
// TODO: when the file is a segments_N, we can still CRC-32 + length for more safety
                // it's had that checksum forever.
logger.debug("create legacy length-only output for {}", fileName);
output = new LegacyVerification.LengthVerifyingIndexOutput(output, metadata.length());
} else {
assert metadata.writtenBy() != null;
assert metadata.writtenBy().onOrAfter(Version.LUCENE_4_8);
output = new LuceneVerifyingIndexOutput(metadata, output);
}
success = true;
} finally {
if (success == false) {
IOUtils.closeWhileHandlingException(output);
}
}
return output;
}
public static void verify(IndexOutput output) throws IOException {
if (output instanceof VerifyingIndexOutput) {
((VerifyingIndexOutput)output).verify();
}
}
public IndexInput openVerifyingInput(String filename, IOContext context, StoreFileMetaData metadata) throws IOException {
if (metadata.hasLegacyChecksum() || metadata.checksum() == null) {
logger.debug("open legacy input for {}", filename);
return directory().openInput(filename, context);
}
assert metadata.writtenBy() != null;
assert metadata.writtenBy().onOrAfter(Version.LUCENE_4_8_0);
return new VerifyingIndexInput(directory().openInput(filename, context));
}
public static void verify(IndexInput input) throws IOException {
if (input instanceof VerifyingIndexInput) {
((VerifyingIndexInput)input).verify();
}
}
public boolean checkIntegrity(StoreFileMetaData md) {
return checkIntegrity(md, directory());
}
public static boolean checkIntegrity(final StoreFileMetaData md, final Directory directory) {
try (IndexInput input = directory.openInput(md.name(), IOContext.READONCE)) {
if (input.length() != md.length()) { // first check the length no matter how old this file is
return false;
}
if (md.writtenBy() != null && md.writtenBy().onOrAfter(Version.LUCENE_4_8_0)) {
return Store.digestToString(CodecUtil.checksumEntireFile(input)).equals(md.checksum());
} else if (md.hasLegacyChecksum()) {
// legacy checksum verification - no footer that we need to omit in the checksum!
final Checksum checksum = new Adler32();
final byte[] buffer = new byte[md.length() > 4096 ? 4096 : (int) md.length()];
final long len = input.length();
long read = 0;
while (len > read) {
final long bytesLeft = len - read;
final int bytesToRead = bytesLeft < buffer.length ? (int) bytesLeft : buffer.length;
input.readBytes(buffer, 0, bytesToRead, false);
checksum.update(buffer, 0, bytesToRead);
read += bytesToRead;
}
return Store.digestToString(checksum.getValue()).equals(md.checksum());
}
} catch (IOException ex) {
return false;
}
return true;
}
public boolean isMarkedCorrupted() throws IOException {
ensureOpen();
        /* Marking a store as corrupted is done by writing a marker file whose name starts
         * with the corrupted_ prefix into the directory. As long as such a marker file is
         * present, failIfCorrupted() throws and the store can not be used.
         */
final String[] files = directory().listAll();
for (String file : files) {
if (file.startsWith(CORRUPTED)) {
return true;
}
}
return false;
}
public void failIfCorrupted() throws IOException {
ensureOpen();
failIfCorrupted(directory, shardId);
}
private static final void failIfCorrupted(Directory directory, ShardId shardId) throws IOException {
final String[] files = directory.listAll();
List<CorruptIndexException> ex = new ArrayList<>();
for (String file : files) {
if (file.startsWith(CORRUPTED)) {
try(ChecksumIndexInput input = directory.openChecksumInput(file, IOContext.READONCE)) {
int version = CodecUtil.checkHeader(input, CODEC, VERSION_START, VERSION);
String msg = input.readString();
StringBuilder builder = new StringBuilder(shardId.toString());
builder.append(" Preexisting corrupted index [");
builder.append(file).append("] caused by: ");
builder.append(msg);
if (version == VERSION_STACK_TRACE) {
builder.append(System.lineSeparator());
builder.append(input.readString());
}
ex.add(new CorruptIndexException(builder.toString(), "preexisting_corruption"));
CodecUtil.checkFooter(input);
}
}
}
if (ex.isEmpty() == false) {
ExceptionsHelper.rethrowAndSuppress(ex);
}
}
/**
* This exists so {@link org.elasticsearch.index.codec.postingsformat.BloomFilterPostingsFormat} can load its boolean setting; can we find a more straightforward way?
*/
public final class StoreDirectory extends FilterDirectory {
public final ESLogger deletesLogger;
StoreDirectory(Directory delegateDirectory) throws IOException {
super(delegateDirectory);
deletesLogger = Loggers.getLogger("index.store.deletes", indexSettings, shardId);
}
public ShardId shardId() {
ensureOpen();
return Store.this.shardId();
}
@Override
public void close() throws IOException {
assert false : "Nobody should close this directory except of the Store itself";
}
@Override
public void deleteFile(String name) throws IOException {
logDeleteFile("StoreDirectory.deleteFile", name);
super.deleteFile(name);
}
private void innerClose() throws IOException {
super.close();
}
@Override
public String toString() {
return "store(" + in.toString() + ")";
}
}
/** Log that we are about to delete this file, to the index.store.deletes component. */
public void logDeleteFile(String message, String fileName) {
logDeleteFile(directory(), message, fileName);
}
/** Log that we are about to delete this file, to the index.store.deletes component. */
public static void logDeleteFile(Directory dir, String message, String fileName) {
assert dir instanceof StoreDirectory;
if (dir instanceof StoreDirectory) {
((StoreDirectory) dir).deletesLogger.trace("{}: delete file {}", message, fileName);
}
// else what to do...?
}
/**
     * Represents a snapshot of the current directory built from the latest Lucene commit.
     * Only files that are part of the last commit are considered in this data structure.
     * For backwards compatibility the snapshot might include legacy checksums that
     * are derived from a dedicated checksum file written by older elasticsearch versions (pre 1.3).
*
* Note: This class will ignore the <tt>segments.gen</tt> file since it's optional and might
* change concurrently for safety reasons.
*
* @see StoreFileMetaData
*/
public final static class MetadataSnapshot implements Iterable<StoreFileMetaData> {
private static final ESLogger logger = Loggers.getLogger(MetadataSnapshot.class);
private static final Version FIRST_LUCENE_CHECKSUM_VERSION = Version.LUCENE_4_8;
// we stopped writing legacy checksums in 1.3.0 so all segments here must use the new CRC32 version
private static final Version FIRST_ES_CRC32_VERSION = org.elasticsearch.Version.V_1_3_0.luceneVersion;
private final Map<String, StoreFileMetaData> metadata;
public static final MetadataSnapshot EMPTY = new MetadataSnapshot();
public MetadataSnapshot(Map<String, StoreFileMetaData> metadata) {
this.metadata = metadata;
}
MetadataSnapshot() {
this.metadata = Collections.emptyMap();
}
MetadataSnapshot(IndexCommit commit, Directory directory, ESLogger logger) throws IOException {
metadata = buildMetadata(commit, directory, logger);
}
private static final boolean useLuceneChecksum(Version version, boolean hasLegacyChecksum) {
return (version.onOrAfter(FIRST_LUCENE_CHECKSUM_VERSION) && hasLegacyChecksum == false) // no legacy checksum and a guarantee that lucene has checksums
|| version.onOrAfter(FIRST_ES_CRC32_VERSION); // OR we know that we didn't even write legacy checksums anymore when this segment was written.
}
ImmutableMap<String, StoreFileMetaData> buildMetadata(IndexCommit commit, Directory directory, ESLogger logger) throws IOException {
ImmutableMap.Builder<String, StoreFileMetaData> builder = ImmutableMap.builder();
Map<String, String> checksumMap = readLegacyChecksums(directory).v1();
try {
final SegmentInfos segmentCommitInfos = Store.readSegmentsInfo(commit, directory);
Version maxVersion = Version.LUCENE_4_0; // we don't know which version was used to write so we take the max version.
for (SegmentCommitInfo info : segmentCommitInfos) {
final Version version = info.info.getVersion();
if (version != null && version.onOrAfter(maxVersion)) {
maxVersion = version;
}
for (String file : info.files()) {
String legacyChecksum = checksumMap.get(file);
if (useLuceneChecksum(version, legacyChecksum != null)) {
checksumFromLuceneFile(directory, file, builder, logger, version, SEGMENT_INFO_EXTENSION.equals(IndexFileNames.getExtension(file)));
} else {
builder.put(file, new StoreFileMetaData(file, directory.fileLength(file), legacyChecksum, null));
}
}
}
final String segmentsFile = segmentCommitInfos.getSegmentsFileName();
String legacyChecksum = checksumMap.get(segmentsFile);
if (useLuceneChecksum(maxVersion, legacyChecksum != null)) {
checksumFromLuceneFile(directory, segmentsFile, builder, logger, maxVersion, true);
} else {
builder.put(segmentsFile, new StoreFileMetaData(segmentsFile, directory.fileLength(segmentsFile), legacyChecksum, null));
}
} catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
throw ex;
} catch (Throwable ex) {
try {
// Lucene checks the checksum after it tries to lookup the codec etc.
// in that case we might get only IAE or similar exceptions while we are really corrupt...
// TODO we should check the checksum in lucene if we hit an exception
Lucene.checkSegmentInfoIntegrity(directory);
} catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException cex) {
cex.addSuppressed(ex);
throw cex;
} catch (Throwable e) {
// ignore...
}
throw ex;
}
return builder.build();
}
/**
* Reads legacy checksum files found in the directory.
*
         * Files are expected to start with the _checksums- prefix
         * followed by a long file version. Only the file with the highest version is read; all other files are ignored.
*
* @param directory the directory to read checksums from
* @return a map of file checksums and the checksum file version
* @throws IOException
*/
static Tuple<Map<String, String>, Long> readLegacyChecksums(Directory directory) throws IOException {
synchronized (directory) {
long lastFound = -1;
for (String name : directory.listAll()) {
if (!isChecksum(name)) {
continue;
}
long current = Long.parseLong(name.substring(CHECKSUMS_PREFIX.length()));
if (current > lastFound) {
lastFound = current;
}
}
if (lastFound > -1) {
try (IndexInput indexInput = directory.openInput(CHECKSUMS_PREFIX + lastFound, IOContext.READONCE)) {
indexInput.readInt(); // version
                        return new Tuple<Map<String, String>, Long>(indexInput.readStringStringMap(), lastFound);
}
}
                return new Tuple<Map<String, String>, Long>(new HashMap<String, String>(), -1L);
}
}
/**
* Deletes all checksum files with version lower than newVersion.
*
* @param directory the directory to clean
* @param newVersion the latest checksum file version
* @throws IOException
*/
static void cleanLegacyChecksums(Directory directory, long newVersion) throws IOException {
synchronized (directory) {
for (String name : directory.listAll()) {
if (isChecksum(name)) {
long current = Long.parseLong(name.substring(CHECKSUMS_PREFIX.length()));
if (current < newVersion) {
try {
directory.deleteFile(name);
} catch (IOException ex) {
logger.debug("can't delete old checksum file [{}]", ex, name);
}
}
}
}
}
}
private static void checksumFromLuceneFile(Directory directory, String file, ImmutableMap.Builder<String, StoreFileMetaData> builder, ESLogger logger, Version version, boolean readFileAsHash) throws IOException {
final String checksum;
final BytesRefBuilder fileHash = new BytesRefBuilder();
try (final IndexInput in = directory.openInput(file, IOContext.READONCE)) {
try {
if (in.length() < CodecUtil.footerLength()) {
// truncated files trigger IAE if we seek negative... these files are really corrupted though
throw new CorruptIndexException("Can't retrieve checksum from file: " + file + " file length must be >= " + CodecUtil.footerLength() + " but was: " + in.length(), in);
}
if (readFileAsHash) {
hashFile(fileHash, new InputStreamIndexInput(in, in.length()), in.length());
}
checksum = digestToString(CodecUtil.retrieveChecksum(in));
} catch (Throwable ex) {
logger.debug("Can retrieve checksum from file [{}]", ex, file);
throw ex;
}
builder.put(file, new StoreFileMetaData(file, directory.fileLength(file), checksum, version, fileHash.get()));
}
}
/**
* Computes a strong hash value for small files. Note that this method should only be used for files < 1MB
*/
public static void hashFile(BytesRefBuilder fileHash, InputStream in, long size) throws IOException {
final int len = (int)Math.min(1024 * 1024, size); // for safety we limit this to 1MB
fileHash.grow(len);
fileHash.setLength(len);
final int readBytes = Streams.readFully(in, fileHash.bytes(), 0, len);
assert readBytes == len : Integer.toString(readBytes) + " != " + Integer.toString(len);
assert fileHash.length() == len : Integer.toString(fileHash.length()) + " != " + Integer.toString(len);
}
@Override
public Iterator<StoreFileMetaData> iterator() {
return metadata.values().iterator();
}
public StoreFileMetaData get(String name) {
return metadata.get(name);
}
public Map<String, StoreFileMetaData> asMap() {
return metadata;
}
private static final String DEL_FILE_EXTENSION = "del"; // legacy delete file
private static final String LIV_FILE_EXTENSION = "liv"; // lucene 5 delete file
private static final String FIELD_INFOS_FILE_EXTENSION = "fnm";
private static final String SEGMENT_INFO_EXTENSION = "si";
/**
* Returns a diff between the two snapshots that can be used for recovery. The given snapshot is treated as the
* recovery target and this snapshot as the source. The returned diff will hold a list of files that are:
* <ul>
         * <li>identical: they exist in both snapshots and can be considered the same, i.e. they don't need to be recovered</li>
         * <li>different: they exist in both snapshots but they are not identical</li>
* <li>missing: files that exist in the source but not in the target</li>
* </ul>
         * This method groups files into per-segment files and per-commit files. A file is treated as
         * identical if and only if all files in its group are identical. On a per-segment level, files for a segment are treated
         * as identical iff:
* <ul>
* <li>all files in this segment have the same checksum</li>
* <li>all files in this segment have the same length</li>
         * <li>the segment's <tt>.si</tt> file hashes are byte-identical. Note: this uses a perfect hash function; the metadata transfers the <tt>.si</tt> file content as its hash</li>
* </ul>
*
         * The <tt>.si</tt> file contains a lot of diagnostics, including a timestamp. In the future there might be
         * unique segment identifiers in there, hardening this method further.
         *
         * Per-commit files are handled very similarly. A commit is composed of the <tt>segments_N</tt> file as well as generational files like
         * deletes (<tt>_x_y.del</tt>) or field-info (<tt>_x_y.fnm</tt>) files. On a per-commit level, files for a commit are treated
* as identical iff:
* <ul>
* <li>all files belonging to this commit have the same checksum</li>
* <li>all files belonging to this commit have the same length</li>
         * <li>the <tt>segments_N</tt> file hashes are byte-identical. Note: this uses a perfect hash function; the metadata transfers the <tt>segments_N</tt> file content as its hash</li>
* </ul>
*
* NOTE: this diff will not contain the <tt>segments.gen</tt> file. This file is omitted on recovery.
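         *
         * A usage sketch (illustrative; variable names are assumptions):
         * <pre>
         * MetadataSnapshot source = sourceStore.getMetadata();
         * MetadataSnapshot target = targetStore.getMetadata();
         * RecoveryDiff diff = source.recoveryDiff(target);
         * // diff.identical can be skipped; diff.different and diff.missing must be transferred
         * </pre>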
*/
public RecoveryDiff recoveryDiff(MetadataSnapshot recoveryTargetSnapshot) {
final ImmutableList.Builder<StoreFileMetaData> identical = ImmutableList.builder();
final ImmutableList.Builder<StoreFileMetaData> different = ImmutableList.builder();
final ImmutableList.Builder<StoreFileMetaData> missing = ImmutableList.builder();
final Map<String, List<StoreFileMetaData>> perSegment = new HashMap<>();
final List<StoreFileMetaData> perCommitStoreFiles = new ArrayList<>();
for (StoreFileMetaData meta : this) {
if (IndexFileNames.OLD_SEGMENTS_GEN.equals(meta.name())) { // legacy
continue; // we don't need that file at all
}
final String segmentId = IndexFileNames.parseSegmentName(meta.name());
final String extension = IndexFileNames.getExtension(meta.name());
assert FIELD_INFOS_FILE_EXTENSION.equals(extension) == false || IndexFileNames.stripExtension(IndexFileNames.stripSegmentName(meta.name())).isEmpty() : "FieldInfos are generational but updateable DV are not supported in elasticsearch";
if (IndexFileNames.SEGMENTS.equals(segmentId) || DEL_FILE_EXTENSION.equals(extension) || LIV_FILE_EXTENSION.equals(extension)) {
                    // only treat del files as per-commit files; fnm files are generational but only for updatable DV
perCommitStoreFiles.add(meta);
} else {
List<StoreFileMetaData> perSegStoreFiles = perSegment.get(segmentId);
if (perSegStoreFiles == null) {
perSegStoreFiles = new ArrayList<>();
perSegment.put(segmentId, perSegStoreFiles);
}
perSegStoreFiles.add(meta);
}
}
final ArrayList<StoreFileMetaData> identicalFiles = new ArrayList<>();
for (List<StoreFileMetaData> segmentFiles : Iterables.concat(perSegment.values(), Collections.singleton(perCommitStoreFiles))) {
identicalFiles.clear();
boolean consistent = true;
for (StoreFileMetaData meta : segmentFiles) {
StoreFileMetaData storeFileMetaData = recoveryTargetSnapshot.get(meta.name());
if (storeFileMetaData == null) {
consistent = false;
missing.add(meta);
} else if (storeFileMetaData.isSame(meta) == false) {
consistent = false;
different.add(meta);
} else {
identicalFiles.add(meta);
}
}
if (consistent) {
identical.addAll(identicalFiles);
} else {
// make sure all files are added - this can happen if only the deletes are different
different.addAll(identicalFiles);
}
}
RecoveryDiff recoveryDiff = new RecoveryDiff(identical.build(), different.build(), missing.build());
assert recoveryDiff.size() == this.metadata.size() - (metadata.containsKey(IndexFileNames.OLD_SEGMENTS_GEN) ? 1: 0)
: "some files are missing recoveryDiff size: [" + recoveryDiff.size() + "] metadata size: [" + this.metadata.size() + "] contains segments.gen: [" + metadata.containsKey(IndexFileNames.OLD_SEGMENTS_GEN) + "]" ;
return recoveryDiff;
}
/**
* Returns the number of files in this snapshot
*/
public int size() {
return metadata.size();
}
}
/**
* A class representing the diff between a recovery source and recovery target
* @see MetadataSnapshot#recoveryDiff(org.elasticsearch.index.store.Store.MetadataSnapshot)
*/
public static final class RecoveryDiff {
/**
         * Files that exist in both snapshots and can be considered the same, i.e. they don't need to be recovered
*/
public final List<StoreFileMetaData> identical;
/**
         * Files that exist in both snapshots but are not identical
*/
public final List<StoreFileMetaData> different;
/**
* Files that exist in the source but not in the target
*/
public final List<StoreFileMetaData> missing;
RecoveryDiff(List<StoreFileMetaData> identical, List<StoreFileMetaData> different, List<StoreFileMetaData> missing) {
this.identical = identical;
this.different = different;
this.missing = missing;
}
/**
* Returns the sum of the files in this diff.
*/
public int size() {
return identical.size() + different.size() + missing.size();
}
}
public final static class LegacyChecksums {
private final Map<String, String> legacyChecksums = new HashMap<>();
public void add(StoreFileMetaData metaData) throws IOException {
if (metaData.hasLegacyChecksum()) {
synchronized (this) {
                    // we don't add checksums if they were written by LUCENE_48... now we are using the built-in mechanism.
legacyChecksums.put(metaData.name(), metaData.checksum());
}
}
}
public synchronized void write(Store store) throws IOException {
synchronized (store.directory) {
Tuple<Map<String, String>, Long> tuple = MetadataSnapshot.readLegacyChecksums(store.directory);
tuple.v1().putAll(legacyChecksums);
if (!tuple.v1().isEmpty()) {
writeChecksums(store.directory, tuple.v1(), tuple.v2());
}
}
}
synchronized void writeChecksums(Directory directory, Map<String, String> checksums, long lastVersion) throws IOException {
long nextVersion = System.currentTimeMillis();
            while (nextVersion <= lastVersion) { // busy-wait until we get a version strictly greater than the last one
                nextVersion = System.currentTimeMillis();
}
final String checksumName = CHECKSUMS_PREFIX + nextVersion;
try (IndexOutput output = directory.createOutput(checksumName, IOContext.DEFAULT)) {
output.writeInt(0); // version
output.writeStringStringMap(checksums);
}
directory.sync(Collections.singleton(checksumName));
MetadataSnapshot.cleanLegacyChecksums(directory, nextVersion);
}
public void clear() {
this.legacyChecksums.clear();
}
public void remove(String name) {
legacyChecksums.remove(name);
}
}
public static final String CHECKSUMS_PREFIX = "_checksums-";
public static final boolean isChecksum(String name) {
        // TODO can we drop .cks
        return name.startsWith(CHECKSUMS_PREFIX) || name.endsWith(".cks"); // bwcompat - .cks used to be a previous checksum file
}
/**
* Produces a string representation of the given digest value.
*/
public static String digestToString(long digest) {
return Long.toString(digest, Character.MAX_RADIX);
}
static class LuceneVerifyingIndexOutput extends VerifyingIndexOutput {
private final StoreFileMetaData metadata;
private long writtenBytes;
private final long checksumPosition;
private String actualChecksum;
LuceneVerifyingIndexOutput(StoreFileMetaData metadata, IndexOutput out) {
super(out);
this.metadata = metadata;
checksumPosition = metadata.length() - 8; // the last 8 bytes are the checksum
}
@Override
public void verify() throws IOException {
if (metadata.checksum().equals(actualChecksum) && writtenBytes == metadata.length()) {
return;
}
throw new CorruptIndexException("verification failed (hardware problem?) : expected=" + metadata.checksum() +
" actual=" + actualChecksum + " writtenLength=" + writtenBytes + " expectedLength=" + metadata.length() +
" (resource=" + metadata.toString() + ")", "VerifyingIndexOutput(" + metadata.name() + ")");
}
@Override
public void writeByte(byte b) throws IOException {
if (writtenBytes++ == checksumPosition) {
readAndCompareChecksum();
}
out.writeByte(b);
}
private void readAndCompareChecksum() throws IOException {
actualChecksum = digestToString(getChecksum());
if (!metadata.checksum().equals(actualChecksum)) {