/
DataStoreBlobStore.java
1184 lines (1015 loc) · 42.2 KB
/
DataStoreBlobStore.java
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.jackrabbit.oak.plugins.blob.datastore;
import static org.apache.jackrabbit.guava.common.base.Preconditions.checkNotNull;
import static org.apache.jackrabbit.guava.common.collect.Iterators.filter;
import static org.apache.jackrabbit.guava.common.collect.Iterators.transform;
import static org.apache.commons.io.IOUtils.closeQuietly;
import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.SequenceInputStream;
import java.net.URI;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import javax.jcr.RepositoryException;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.IOUtils;
import org.apache.jackrabbit.core.data.DataIdentifier;
import org.apache.jackrabbit.core.data.DataRecord;
import org.apache.jackrabbit.core.data.DataStore;
import org.apache.jackrabbit.core.data.DataStoreException;
import org.apache.jackrabbit.core.data.MultiDataStoreAware;
import org.apache.jackrabbit.guava.common.cache.LoadingCache;
import org.apache.jackrabbit.guava.common.cache.Weigher;
import org.apache.jackrabbit.oak.api.Blob;
import org.apache.jackrabbit.oak.api.blob.BlobAccessProvider;
import org.apache.jackrabbit.oak.api.blob.BlobDownloadOptions;
import org.apache.jackrabbit.oak.api.blob.BlobUpload;
import org.apache.jackrabbit.oak.api.blob.BlobUploadOptions;
import org.apache.jackrabbit.oak.cache.CacheLIRS;
import org.apache.jackrabbit.oak.cache.CacheStats;
import org.apache.jackrabbit.oak.commons.StringUtils;
import org.apache.jackrabbit.oak.plugins.blob.BlobStoreBlob;
import org.apache.jackrabbit.oak.plugins.blob.BlobTrackingStore;
import org.apache.jackrabbit.oak.plugins.blob.ExtendedBlobStatsCollector;
import org.apache.jackrabbit.oak.plugins.blob.SharedDataStore;
import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordAccessProvider;
import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordDownloadOptions;
import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordUpload;
import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordUploadException;
import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordUploadOptions;
import org.apache.jackrabbit.oak.spi.blob.BlobOptions;
import org.apache.jackrabbit.oak.spi.blob.BlobStore;
import org.apache.jackrabbit.oak.spi.blob.GarbageCollectableBlobStore;
import org.apache.jackrabbit.oak.spi.blob.stats.BlobStatsCollector;
import org.apache.jackrabbit.oak.spi.blob.stats.StatsCollectingStreams;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.jackrabbit.guava.common.base.Function;
import org.apache.jackrabbit.guava.common.base.Predicate;
import org.apache.jackrabbit.guava.common.base.Strings;
import org.apache.jackrabbit.guava.common.collect.Iterators;
import org.apache.jackrabbit.guava.common.collect.Lists;
import org.apache.jackrabbit.guava.common.io.ByteStreams;
import org.apache.jackrabbit.guava.common.io.Closeables;
/**
 * BlobStore wrapper for DataStore. Wraps a Jackrabbit 2 DataStore and exposes it as a BlobStore.
 * It also handles inlining binaries if their size is smaller than
* {@link org.apache.jackrabbit.core.data.DataStore#getMinRecordLength()}
*/
public class DataStoreBlobStore
implements DataStore, BlobStore, GarbageCollectableBlobStore, BlobTrackingStore, TypedDataStore,
BlobAccessProvider {
// Instance logger (not static) so subclasses log under their own class name.
private final Logger log = LoggerFactory.getLogger(getClass());
/**
 * Flag to determine whether to remove repository id from DataStore on close.
 */
private final boolean SHARED_TRANSIENT = Boolean.parseBoolean(
    System.getProperty("oak.datastore.sharedTransient"));
// The wrapped Jackrabbit 2 DataStore; all storage operations delegate to it.
protected final DataStore delegate;
// Collector for operation timings/outcomes; NOOP by default, replaceable via
// setBlobStatsCollector().
protected BlobStatsCollector stats = ExtendedBlobStatsCollector.NOOP;
// Optional tracker of blob ids written through this store; may remain null.
private BlobTracker tracker;
/**
 * If set to true then the blob length information would be encoded as part of blobId
 * and thus no extra call would be made to DataStore to determine the length
 *
 * <b>Implementation Note</b>If enabled the length would be encoded in blobid by appending it at the end.
 * This would be done for the methods which are part of BlobStore and GarbageCollectableBlobStore interface
 *
 * DataIdentifiers which are part of DataStore would not be affected by this as DataStore interface
 * is not used in Oak and all access is via BlobStore interface
 */
private final boolean encodeLengthInId;
// In-memory cache of small binaries, keyed by blob id (see getInputStream()).
protected final LoadingCache<String, byte[]> cache;
// Default size of the in-memory binary cache, in MB.
public static final int DEFAULT_CACHE_SIZE = 16;
/**
 * Max size of binary whose content would be cached. We keep it greater than
 * Lucene blob size OakDirectory#BLOB_SIZE such that Lucene index blobs are cached
 */
private int maxCachedBinarySize = 1024 * 1024;
// Weighs cache entries by approximate memory footprint (key estimate plus
// value bytes), clamping to Integer.MAX_VALUE because Weigher returns int.
private final Weigher<String, byte[]> weigher = new Weigher<String, byte[]>() {
    @Override
    public int weigh(@NotNull String key, @NotNull byte[] value) {
        long weight = (long)StringUtils.estimateMemoryUsage(key) + value.length;
        if (weight > Integer.MAX_VALUE) {
            log.debug("Calculated weight larger than Integer.MAX_VALUE: {}.", weight);
            weight = Integer.MAX_VALUE;
        }
        return (int) weight;
    }
};
// Stats view over the in-memory cache, exposed via getCacheStats().
private final CacheStats cacheStats;
public static final String MEM_CACHE_NAME = "BlobStore-MemCache";
// Repository id this (shared) store is registered under; set by setRepositoryId().
private String repositoryId;
/**
 * Creates a wrapper with length encoding enabled and the default cache size.
 *
 * @param delegate the DataStore to wrap
 */
public DataStoreBlobStore(DataStore delegate) {
    this(delegate, true, DEFAULT_CACHE_SIZE);
}
/**
 * Creates a wrapper with the default cache size.
 *
 * @param delegate the DataStore to wrap
 * @param encodeLengthInId whether to append the blob length to the blob id
 */
public DataStoreBlobStore(DataStore delegate, boolean encodeLengthInId) {
    this(delegate, encodeLengthInId, DEFAULT_CACHE_SIZE);
}
/**
 * Creates a wrapper.
 *
 * @param delegate the DataStore to wrap
 * @param encodeLengthInId whether to append the blob length to the blob id
 * @param cacheSizeInMB maximum weight of the in-memory binary cache, in MB
 */
public DataStoreBlobStore(DataStore delegate, boolean encodeLengthInId, int cacheSizeInMB) {
    this.delegate = delegate;
    this.encodeLengthInId = encodeLengthInId;
    // Widen to long before multiplying to avoid int overflow for large sizes.
    long cacheSize = (long) cacheSizeInMB * FileUtils.ONE_MB;
    this.cache = CacheLIRS.<String, byte[]>newBuilder()
        .module(MEM_CACHE_NAME)
        .recordStats()
        .maximumWeight(cacheSize)
        .weigher(weigher)
        .build();
    this.cacheStats = new CacheStats(cache, MEM_CACHE_NAME, weigher, cacheSize);
}
//~----------------------------------< DataStore >
/**
 * Looks up a record, resolving in-memory (inlined) records locally and
 * delegating everything else. Timing and outcome are reported to the
 * stats collector.
 *
 * @param identifier the record identifier
 * @return the record, or null if no record exists for the identifier
 * @throws DataStoreException on delegate failure
 */
@Override
public DataRecord getRecordIfStored(DataIdentifier identifier) throws DataStoreException {
    try {
        long start = System.nanoTime();
        DataRecord rec = isInMemoryRecord(identifier) ?
            getDataRecord(identifier.toString()) :
            delegate.getRecordIfStored(identifier);
        // The "IfStored" contract allows a null result for unknown
        // identifiers; the original code called rec.getLength()
        // unconditionally and threw NullPointerException in that case.
        if (rec != null) {
            long elapsed = System.nanoTime() - start;
            stats.getRecordIfStoredCalled(elapsed, TimeUnit.NANOSECONDS, rec.getLength());
            stats.getRecordIfStoredCompleted(identifier.toString());
        }
        return rec;
    }
    catch (DataStoreException e) {
        stats.getRecordIfStoredFailed(identifier.toString());
        throw e;
    }
}
/**
 * Looks up a record, resolving in-memory (inlined) records locally and
 * delegating everything else. Timing and outcome are reported to the
 * stats collector.
 *
 * @param identifier the record identifier
 * @return the record (presumably never null: delegates are expected to
 *         throw for unknown identifiers — confirm against delegate impl)
 * @throws DataStoreException on failure or if the record does not exist
 */
@Override
public DataRecord getRecord(DataIdentifier identifier) throws DataStoreException {
    try {
        long start = System.nanoTime();
        DataRecord rec = isInMemoryRecord(identifier) ?
            getDataRecord(identifier.toString()) :
            delegate.getRecord(identifier);
        long elapsed = System.nanoTime() - start;
        stats.getRecordCalled(elapsed, TimeUnit.NANOSECONDS, rec.getLength());
        stats.getRecordCompleted(identifier.toString());
        return rec;
    }
    catch (DataStoreException e) {
        stats.getRecordFailed(identifier.toString());
        throw e;
    }
}
/**
 * Resolves a record from its secure reference, reporting timing and
 * outcome to the stats collector.
 *
 * @param reference reference obtained from {@code DataRecord#getReference()}
 * @return the matching record, or null if the reference does not resolve
 * @throws DataStoreException on delegate failure
 */
@Override
public DataRecord getRecordFromReference(String reference) throws DataStoreException {
    try {
        long start = System.nanoTime();
        DataRecord rec = delegate.getRecordFromReference(reference);
        // The delegate may return null for an unknown or invalid reference;
        // the original code dereferenced rec unconditionally and threw
        // NullPointerException in that case.
        if (rec != null) {
            long elapsed = System.nanoTime() - start;
            stats.getRecordFromReferenceCalled(elapsed, TimeUnit.NANOSECONDS, rec.getLength());
            stats.getRecordFromReferenceCompleted(reference);
        }
        return rec;
    }
    catch (DataStoreException e) {
        stats.getRecordFromReferenceFailed(reference);
        throw e;
    }
}
/**
 * Stores the given stream, inlining small binaries (below the delegate's
 * min record length) in memory via writeStream(). Stats are reported on
 * success and failure.
 *
 * @param stream content to store; NOT closed by this method
 * @return the stored record
 * @throws DataStoreException on storage or I/O failure
 */
@Override
public DataRecord addRecord(InputStream stream) throws DataStoreException {
    try {
        long start = System.nanoTime();
        DataRecord rec = writeStream(stream, new BlobOptions());
        stats.recordAdded(System.nanoTime() - start, TimeUnit.NANOSECONDS, rec.getLength());
        stats.addRecordCompleted(rec.getIdentifier().toString());
        return rec;
    }
    catch (IOException e) {
        // Wrap I/O problems so callers only see the DataStore exception type.
        stats.addRecordFailed();
        throw new DataStoreException(e);
    }
    catch (DataStoreException e) {
        stats.addRecordFailed();
        throw e;
    }
}
/** Pure delegation; no stats collected for this call. */
@Override
public void updateModifiedDateOnAccess(long before) {
    delegate.updateModifiedDateOnAccess(before);
}
/**
 * Deletes records older than the given time via the delegate, reporting
 * timing and the deleted count to the stats collector.
 *
 * @param min minimum modification time (epoch ms) of records to keep
 * @return number of deleted records
 */
@Override
public int deleteAllOlderThan(long min) throws DataStoreException {
    try {
        long start = System.nanoTime();
        int deletedCount = delegate.deleteAllOlderThan(min);
        stats.deletedAllOlderThan(System.nanoTime() - start, TimeUnit.NANOSECONDS, min);
        stats.deleteAllOlderThanCompleted(deletedCount);
        return deletedCount;
    }
    catch (Exception e) {
        stats.deleteAllOlderThanFailed(min);
        throw e;
    }
}
/**
 * Returns all identifiers known to the delegate. Note: the timing covers
 * only obtaining the iterator, not its (possibly lazy) consumption.
 */
@Override
public Iterator<DataIdentifier> getAllIdentifiers() throws DataStoreException {
    try {
        long start = System.nanoTime();
        Iterator<DataIdentifier> allIdentifiersIterator = delegate.getAllIdentifiers();
        stats.getAllIdentifiersCalled(System.nanoTime() - start, TimeUnit.NANOSECONDS);
        stats.getAllIdentifiersCompleted();
        return allIdentifiersIterator;
    }
    catch (Exception e) {
        stats.getAllIdentifiersFailed();
        throw e;
    }
}
/**
 * Unsupported: the wrapped delegate is expected to be initialized before
 * being handed to this wrapper.
 */
@Override
public void init(String homeDir) throws RepositoryException {
    throw new UnsupportedOperationException("DataStore cannot be initialized again");
}
/** Delegates; binaries at or below this length are inlined (see writeStream). */
@Override
public int getMinRecordLength() {
    return delegate.getMinRecordLength();
}
/**
 * Closes the delegate, clears the in-memory cache and quietly closes the
 * tracker. When the transient-shared flag is set, the repository marker
 * record is removed first so the store no longer appears registered.
 */
@Override
public void close() throws DataStoreException {
    // If marked as shared transient then delete the repository marker in close
    if (SHARED_TRANSIENT) {
        if (!Strings.isNullOrEmpty(getRepositoryId())) {
            deleteMetadataRecord(SharedDataStoreUtils.SharedStoreRecordType.REPOSITORY.getNameFromId(getRepositoryId()));
        }
    }
    delegate.close();
    cache.invalidateAll();
    closeQuietly(tracker);
}
//~-------------------------------------------< BlobStore >
/** Writes a blob with default options. */
@Override
public String writeBlob(InputStream stream) throws IOException {
    return writeBlob(stream, new BlobOptions());
}
/**
 * Writes a blob, registers its id with the tracker (if any) and reports
 * upload stats. DataStoreException is wrapped into IOException to satisfy
 * the BlobStore interface.
 *
 * @param stream content to store; ALWAYS closed by this method
 * @param options write options passed through to the delegate
 * @return the (possibly length-encoded) blob id
 */
@Override
public String writeBlob(InputStream stream, BlobOptions options) throws IOException {
    // "threw" drives Closeables.close(): on the failure path close errors
    // are logged rather than masking the original exception.
    boolean threw = true;
    try {
        long start = System.nanoTime();
        checkNotNull(stream);
        DataRecord dr = writeStream(stream, options);
        String id = getBlobId(dr);
        updateTracker(id);
        threw = false;
        stats.uploaded(System.nanoTime() - start, TimeUnit.NANOSECONDS, dr.getLength());
        stats.uploadCompleted(id);
        return id;
    } catch (DataStoreException e) {
        stats.uploadFailed();
        throw new IOException(e);
    } finally {
        //DataStore does not closes the stream internally
        //So close the stream explicitly
        Closeables.close(stream, threw);
    }
}
/**
 * Registers a newly written blob id with the tracker. In-memory (inlined)
 * records are never tracked, and tracking problems are logged but never
 * propagated to the writer.
 */
private void updateTracker(String id) {
    if (tracker == null || InMemoryDataRecord.isInstance(id)) {
        return;
    }
    try {
        tracker.add(id);
        log.trace("Tracked Id {}", id);
    } catch (Exception e) {
        log.warn("Could not add track id", e);
    }
}
/**
 * Reads up to {@code length} bytes of the blob starting at {@code pos}.
 * Note: a single InputStream.read() is issued, so fewer bytes than
 * requested may be returned even before end of stream.
 *
 * @return number of bytes read, or -1 at end of stream
 */
@Override
public int readBlob(String encodedBlobId, long pos, byte[] buff, int off, int length) throws IOException {
    //This is inefficient as repeated calls for same blobId would involve opening new Stream
    //instead clients should directly access the stream from DataRecord by special casing for
    //BlobStore which implements DataStore
    InputStream stream = getInputStream(encodedBlobId);
    boolean threw = true;
    try {
        ByteStreams.skipFully(stream, pos);
        int readCount = stream.read(buff, off, length);
        threw = false;
        return readCount;
    } finally {
        // On failure, close errors are logged instead of masking the cause.
        Closeables.close(stream, threw);
    }
}
/**
 * Returns the blob length, using the length suffix encoded in the id when
 * available to avoid a round trip to the data store.
 */
@Override
public long getBlobLength(String encodedBlobId) throws IOException {
    try {
        checkNotNull(encodedBlobId, "BlobId must be specified");
        BlobId id = BlobId.of(encodedBlobId);
        if (encodeLengthInId && id.hasLengthInfo()) {
            return id.length;
        }
        return getDataRecord(id.blobId).getLength();
    } catch (DataStoreException e) {
        throw new IOException(e);
    }
}
/**
 * Resolves a secure reference back to a blob id; returns null (after
 * logging) if the reference cannot be resolved.
 */
@Override
public String getBlobId(@NotNull String reference) {
    checkNotNull(reference);
    DataRecord record;
    try {
        record = delegate.getRecordFromReference(reference);
        if (record != null) {
            return getBlobId(record);
        }
    } catch (DataStoreException e) {
        log.warn("Unable to access the blobId for [{}]", reference, e);
    }
    return null;
}
/**
 * Returns the secure reference for a blob id, or null for in-memory
 * records, unknown ids, or on lookup failure.
 */
@Override
public String getReference(@NotNull String encodedBlobId) {
    checkNotNull(encodedBlobId);
    String blobId = extractBlobId(encodedBlobId);
    //Reference are not created for in memory record
    if (InMemoryDataRecord.isInstance(blobId)) {
        return null;
    }
    DataRecord record;
    try {
        record = delegate.getRecordIfStored(new DataIdentifier(blobId));
        if (record != null) {
            return record.getReference();
        } else {
            log.debug("No blob found for id [{}]", blobId);
        }
    } catch (DataStoreException e) {
        log.warn("Unable to access the blobId for [{}]", blobId, e);
    }
    return null;
}
/**
 * Returns a stream over the blob content. Small blobs — ones whose length
 * is encoded in the id and does not exceed maxCachedBinarySize — are
 * served from (and loaded into) the in-memory cache; everything else
 * streams directly from the data store.
 *
 * @param encodedBlobId blob id, possibly carrying a length suffix
 * @return stream over the blob content
 * @throws IOException if the blob cannot be read
 */
@Override
public InputStream getInputStream(final String encodedBlobId) throws IOException {
    final BlobId blobId = BlobId.of(encodedBlobId);
    if (encodeLengthInId
        && blobId.hasLengthInfo()
        && blobId.length <= maxCachedBinarySize) {
        try {
            byte[] content = cache.get(blobId.blobId, new Callable<byte[]>() {
                @Override
                public byte[] call() throws Exception {
                    boolean threw = true;
                    InputStream stream = getStream(blobId.blobId);
                    try {
                        byte[] result = IOUtils.toByteArray(stream);
                        threw = false;
                        return result;
                    } finally {
                        Closeables.close(stream, threw);
                    }
                }
            });
            return new ByteArrayInputStream(content);
        } catch (ExecutionException e) {
            // Cache load failed: log (typo "steam" -> "stream" fixed) and
            // fall through to stream directly from the data store.
            log.warn("Error occurred while loading bytes from stream while fetching for id {}", encodedBlobId, e);
        }
    }
    try {
        return getStream(blobId.blobId);
    }
    catch (IOException e) {
        stats.downloadFailed(blobId.blobId);
        throw e;
    }
}
//~-------------------------------------------< GarbageCollectableBlobStore >
/** No-op: this store does not chunk blobs into blocks. */
@Override
public void setBlockSize(int x) {
    // nothing to do
}
/**
 * Writes the content of a temp file as a blob and deletes the file
 * afterwards, even if the write fails.
 *
 * @param tempFileName path of the file to consume
 * @return the (possibly length-encoded) blob id
 */
@Override
public String writeBlob(String tempFileName) throws IOException {
    File file = new File(tempFileName);
    InputStream in = null;
    try {
        in = new FileInputStream(file);
        return writeBlob(in);
    } finally {
        closeQuietly(in);
        // NOTE(review): forceDelete throws on failure and can mask an
        // exception from writeBlob above.
        FileUtils.forceDelete(file);
    }
}
/** No-op: mark/sweep GC is handled externally for DataStore-backed stores. */
@Override
public int sweep() throws IOException {
    return 0;
}
/** No-op: see sweep(). */
@Override
public void startMark() throws IOException {
    // nothing to do
}
/** Delegates clearing of the in-use record set. */
@Override
public void clearInUse() {
    delegate.clearInUse();
}
/** No-op: the in-memory cache is managed internally. */
@Override
public void clearCache() {
    // nothing to do
}
/** Always 0: no minimum block size applies. */
@Override
public long getBlockSizeMin() {
    return 0;
}
/**
 * Returns ids of all chunks last modified before the given time
 * (0 or negative means no time filter). Ids are length-encoded when
 * encodeLengthInId is set. The iterators are lazy.
 */
@Override
public Iterator<String> getAllChunkIds(final long maxLastModifiedTime) throws Exception {
    return transform(filter(getAllRecords(), new Predicate<DataRecord>() {
        @Override
        public boolean apply(@Nullable DataRecord input) {
            if (input != null && (maxLastModifiedTime <= 0
                || input.getLastModified() < maxLastModifiedTime)) {
                return true;
            }
            return false;
        }
    }), new Function<DataRecord, String>() {
        @Override
        public String apply(DataRecord input) {
            if (encodeLengthInId) {
                return BlobId.of(input).encodedValue();
            }
            return input.getIdentifier().toString();
        }
    });
}
/**
 * Deletes the given chunks, succeeding only if every requested chunk was
 * actually deleted (see countDeleteChunks for the age filter).
 */
@Override
public boolean deleteChunks(List<String> chunkIds, long maxLastModifiedTime) throws Exception {
    long deletedCount = countDeleteChunks(chunkIds, maxLastModifiedTime);
    return deletedCount == chunkIds.size();
}
/**
 * Deletes the given chunks whose last-modified time is at or before
 * maxLastModifiedTime (0 or negative disables the age check) and returns
 * how many were deleted. Only effective when the delegate supports
 * individual record deletion (MultiDataStoreAware); otherwise returns 0.
 * Deleted ids are logged in batches of 512.
 */
@Override
public long countDeleteChunks(List<String> chunkIds, long maxLastModifiedTime) throws Exception {
    int count = 0;
    if (delegate instanceof MultiDataStoreAware) {
        try {
            List<String> deleted = Lists.newArrayListWithExpectedSize(512);
            for (String chunkId : chunkIds) {
                long start = System.nanoTime();
                String blobId = extractBlobId(chunkId);
                DataIdentifier identifier = new DataIdentifier(blobId);
                DataRecord dataRecord = getRecordForId(identifier);
                boolean success = (maxLastModifiedTime <= 0)
                    || dataRecord.getLastModified() <= maxLastModifiedTime;
                log.trace("Deleting blob [{}] with last modified date [{}] : [{}]", blobId,
                    dataRecord.getLastModified(), success);
                if (success) {
                    ((MultiDataStoreAware) delegate).deleteRecord(identifier);
                    deleted.add(blobId);
                    count++;
                    // Flush the accumulated batch so the log lines stay bounded.
                    if (count % 512 == 0) {
                        log.info("Deleted blobs {}", deleted);
                        deleted.clear();
                    }
                }
                // Per-chunk stats are reported even when the age check skipped it.
                stats.deleted(blobId, System.nanoTime() - start, TimeUnit.NANOSECONDS);
                stats.deleteCompleted(blobId);
            }
            if (!deleted.isEmpty()) {
                log.info("Deleted blobs {}", deleted);
            }
        }
        catch (Exception e) {
            stats.deleteFailed();
            throw e;
        }
    }
    return count;
}
/**
 * Resolves a blob id to its chunk ids. DataStore blobs are stored as a
 * single chunk, so the blob id itself is the only chunk; in-memory
 * (inlined) records have no chunks at all.
 */
@Override
public Iterator<String> resolveChunks(String blobId) throws IOException {
    if (InMemoryDataRecord.isInstance(blobId)) {
        return Collections.emptyIterator();
    }
    return Iterators.singletonIterator(blobId);
}
/**
 * Adds a metadata record from a stream. Only effective when the delegate
 * is a SharedDataStore; otherwise silently a no-op. Extended stats are
 * reported only when the collector supports them.
 */
@Override
public void addMetadataRecord(InputStream stream, String name) throws DataStoreException {
    if (delegate instanceof SharedDataStore) {
        try {
            long start = System.nanoTime();
            ((SharedDataStore) delegate).addMetadataRecord(stream, name);
            if (stats instanceof ExtendedBlobStatsCollector) {
                ((ExtendedBlobStatsCollector) stats).metadataRecordAdded(System.nanoTime() - start, TimeUnit.NANOSECONDS);
                ((ExtendedBlobStatsCollector) stats).addMetadataRecordCompleted(name);
            }
        }
        catch (DataStoreException e) {
            if (stats instanceof ExtendedBlobStatsCollector) {
                ((ExtendedBlobStatsCollector) stats).addMetadataRecordFailed(name);
            }
            throw e;
        }
    }
}
/**
 * Adds a metadata record from a file. Only effective when the delegate is
 * a SharedDataStore; otherwise silently a no-op.
 */
@Override
public void addMetadataRecord(File f, String name) throws DataStoreException {
    if (delegate instanceof SharedDataStore) {
        try {
            long start = System.nanoTime();
            ((SharedDataStore) delegate).addMetadataRecord(f, name);
            if (stats instanceof ExtendedBlobStatsCollector) {
                ((ExtendedBlobStatsCollector) stats).metadataRecordAdded(System.nanoTime() - start, TimeUnit.NANOSECONDS);
                ((ExtendedBlobStatsCollector) stats).addMetadataRecordCompleted(name);
            }
        }
        catch (DataStoreException e) {
            if (stats instanceof ExtendedBlobStatsCollector) {
                ((ExtendedBlobStatsCollector) stats).addMetadataRecordFailed(name);
            }
            throw e;
        }
    }
}
/**
 * Returns the named metadata record, or null when the delegate is not a
 * SharedDataStore.
 */
@Override public DataRecord getMetadataRecord(String name) {
    if (delegate instanceof SharedDataStore) {
        try {
            long start = System.nanoTime();
            DataRecord record = ((SharedDataStore) delegate).getMetadataRecord(name);
            if (stats instanceof ExtendedBlobStatsCollector) {
                ((ExtendedBlobStatsCollector) stats).getMetadataRecordCalled(System.nanoTime() - start, TimeUnit.NANOSECONDS);
                ((ExtendedBlobStatsCollector) stats).getMetadataRecordCompleted(name);
            }
            return record;
        }
        catch (Exception e) {
            if (stats instanceof ExtendedBlobStatsCollector) {
                ((ExtendedBlobStatsCollector) stats).getMetadataRecordFailed(name);
            }
            throw e;
        }
    }
    return null;
}
/**
 * Checks whether the named metadata record exists; false when the
 * delegate is not a SharedDataStore.
 */
@Override
public boolean metadataRecordExists(String name) {
    if (delegate instanceof SharedDataStore) {
        try {
            long start = System.nanoTime();
            boolean exists = ((SharedDataStore) delegate).metadataRecordExists(name);
            if (stats instanceof ExtendedBlobStatsCollector) {
                ((ExtendedBlobStatsCollector) stats).metadataRecordExistsCalled(System.nanoTime() - start, TimeUnit.NANOSECONDS);
                ((ExtendedBlobStatsCollector) stats).metadataRecordExistsCompleted(name);
            }
            return exists;
        }
        catch (Exception e) {
            if (stats instanceof ExtendedBlobStatsCollector) {
                ((ExtendedBlobStatsCollector) stats).metadataRecordExistsFailed(name);
            }
            throw e;
        }
    }
    return false;
}
/**
 * Returns all metadata records with the given name prefix, or null
 * (NOTE(review): not an empty list) when the delegate is not shared.
 */
@Override
public List<DataRecord> getAllMetadataRecords(String prefix) {
    if (delegate instanceof SharedDataStore) {
        try {
            long start = System.nanoTime();
            List<DataRecord> records = ((SharedDataStore) delegate).getAllMetadataRecords(prefix);
            if (stats instanceof ExtendedBlobStatsCollector) {
                ((ExtendedBlobStatsCollector) stats).getAllMetadataRecordsCalled(System.nanoTime() - start, TimeUnit.NANOSECONDS);
                ((ExtendedBlobStatsCollector) stats).getAllMetadataRecordsCompleted(prefix);
            }
            return records;
        }
        catch (Exception e) {
            if (stats instanceof ExtendedBlobStatsCollector) {
                ((ExtendedBlobStatsCollector) stats).getAllMetadataRecordsFailed(prefix);
            }
            throw e;
        }
    }
    return null;
}
/**
 * Deletes the named metadata record; false when nothing was deleted or
 * the delegate is not a SharedDataStore.
 */
@Override
public boolean deleteMetadataRecord(String name) {
    if (delegate instanceof SharedDataStore) {
        try {
            long start = System.nanoTime();
            boolean deleted = ((SharedDataStore) delegate).deleteMetadataRecord(name);
            if (stats instanceof ExtendedBlobStatsCollector) {
                ((ExtendedBlobStatsCollector) stats).metadataRecordDeleted(System.nanoTime() - start, TimeUnit.NANOSECONDS);
                ((ExtendedBlobStatsCollector) stats).deleteMetadataRecordCompleted(name);
            }
            return deleted;
        }
        catch (Exception e) {
            if (stats instanceof ExtendedBlobStatsCollector) {
                ((ExtendedBlobStatsCollector) stats).deleteMetadataRecordFailed(name);
            }
            throw e;
        }
    }
    return false;
}
/**
 * Deletes all metadata records with the given name prefix; no-op when the
 * delegate is not a SharedDataStore.
 */
@Override
public void deleteAllMetadataRecords(String prefix) {
    if (delegate instanceof SharedDataStore) {
        try {
            long start = System.nanoTime();
            ((SharedDataStore) delegate).deleteAllMetadataRecords(prefix);
            if (stats instanceof ExtendedBlobStatsCollector) {
                ((ExtendedBlobStatsCollector) stats).allMetadataRecordsDeleted(System.nanoTime() - start, TimeUnit.NANOSECONDS);
                ((ExtendedBlobStatsCollector) stats).deleteAllMetadataRecordsCompleted(prefix);
            }
        }
        catch (Exception e) {
            if (stats instanceof ExtendedBlobStatsCollector) {
                ((ExtendedBlobStatsCollector) stats).deleteAllMetadataRecordsFailed(prefix);
            }
            throw e;
        }
    }
}
/**
 * Registers this repository with the shared data store by writing an
 * empty marker record named after the repository id.
 *
 * @param repositoryId id of the repository using this store
 */
@Override
public void setRepositoryId(String repositoryId) throws DataStoreException {
    this.repositoryId = repositoryId;
    addMetadataRecord(new ByteArrayInputStream(new byte[0]),
        SharedDataStoreUtils.SharedStoreRecordType.REPOSITORY.getNameFromId(repositoryId));
    log.info("repositoryId registered in blobstore - [{}]", repositoryId);
}
/** Returns the registered repository id, or null if never set. */
@Override
public String getRepositoryId() {
    return repositoryId;
}
/**
 * Returns all records. Shared delegates provide this natively; otherwise
 * the identifiers are lazily mapped to records, with failed lookups
 * logged and surfaced as null elements in the iterator.
 */
@Override
public Iterator<DataRecord> getAllRecords() throws DataStoreException {
    long start = System.nanoTime();
    Iterator<DataRecord> result = delegate instanceof SharedDataStore ?
        ((SharedDataStore) delegate).getAllRecords() :
        Iterators.transform(delegate.getAllIdentifiers(),
            new Function<DataIdentifier, DataRecord>() {
                @Nullable
                @Override
                public DataRecord apply(@Nullable DataIdentifier input) {
                    try {
                        return delegate.getRecord(input);
                    } catch (DataStoreException e) {
                        log.warn("Error occurred while fetching DataRecord for identifier {}", input, e);
                    }
                    return null;
                }
            });
    if (stats instanceof ExtendedBlobStatsCollector) {
        ((ExtendedBlobStatsCollector) stats).getAllRecordsCalled(System.nanoTime() - start, TimeUnit.NANOSECONDS);
        ((ExtendedBlobStatsCollector) stats).getAllRecordsCompleted();
    }
    return result;
}
/**
 * Fetches the record for an identifier, preferring the SharedDataStore
 * lookup when available, with extended stats reported when supported.
 *
 * @param identifier the record identifier
 * @return the record
 * @throws DataStoreException on lookup failure
 */
@Override
public DataRecord getRecordForId(DataIdentifier identifier) throws DataStoreException {
    try {
        long start = System.nanoTime();
        DataRecord record = delegate instanceof SharedDataStore ?
            ((SharedDataStore) delegate).getRecordForId(identifier) :
            delegate.getRecord(identifier);
        if (stats instanceof ExtendedBlobStatsCollector) {
            long elapsed = System.nanoTime() - start;
            ((ExtendedBlobStatsCollector) stats).getRecordForIdCalled(elapsed, TimeUnit.NANOSECONDS, record.getLength());
            ((ExtendedBlobStatsCollector) stats).getRecordForIdCompleted(identifier.toString());
        }
        return record;
    }
    catch (DataStoreException e) {
        if (stats instanceof ExtendedBlobStatsCollector) {
            ((ExtendedBlobStatsCollector) stats).getRecordForIdFailed(identifier.toString());
        }
        throw e;
    }
}
/** Reports SHARED when the delegate supports multi-repository sharing. */
@Override
public Type getType() {
    return (delegate instanceof SharedDataStore) ? Type.SHARED : Type.DEFAULT;
}
/**
 * Adds a record with explicit options, reporting add stats. Unlike
 * addRecord(InputStream), this path never inlines small binaries.
 *
 * @param input content to store
 * @param options options forwarded when the delegate is a TypedDataStore
 * @return the stored record
 */
@Override
public DataRecord addRecord(InputStream input, BlobOptions options) throws DataStoreException {
    try {
        long start = System.nanoTime();
        DataRecord result = addRecordInternal(input, options);
        stats.recordAdded(System.nanoTime() - start, TimeUnit.NANOSECONDS, result.getLength());
        stats.addRecordCompleted(result.getIdentifier().toString());
        return result;
    }
    catch (DataStoreException e) {
        stats.addRecordFailed();
        throw e;
    }
}
/**
 * Adds a record to the delegate, forwarding the options only when the
 * delegate understands them (TypedDataStore); otherwise they are dropped.
 */
private DataRecord addRecordInternal(InputStream input, BlobOptions options) throws DataStoreException {
    if (delegate instanceof TypedDataStore) {
        return ((TypedDataStore) delegate).addRecord(input, options);
    }
    return delegate.addRecord(input);
}
//~---------------------------------------------< Object >
/** Identifies the wrapper and the concrete delegate class, for logs. */
@Override
public String toString() {
    return "DataStore backed BlobStore [" + delegate.getClass().getName() + "]";
}
//~---------------------------------------------< Properties >
/** Returns the wrapped DataStore. */
public DataStore getDataStore() {
    return delegate;
}
/** Returns the stats view of the in-memory binary cache. */
public CacheStats getCacheStats() {
    return cacheStats;
}
/** Sets the max binary size (bytes) eligible for in-memory caching. */
public void setMaxCachedBinarySize(int maxCachedBinarySize) {
    this.maxCachedBinarySize = maxCachedBinarySize;
}
/** Replaces the (default NOOP) stats collector. */
public void setBlobStatsCollector(BlobStatsCollector stats) {
    this.stats = stats;
}
/** Installs the tracker notified of every blob id written. */
@Override
public void addTracker(BlobTracker tracker) {
    this.tracker = tracker;
}
/** Returns the installed tracker, or null if none was added. */
@Override
@Nullable
public BlobTracker getTracker() {
    return tracker;
}
//~---------------------------------------------< Internal >
/**
 * Opens a buffered, stats-instrumented stream for the given (raw, not
 * length-encoded) blob id; DataStoreException is wrapped as IOException.
 */
protected InputStream getStream(String blobId) throws IOException {
    try {
        long startTime = System.nanoTime();
        InputStream in = getDataRecord(blobId).getStream();
        // Buffer only if the delegate did not already do so.
        if (!(in instanceof BufferedInputStream)){
            in = new BufferedInputStream(in);
        }
        return StatsCollectingStreams.wrap(stats, blobId, in, startTime);
    } catch (DataStoreException e) {
        throw new IOException(e);
    }
}
/**
 * Resolves a raw blob id to its record: inlined ids decode to an
 * InMemoryDataRecord, everything else is fetched from the delegate.
 * Fails with an exception if no record can be resolved.
 */
protected DataRecord getDataRecord(String blobId) throws DataStoreException {
    DataRecord record;
    if (InMemoryDataRecord.isInstance(blobId)) {
        record = InMemoryDataRecord.getInstance(blobId);
    } else {
        record = delegate.getRecord(new DataIdentifier(blobId));
    }
    checkNotNull(record, "No DataRecord found for blobId [%s]", blobId);
    return record;
}
/** True when the identifier denotes an inlined (in-memory) record. */
private static boolean isInMemoryRecord(DataIdentifier identifier) {
    return InMemoryDataRecord.isInstance(identifier.toString());
}
/**
 * Create a BLOB value from in input stream. Small objects will create an in-memory object,
 * while large objects are stored in the data store
 *
 * @param in the input stream
 * @param options passed through to the delegate for large objects
 * @return the value
 * @throws IOException if reading the stream fails
 * @throws DataStoreException if the delegate fails to store a large object
 */
private DataRecord writeStream(InputStream in, BlobOptions options) throws IOException, DataStoreException {
    // Read up to minRecordLength + 1 bytes: if we hit EOF before filling
    // the buffer the content is small enough to inline.
    int maxMemorySize = Math.max(0, delegate.getMinRecordLength() + 1);
    byte[] buffer = new byte[maxMemorySize];
    int pos = 0, len = maxMemorySize;
    while (pos < maxMemorySize) {
        int l = in.read(buffer, pos, len);
        if (l < 0) {
            break;
        }
        pos += l;
        len -= l;
    }
    DataRecord record;
    if (pos < maxMemorySize) {
        // shrink the buffer
        byte[] data = new byte[pos];
        System.arraycopy(buffer, 0, data, 0, pos);
        record = InMemoryDataRecord.getInstance(data);
    } else {
        // a few bytes are already read, need to re-build the input stream
        in = new SequenceInputStream(new ByteArrayInputStream(buffer, 0, pos), in);
        record = addRecordInternal(in, options);
    }
    return record;
}
/**
 * Derives the public blob id for a record, appending the length suffix
 * when encodeLengthInId is enabled.
 */
private String getBlobId(DataRecord dr) {
    if (encodeLengthInId) {
        return BlobId.of(dr).encodedValue();
    }
    return dr.getIdentifier().toString();
}
/**
 * Strips the length suffix from an encoded blob id, returning the raw
 * DataStore identifier; identity when length encoding is disabled.
 */
protected String extractBlobId(String encodedBlobId) {
    if (encodeLengthInId) {
        return BlobId.of(encodedBlobId).blobId;
    }
    return encodedBlobId;
}
// <--------------- BlobAccessProvider implementation - Direct binary access feature --------------->
/**
 * Initiates a direct binary upload with default upload options.
 *
 * @param maxUploadSizeInBytes upper bound of the binary to be uploaded
 * @param maxNumberOfURIs maximum number of upload URIs the client can use
 * @return the upload instructions, or null if not supported
 */
@Nullable
@Override
public BlobUpload initiateBlobUpload(long maxUploadSizeInBytes, int maxNumberOfURIs)
    throws IllegalArgumentException {
    return initiateBlobUpload(maxUploadSizeInBytes, maxNumberOfURIs, BlobUploadOptions.DEFAULT);
}
@Nullable
@Override
public BlobUpload initiateBlobUpload(long maxUploadSizeInBytes, int maxNumberOfURIs, @NotNull final BlobUploadOptions options)
throws IllegalArgumentException {
if (delegate instanceof DataRecordAccessProvider) {
try {
long start = System.nanoTime();
DataRecordAccessProvider provider = (DataRecordAccessProvider) this.delegate;
DataRecordUpload upload = provider.initiateDataRecordUpload(maxUploadSizeInBytes, maxNumberOfURIs,
DataRecordUploadOptions.fromBlobUploadOptions(options));
if (upload == null) {
if (stats instanceof ExtendedBlobStatsCollector) {
((ExtendedBlobStatsCollector) stats).initiateBlobUploadFailed();
}
return null;
}
if (stats instanceof ExtendedBlobStatsCollector) {