Skip to content

Commit

Permalink
TieredStorage: add debug information (apache#14907)
Browse files Browse the repository at this point in the history
* TieredStorage: add debug information
- enable SLF4J logging in JClouds
- add the Pulsar cluster name to object metadata
- log object names during offloading

(cherry picked from commit 83dad0a)
  • Loading branch information
eolivelli authored and lhotari committed May 9, 2022
1 parent d17110a commit fc37bd3
Show file tree
Hide file tree
Showing 3 changed files with 17 additions and 3 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,7 @@ public interface LedgerOffloader {
// TODO: improve the user metadata in subsequent changes
String METADATA_SOFTWARE_VERSION_KEY = "S3ManagedLedgerOffloaderSoftwareVersion";
String METADATA_SOFTWARE_GITSHA_KEY = "S3ManagedLedgerOffloaderSoftwareGitSha";
String METADATA_PULSAR_CLUSTER_NAME = "pulsarClusterName";

/**
* Get offload driver name.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -941,7 +941,8 @@ public synchronized LedgerOffloader createManagedLedgerOffloader(OffloadPolicies
offloadPolicies,
ImmutableMap.of(
LedgerOffloader.METADATA_SOFTWARE_VERSION_KEY.toLowerCase(), PulsarVersion.getVersion(),
LedgerOffloader.METADATA_SOFTWARE_GITSHA_KEY.toLowerCase(), PulsarVersion.getGitSha()
LedgerOffloader.METADATA_SOFTWARE_GITSHA_KEY.toLowerCase(), PulsarVersion.getGitSha(),
LedgerOffloader.METADATA_PULSAR_CLUSTER_NAME.toLowerCase(), config.getClusterName()
),
schemaStorage,
getOffloaderScheduler(offloadPolicies));
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@
import com.google.common.collect.Lists;

import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
Expand Down Expand Up @@ -137,14 +138,20 @@ public CompletableFuture<Void> offload(ReadHandle readHandle,
.withDataBlockHeaderLength(BlockAwareSegmentInputStreamImpl.getHeaderSize());
String dataBlockKey = DataBlockUtils.dataBlockOffloadKey(readHandle.getId(), uuid);
String indexBlockKey = DataBlockUtils.indexBlockOffloadKey(readHandle.getId(), uuid);
log.info("ledger {} dataBlockKey {} indexBlockKey {}", readHandle.getId(), dataBlockKey, indexBlockKey);

MultipartUpload mpu = null;
List<MultipartPart> parts = Lists.newArrayList();

// init multi part upload for data block.
try {
BlobBuilder blobBuilder = writeBlobStore.blobBuilder(dataBlockKey);
DataBlockUtils.addVersionInfo(blobBuilder, userMetadata);
Map<String, String> objectMetadata = new HashMap<>(userMetadata);
objectMetadata.put("role", "data");
if (extraMetadata != null) {
objectMetadata.putAll(extraMetadata);
}
DataBlockUtils.addVersionInfo(blobBuilder, objectMetadata);
Blob blob = blobBuilder.build();
mpu = writeBlobStore.initiateMultipartUpload(config.getBucket(), blob.getMetadata(), new PutOptions());
} catch (Throwable t) {
Expand Down Expand Up @@ -207,7 +214,12 @@ public CompletableFuture<Void> offload(ReadHandle readHandle,
OffloadIndexBlock.IndexInputStream indexStream = index.toStream()) {
// write the index block
BlobBuilder blobBuilder = writeBlobStore.blobBuilder(indexBlockKey);
DataBlockUtils.addVersionInfo(blobBuilder, userMetadata);
Map<String, String> objectMetadata = new HashMap<>(userMetadata);
objectMetadata.put("role", "index");
if (extraMetadata != null) {
objectMetadata.putAll(extraMetadata);
}
DataBlockUtils.addVersionInfo(blobBuilder, objectMetadata);
Payload indexPayload = Payloads.newInputStreamPayload(indexStream);
indexPayload.getContentMetadata().setContentLength((long) indexStream.getStreamSize());
indexPayload.getContentMetadata().setContentType("application/octet-stream");
Expand Down

0 comments on commit fc37bd3

Please sign in to comment.