Use our own minimal S3Mock rather than findify S3Mock (#1806)
* Use our own minimal S3Mock rather than findify S3Mock

The findify mock's project isn't very active and is missing some
functionality, such as setting metadata on multipart objects (the
support is in their code base, but there has been no release with it).
Rather than wait for them to release the features we need, we should
use our own mock, which we can update ourselves as needed. Changes to
the mock should be validated against the real S3 using
-DtestRealAWS=true (see the sketch after this message).

Master Issue: #1511

* fix test failures
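
For context, the piece missing from the findify mock is the ability to
handle object metadata passed when a multipart upload is initiated.
Below is a minimal sketch of the kind of AWS SDK call the offloader
makes; the client setup, bucket, key, and metadata values are
illustrative assumptions, not code from this commit.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadResult;
import com.amazonaws.services.s3.model.ObjectMetadata;

public class MultipartMetadataSketch {
    public static void main(String[] args) {
        // Hypothetical client and names, for illustration only.
        AmazonS3 s3client = AmazonS3ClientBuilder.standard().build();
        String bucket = "example-bucket";
        String dataBlockKey = "example-ledger-data-block";

        // User metadata attached at upload initiation is what no released
        // version of the findify mock could record.
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.addUserMetadata("example-key", "example-value");

        // The three-argument constructor carries the metadata with the upload.
        InitiateMultipartUploadRequest dataBlockReq =
                new InitiateMultipartUploadRequest(bucket, dataBlockKey, metadata);
        InitiateMultipartUploadResult dataBlockRes =
                s3client.initiateMultipartUpload(dataBlockReq);
        System.out.println("uploadId: " + dataBlockRes.getUploadId());
    }
}

Per the commit message, any change to the in-repo mock touching
behaviour like this should be re-validated against real S3 by running
the tests with -DtestRealAWS=true.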
ivankelly authored and merlimat committed May 23, 2018
1 parent 928ba3d commit b0b0891
Showing 6 changed files with 346 additions and 51 deletions.
8 changes: 0 additions & 8 deletions pom.xml
@@ -154,7 +154,6 @@ flexible messaging model and an intuitive client API.</description>

<!-- test dependencies -->
<disruptor.version>3.4.0</disruptor.version>
<s3mock.version>0.2.5</s3mock.version>

<!-- Plugin dependencies -->
<protobuf-maven-plugin.version>0.5.0</protobuf-maven-plugin.version>
@@ -729,13 +728,6 @@ flexible messaging model and an intuitive client API.</description>
<artifactId>disruptor</artifactId>
<version>${disruptor.version}</version>
</dependency>

<dependency>
<groupId>io.findify</groupId>
<artifactId>s3mock_2.12</artifactId>
<version>${s3mock.version}</version>
<scope>test</scope>
</dependency>
</dependencies>
</dependencyManagement>

6 changes: 0 additions & 6 deletions pulsar-broker/pom.xml
@@ -148,12 +148,6 @@
<artifactId>aws-java-sdk-s3</artifactId>
</dependency>

<dependency>
<groupId>io.findify</groupId>
<artifactId>s3mock_2.12</artifactId>
<scope>test</scope>
</dependency>

<!-- functions related dependencies (begin) -->

<dependency>
@@ -118,7 +118,7 @@ public CompletableFuture<Void> offload(ReadHandle readHandle,
.withLedgerMetadata(readHandle.getLedgerMetadata());
String dataBlockKey = dataBlockOffloadKey(readHandle.getId(), uuid);
String indexBlockKey = indexBlockOffloadKey(readHandle.getId(), uuid);
InitiateMultipartUploadRequest dataBlockReq = new InitiateMultipartUploadRequest(bucket, dataBlockKey);
InitiateMultipartUploadRequest dataBlockReq = new InitiateMultipartUploadRequest(bucket, dataBlockKey, new ObjectMetadata());
InitiateMultipartUploadResult dataBlockRes = null;

// init multi part upload for data block.
@@ -172,9 +172,9 @@ public CompletableFuture<Void> offload(ReadHandle readHandle,
.withUploadId(dataBlockRes.getUploadId())
.withPartETags(etags));
} catch (Throwable t) {
promise.completeExceptionally(t);
s3client.abortMultipartUpload(
new AbortMultipartUploadRequest(bucket, dataBlockKey, dataBlockRes.getUploadId()));
promise.completeExceptionally(t);
return;
}

@@ -191,8 +191,8 @@ public CompletableFuture<Void> offload(ReadHandle readHandle,
metadata));
promise.complete(null);
} catch (Throwable t) {
s3client.deleteObject(bucket, dataBlockKey);
promise.completeExceptionally(t);
s3client.deleteObject(bucket, dataBlockKey);
return;
}
});
@@ -116,13 +116,8 @@ public void testHappyCase() throws Exception {

@Test
public void testBucketDoesNotExist() throws Exception {
ServiceConfiguration conf = new ServiceConfiguration();
conf.setManagedLedgerOffloadDriver(S3ManagedLedgerOffloader.DRIVER_NAME);
conf.setS3ManagedLedgerOffloadBucket("no-bucket");
conf.setS3ManagedLedgerOffloadServiceEndpoint(s3endpoint);
conf.setS3ManagedLedgerOffloadRegion("eu-west-1");
LedgerOffloader offloader = S3ManagedLedgerOffloader.create(conf, scheduler);

LedgerOffloader offloader = new S3ManagedLedgerOffloader(s3client, "no-bucket", scheduler,
DEFAULT_BLOCK_SIZE, DEFAULT_READ_BUFFER_SIZE);
try {
offloader.offload(buildReadHandle(), UUID.randomUUID(), new HashMap<>()).get();
Assert.fail("Shouldn't be able to add to bucket");
@@ -383,11 +378,10 @@ public void testOffloadReadInvalidEntryIds() throws Exception {

@Test
public void testDeleteOffloaded() throws Exception {
int maxBlockSize = 1024;
int entryCount = 3;
ReadHandle readHandle = buildReadHandle(maxBlockSize, entryCount);
ReadHandle readHandle = buildReadHandle(DEFAULT_BLOCK_SIZE, 1);
UUID uuid = UUID.randomUUID();
LedgerOffloader offloader = new S3ManagedLedgerOffloader(s3client, BUCKET, scheduler, maxBlockSize, DEFAULT_READ_BUFFER_SIZE);
LedgerOffloader offloader = new S3ManagedLedgerOffloader(s3client, BUCKET, scheduler,
DEFAULT_BLOCK_SIZE, DEFAULT_READ_BUFFER_SIZE);

// verify object exist after offload
offloader.offload(readHandle, uuid, new HashMap<>()).get();
@@ -402,11 +396,10 @@ public void testDeleteOffloaded() throws Exception {

@Test
public void testDeleteOffloadedFail() throws Exception {
int maxBlockSize = 1024;
int entryCount = 3;
ReadHandle readHandle = buildReadHandle(maxBlockSize, entryCount);
ReadHandle readHandle = buildReadHandle(DEFAULT_BLOCK_SIZE, 1);
UUID uuid = UUID.randomUUID();
LedgerOffloader offloader = new S3ManagedLedgerOffloader(s3client, BUCKET, scheduler, maxBlockSize, DEFAULT_READ_BUFFER_SIZE);
LedgerOffloader offloader = new S3ManagedLedgerOffloader(s3client, BUCKET, scheduler,
DEFAULT_BLOCK_SIZE, DEFAULT_READ_BUFFER_SIZE);
String failureString = "fail deleteOffloaded";
AmazonS3 mockS3client = Mockito.spy(s3client);
Mockito
