Skip to content
Permalink
Browse files

S3 upload and multipart working

  • Loading branch information
logscape committed Dec 13, 2019
1 parent 407725f commit 998c43e76647b1dd4478779e96e9c0b1ca3d87e5
@@ -71,10 +71,10 @@
<!-- </resource>-->
<!-- </resources>-->
<!-- <testSourceDirectory>test</testSourceDirectory>-->
<!-- <testResources>-->
<!-- <testResource>-->
<!-- <directory>test-data</directory>-->
<!-- </testResource></testResources>-->
<testResources>
<testResource>
<directory>test-data</directory>
</testResource></testResources>

<pluginManagement>
<plugins>
@@ -44,6 +44,12 @@
<groupId>io.quarkus</groupId>
<artifactId>quarkus-resteasy</artifactId>
</dependency>
<!-- https://mvnrepository.com/artifact/org.jboss.resteasy/resteasy-multipart-provider -->
<dependency>
<groupId>org.jboss.resteasy</groupId>
<artifactId>resteasy-multipart-provider</artifactId>
<version>4.4.1.Final</version>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-junit5</artifactId>
@@ -0,0 +1,185 @@
package com.liquidlabs.logscapeng.uploader;

import com.amazonaws.AmazonServiceException;
import com.amazonaws.SdkClientException;
import com.amazonaws.auth.profile.ProfileCredentialsProvider;
import com.amazonaws.regions.Regions;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.*;
import org.apache.commons.io.IOUtils;

import javax.enterprise.context.ApplicationScoped;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

@ApplicationScoped
public class AWSS3UploaderService {

    /** Part size for multipart upload. 5 MB is the S3 minimum for every part except the last. */
    private static final long DEFAULT_PART_SIZE = 5L * 1024 * 1024;

    /**
     * Uploads the payload carried by {@code upload} to an S3 bucket named after the tenant,
     * using the S3 multipart upload API. The bucket is created if it does not already exist.
     *
     * <p>The byte payload is spooled to a temporary file (the SDK's multipart API reads from a
     * {@link File} with offsets), which is always deleted before returning.
     *
     * <p>NOTE(review): AWS failures are currently logged and swallowed, and "yay" is returned
     * regardless — callers cannot distinguish success from failure. Kept for backward
     * compatibility; consider propagating an error result.
     *
     * @param upload carries tenant (bucket name), resource/filename (key), tags and file bytes
     * @return the literal string "yay" (legacy contract; see note above)
     */
    public String upload(UploadMeta upload) {
        Regions clientRegion = Regions.EU_WEST_2;
        String bucketName = upload.tenant;
        String keyName = upload.resource + "/" + upload.filename;

        // User metadata travels with the S3 object so downstream consumers can recover
        // the tenant/tagging context without a side-channel lookup.
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.addUserMetadata("tags", upload.tags);
        objectMetadata.addUserMetadata("tenant", upload.tenant);
        objectMetadata.addUserMetadata("length", "" + upload.filecontent.length);

        File file = createTempFile(upload.filecontent);
        long contentLength = file.length();
        long partSize = DEFAULT_PART_SIZE;

        try {
            AmazonS3 s3Client = AmazonS3ClientBuilder.standard()
                    .withRegion(clientRegion)
                    .withCredentials(new ProfileCredentialsProvider())
                    .build();

            if (!s3Client.doesBucketExistV2(bucketName)) {
                s3Client.createBucket(bucketName);
            }

            // ETags collected per part; the complete-request needs the full list.
            List<PartETag> partETags = new ArrayList<>();

            InitiateMultipartUploadRequest initRequest =
                    new InitiateMultipartUploadRequest(bucketName, keyName, objectMetadata);
            InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);

            try {
                // Upload the file in 5 MB parts; part numbers are 1-based.
                long filePosition = 0;
                for (int i = 1; filePosition < contentLength; i++) {
                    // The final part may be smaller than 5 MB.
                    partSize = Math.min(partSize, contentLength - filePosition);

                    UploadPartRequest uploadRequest = new UploadPartRequest()
                            .withBucketName(bucketName)
                            .withKey(keyName)
                            .withUploadId(initResponse.getUploadId())
                            .withPartNumber(i)
                            .withFileOffset(filePosition)
                            .withFile(file)
                            .withPartSize(partSize);

                    UploadPartResult uploadResult = s3Client.uploadPart(uploadRequest);
                    partETags.add(uploadResult.getPartETag());

                    filePosition += partSize;
                }

                s3Client.completeMultipartUpload(new CompleteMultipartUploadRequest(
                        bucketName, keyName, initResponse.getUploadId(), partETags));
            } catch (RuntimeException e) {
                // FIX: abort the multipart upload on failure, otherwise the already-uploaded
                // parts remain in S3 and accrue storage charges indefinitely.
                s3Client.abortMultipartUpload(new AbortMultipartUploadRequest(
                        bucketName, keyName, initResponse.getUploadId()));
                throw e;
            }
        } catch (AmazonServiceException e) {
            // The call reached S3 but S3 rejected it (error response).
            e.printStackTrace();
        } catch (SdkClientException e) {
            // S3 could not be contacted, or its response could not be parsed.
            e.printStackTrace();
        } finally {
            // Always clean up the spool file, success or failure.
            file.delete();
        }

        return "yay";
    }

    /**
     * Spools the given bytes into a freshly created temp file.
     *
     * @param filecontent raw bytes to write
     * @return the temp file containing exactly {@code filecontent}
     * @throws RuntimeException wrapping the underlying {@link IOException} on failure
     */
    private File createTempFile(byte[] filecontent) {
        try {
            File tempFile = File.createTempFile("test", ".tmp");
            // FIX: try-with-resources — the original leaked the stream if write() threw.
            try (FileOutputStream fos = new FileOutputStream(tempFile)) {
                fos.write(filecontent);
            }
            return tempFile;
        } catch (IOException e) {
            // FIX: preserve the cause instead of printStackTrace + bare RuntimeException.
            throw new RuntimeException("Failed to create temp file", e);
        }
    }

    /**
     * Standalone smoke-test harness, adapted from the AWS SDK multipart-upload sample.
     * Replace the placeholder strings before running; not used by the service path.
     */
    public static void main(String[] args) throws IOException {
        Regions clientRegion = Regions.DEFAULT_REGION;
        String bucketName = "*** Bucket name ***";
        String keyName = "*** Key name ***";
        String filePath = "*** Path to file to upload ***";

        File file = new File(filePath);
        long contentLength = file.length();
        long partSize = DEFAULT_PART_SIZE;

        try {
            AmazonS3 s3Client = AmazonS3ClientBuilder.standard()
                    .withRegion(clientRegion)
                    .withCredentials(new ProfileCredentialsProvider())
                    .build();

            List<PartETag> partETags = new ArrayList<>();

            InitiateMultipartUploadRequest initRequest =
                    new InitiateMultipartUploadRequest(bucketName, keyName);
            InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);

            long filePosition = 0;
            for (int i = 1; filePosition < contentLength; i++) {
                // The final part may be smaller than 5 MB.
                partSize = Math.min(partSize, contentLength - filePosition);

                UploadPartRequest uploadRequest = new UploadPartRequest()
                        .withBucketName(bucketName)
                        .withKey(keyName)
                        .withUploadId(initResponse.getUploadId())
                        .withPartNumber(i)
                        .withFileOffset(filePosition)
                        .withFile(file)
                        .withPartSize(partSize);

                UploadPartResult uploadResult = s3Client.uploadPart(uploadRequest);
                partETags.add(uploadResult.getPartETag());

                filePosition += partSize;
            }

            s3Client.completeMultipartUpload(new CompleteMultipartUploadRequest(
                    bucketName, keyName, initResponse.getUploadId(), partETags));
        } catch (AmazonServiceException e) {
            // The call reached S3 but S3 rejected it (error response).
            e.printStackTrace();
        } catch (SdkClientException e) {
            // S3 could not be contacted, or its response could not be parsed.
            e.printStackTrace();
        }
    }
}

This file was deleted.

@@ -0,0 +1,79 @@
package com.liquidlabs.logscapeng.uploader;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import com.amazonaws.AmazonServiceException;
import com.amazonaws.SdkClientException;
import com.amazonaws.auth.profile.ProfileCredentialsProvider;
import com.amazonaws.regions.Regions;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.*;
import org.acme.quickstart.GreetingService;
import org.apache.commons.io.IOUtils;
import org.jboss.resteasy.annotations.providers.multipart.MultipartForm;
import org.jboss.resteasy.plugins.providers.multipart.InputPart;
import org.jboss.resteasy.plugins.providers.multipart.MultipartFormDataInput;
import org.jboss.resteasy.annotations.providers.multipart.MultipartForm;

import javax.inject.Inject;
import javax.ws.rs.*;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.MultivaluedMap;
import javax.ws.rs.core.Response;

/**
* First (naive) implementation.
* Server side uploader the runs with AWS Client credentials.
* Loads directly to S3 bucket, driven by a REST based client that does a binary post.
*
* A Lambda wont handle the volume of data upload.
*/
@Path("/upload")
public class SimpleServersideUploaderResource {

    @Inject
    AWSS3UploaderService awss3UploaderService;

    /**
     * Identity/health endpoint.
     *
     * @return the fully-qualified class name of this resource
     */
    @GET
    @Produces(MediaType.TEXT_PLAIN)
    public String id() {
        return SimpleServersideUploaderResource.class.getCanonicalName();
    }

    /**
     * Accepts a multipart/form-data POST and hands the decoded form off to the
     * S3 uploader service.
     *
     * @param uploadMeta the multipart form bound to {@code UploadMeta} fields
     * @return 200 with the uploader service's result string as the body
     * @throws IOException declared for multipart decoding failures
     */
    @POST
    @Path("/file")
    @Consumes(MediaType.MULTIPART_FORM_DATA)
    @Produces(MediaType.TEXT_PLAIN)
    public Response uploadFile(@MultipartForm UploadMeta uploadMeta) throws IOException {
        String upload = awss3UploaderService.upload(uploadMeta);
        return Response.status(200).entity(upload).build();
    }

    /**
     * Extracts the filename token from a multipart Content-Disposition header,
     * e.g. {@code form-data; name="file"; filename="report.log"} -> {@code report.log}.
     *
     * <p>Currently unused by the endpoints above; kept for the raw
     * {@code MultipartFormDataInput} handling path.
     *
     * @param header the part headers
     * @return the unquoted filename, or "unknown" if no filename token is present
     */
    private String getFileName(MultivaluedMap<String, String> header) {
        String[] contentDisposition = header.getFirst("Content-Disposition").split(";");

        for (String token : contentDisposition) {
            if (token.trim().startsWith("filename")) {
                String[] name = token.split("=");
                return name[1].trim().replaceAll("\"", "");
            }
        }
        return "unknown";
    }
}

0 comments on commit 998c43e

Please sign in to comment.
You can’t perform that action at this time.