Original file line number Diff line number Diff line change
Expand Up @@ -33,31 +33,43 @@
* Example showing use of AWS KMS CMP with record encryption functions directly.
*/
public class AwsKmsEncryptedItem {

private static final String STRING_FIELD_NAME = "example";
private static final String BINARY_FIELD_NAME = "and some binary";
private static final String NUMBER_FIELD_NAME = "some numbers";
private static final String IGNORED_FIELD_NAME = "leave me";

public static void main(String[] args) throws GeneralSecurityException {
final String tableName = args[0];
final String cmkArn = args[1];
final String region = args[2];

encryptRecord(tableName, cmkArn, region);

AWSKMS kms = null;
try {
kms = AWSKMSClientBuilder.standard().withRegion(region).build();
encryptRecord(tableName, cmkArn, kms);
} finally {
if (kms != null) {
kms.shutdown();
}
}
}

private static void encryptRecord(String tableName, String cmkArn, String region) throws GeneralSecurityException {
public static void encryptRecord(final String tableName, final String cmkArn, final AWSKMS kmsClient) throws GeneralSecurityException {
// Sample record to be encrypted
final String partitionKeyName = "partition_attribute";
final String sortKeyName = "sort_attribute";
final Map<String, AttributeValue> record = new HashMap<>();
record.put(partitionKeyName, new AttributeValue().withS("is this"));
record.put(sortKeyName, new AttributeValue().withN("55"));
record.put("example", new AttributeValue().withS("data"));
record.put("some numbers", new AttributeValue().withN("99"));
record.put("and some binary", new AttributeValue().withB(ByteBuffer.wrap(new byte[]{0x00, 0x01, 0x02})));
record.put("leave me", new AttributeValue().withS("alone")); // We want to ignore this attribute
record.put(STRING_FIELD_NAME, new AttributeValue().withS("data"));
record.put(NUMBER_FIELD_NAME, new AttributeValue().withN("99"));
record.put(BINARY_FIELD_NAME, new AttributeValue().withB(ByteBuffer.wrap(new byte[]{0x00, 0x01, 0x02})));
record.put(IGNORED_FIELD_NAME, new AttributeValue().withS("alone")); // We want to ignore this attribute

// Set up our configuration and clients. All of this is thread-safe and can be reused across calls.
// This example assumes we already have a AWS KMS client `kmsClient`
// Provider Configuration
final AWSKMS kms = AWSKMSClientBuilder.standard().withRegion(region).build();
final DirectKmsMaterialProvider cmp = new DirectKmsMaterialProvider(kms, cmkArn);
final DirectKmsMaterialProvider cmp = new DirectKmsMaterialProvider(kmsClient, cmkArn);
// Encryptor creation
final DynamoDBEncryptor encryptor = DynamoDBEncryptor.getInstance(cmp);

Expand All @@ -79,7 +91,7 @@ private static void encryptRecord(String tableName, String cmkArn, String region
// Partition and sort keys must not be encrypted but should be signed
actions.put(attributeName, signOnly);
break;
case "leave me":
case IGNORED_FIELD_NAME:
// For this example, we are neither signing nor encrypting this field
break;
default:
Expand All @@ -93,6 +105,12 @@ private static void encryptRecord(String tableName, String cmkArn, String region
// Encrypt the plaintext record directly
final Map<String, AttributeValue> encrypted_record = encryptor.encryptRecord(record, actions, encryptionContext);

// Encrypted record fields change as expected
assert encrypted_record.get(STRING_FIELD_NAME).getB() != null; // the encrypted string is stored as bytes
assert encrypted_record.get(NUMBER_FIELD_NAME).getB() != null; // the encrypted number is stored as bytes
assert !record.get(BINARY_FIELD_NAME).getB().equals(encrypted_record.get(BINARY_FIELD_NAME).getB()); // the encrypted bytes have updated
assert record.get(IGNORED_FIELD_NAME).getS().equals(encrypted_record.get(IGNORED_FIELD_NAME).getS()); // ignored field is left as is

// We could now put the encrypted item to DynamoDB just as we would any other item.
// We're skipping it to keep the example simpler.

Expand All @@ -102,5 +120,10 @@ private static void encryptRecord(String tableName, String cmkArn, String region
// Decryption is identical. We'll pretend that we retrieved the record from DynamoDB.
final Map<String, AttributeValue> decrypted_record = encryptor.decryptRecord(encrypted_record, actions, encryptionContext);
System.out.println("Decrypted Record: " + decrypted_record);
}

// The decrypted fields match the original fields before encryption
assert record.get(STRING_FIELD_NAME).getS().equals(decrypted_record.get(STRING_FIELD_NAME).getS());
assert record.get(NUMBER_FIELD_NAME).getN().equals(decrypted_record.get(NUMBER_FIELD_NAME).getN());
assert record.get(BINARY_FIELD_NAME).getB().equals(decrypted_record.get(BINARY_FIELD_NAME).getB());
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
*/
package com.amazonaws.examples;

import java.nio.ByteBuffer;
import java.security.GeneralSecurityException;
import java.util.Arrays;
import java.util.HashMap;
Expand All @@ -38,18 +39,42 @@

/**
* This demonstrates how to use the {@link DynamoDBMapper} with the {@link AttributeEncryptor}
* to encrypt your data. Before you can use this you need to set up a table called "ExampleTable"
* to encrypt your data. Before you can use this you need to set up a DynamoDB table called "ExampleTable"
* to hold the encrypted data.
* "ExampleTable" should have a partition key named "partition_attribute" for Strings
* and a sort (range) key named "sort_attribute" for numbers.
*/
public class AwsKmsEncryptedObject {
public static final String EXAMPLE_TABLE_NAME = "ExampleTable";
public static final String PARTITION_ATTRIBUTE = "partition_attribute";
public static final String SORT_ATTRIBUTE = "sort_attribute";

private static final String STRING_FIELD_NAME = "example";
private static final String BINARY_FIELD_NAME = "and some binary";
private static final String NUMBER_FIELD_NAME = "some numbers";
private static final String IGNORED_FIELD_NAME = "leave me";

public static void main(String[] args) throws GeneralSecurityException {
final String cmkArn = args[0];
final String region = args[1];

encryptRecord(cmkArn, region);

AmazonDynamoDB ddb = null;
AWSKMS kms = null;
try {
ddb = AmazonDynamoDBClientBuilder.standard().withRegion(region).build();
kms = AWSKMSClientBuilder.standard().withRegion(region).build();
encryptRecord(cmkArn, ddb, kms);
} finally {
if (ddb != null) {
ddb.shutdown();
}
if (kms != null) {
kms.shutdown();
}
}
}

public static void encryptRecord(final String cmkArn, final String region) {
public static void encryptRecord(final String cmkArn, final AmazonDynamoDB ddbClient, final AWSKMS kmsClient) {
// Sample object to be encrypted
DataPoJo record = new DataPoJo();
record.setPartitionAttribute("is this");
Expand All @@ -60,33 +85,44 @@ public static void encryptRecord(final String cmkArn, final String region) {
record.setLeaveMe("alone");

// Set up our configuration and clients
final AmazonDynamoDB ddb = AmazonDynamoDBClientBuilder.standard().withRegion(region).build();
final AWSKMS kms = AWSKMSClientBuilder.standard().withRegion(region).build();
final DirectKmsMaterialProvider cmp = new DirectKmsMaterialProvider(kms, cmkArn);
// This example assumes we already have a DynamoDB client `ddbClient` and AWS KMS client `kmsClient`
final DirectKmsMaterialProvider cmp = new DirectKmsMaterialProvider(kmsClient, cmkArn);
// Encryptor creation
final DynamoDBEncryptor encryptor = DynamoDBEncryptor.getInstance(cmp);
// Mapper Creation
// Please note the use of SaveBehavior.PUT (SaveBehavior.CLOBBER works as well).
// Omitting this can result in data-corruption.
DynamoDBMapperConfig mapperConfig = DynamoDBMapperConfig.builder().withSaveBehavior(SaveBehavior.PUT).build();
DynamoDBMapper mapper = new DynamoDBMapper(ddb, mapperConfig, new AttributeEncryptor(encryptor));
DynamoDBMapper mapper = new DynamoDBMapper(ddbClient, mapperConfig, new AttributeEncryptor(encryptor));

System.out.println("Plaintext Record: " + record);
// Save the item to the DynamoDB table
mapper.save(record);

// Retrieve the encrypted item (directly without decrypting) from Dynamo so we can see it in our example
final Map<String, AttributeValue> itemKey = new HashMap<>();
itemKey.put("partition_attribute", new AttributeValue().withS("is this"));
itemKey.put("sort_attribute", new AttributeValue().withN("55"));
System.out.println("Encrypted Record: " + ddb.getItem("ExampleTable", itemKey).getItem());

itemKey.put(PARTITION_ATTRIBUTE, new AttributeValue().withS("is this"));
itemKey.put(SORT_ATTRIBUTE, new AttributeValue().withN("55"));
final Map<String, AttributeValue> encrypted_record = ddbClient.getItem(EXAMPLE_TABLE_NAME, itemKey).getItem();
System.out.println("Encrypted Record: " + encrypted_record);

// Encrypted record fields change as expected
assert encrypted_record.get(STRING_FIELD_NAME).getB() != null; // the encrypted string is stored as bytes
assert encrypted_record.get(NUMBER_FIELD_NAME).getB() != null; // the encrypted number is stored as bytes
assert !ByteBuffer.wrap(record.getSomeBinary()).equals(encrypted_record.get(BINARY_FIELD_NAME).getB()); // the encrypted bytes have updated
assert record.getLeaveMe().equals(encrypted_record.get(IGNORED_FIELD_NAME).getS()); // ignored field is left as is

// Retrieve (and decrypt) it from DynamoDB
DataPoJo decrypted_record = mapper.load(DataPoJo.class, "is this", 55);
System.out.println("Decrypted Record: " + decrypted_record);

// The decrypted fields match the original fields before encryption
assert record.getExample().equals(decrypted_record.getExample());
assert record.getSomeNumbers() == decrypted_record.getSomeNumbers();
assert Arrays.equals(record.getSomeBinary(), decrypted_record.getSomeBinary());
}

@DynamoDBTable(tableName = "ExampleTable")
@DynamoDBTable(tableName = EXAMPLE_TABLE_NAME)
public static final class DataPoJo {
private String partitionAttribute;
private int sortAttribute;
Expand All @@ -95,7 +131,7 @@ public static final class DataPoJo {
private byte[] someBinary;
private String leaveMe;

@DynamoDBHashKey(attributeName = "partition_attribute")
@DynamoDBHashKey(attributeName = PARTITION_ATTRIBUTE)
public String getPartitionAttribute() {
return partitionAttribute;
}
Expand All @@ -104,7 +140,7 @@ public void setPartitionAttribute(String partitionAttribute) {
this.partitionAttribute = partitionAttribute;
}

@DynamoDBRangeKey(attributeName = "sort_attribute")
@DynamoDBRangeKey(attributeName = SORT_ATTRIBUTE)
public int getSortAttribute() {
return sortAttribute;
}
Expand All @@ -113,7 +149,7 @@ public void setSortAttribute(int sortAttribute) {
this.sortAttribute = sortAttribute;
}

@DynamoDBAttribute(attributeName = "example")
@DynamoDBAttribute(attributeName = STRING_FIELD_NAME)
public String getExample() {
return example;
}
Expand All @@ -122,7 +158,7 @@ public void setExample(String example) {
this.example = example;
}

@DynamoDBAttribute(attributeName = "some numbers")
@DynamoDBAttribute(attributeName = NUMBER_FIELD_NAME)
public long getSomeNumbers() {
return someNumbers;
}
Expand All @@ -131,7 +167,7 @@ public void setSomeNumbers(long someNumbers) {
this.someNumbers = someNumbers;
}

@DynamoDBAttribute(attributeName = "and some binary")
@DynamoDBAttribute(attributeName = BINARY_FIELD_NAME)
public byte[] getSomeBinary() {
return someBinary;
}
Expand All @@ -140,7 +176,7 @@ public void setSomeBinary(byte[] someBinary) {
this.someBinary = someBinary;
}

@DynamoDBAttribute(attributeName = "leave me")
@DynamoDBAttribute(attributeName = IGNORED_FIELD_NAME)
@DoNotTouch
public String getLeaveMe() {
return leaveMe;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,23 @@

import static com.amazonaws.services.dynamodbv2.datamodeling.encryption.utils.EncryptionContextOperators.overrideEncryptionContextTableNameUsingMap;

/**
* This demonstrates how to use an operator to override the table name used in the encryption context.
* Before you can use this you need to set up a DynamoDB table called "ExampleTableForEncryptionContextOverrides"
* to hold the encrypted data.
* "ExampleTableForEncryptionContextOverrides" should have a partition key named "partition_attribute" for Strings
* and a sort (range) key named "sort_attribute" for numbers.
*/
public class EncryptionContextOverridesWithDynamoDBMapper {
public static final String TABLE_NAME_TO_OVERRIDE = "ExampleTableForEncryptionContextOverrides";
public static final String PARTITION_ATTRIBUTE = "partition_attribute";
public static final String SORT_ATTRIBUTE = "sort_attribute";

private static final String STRING_FIELD_NAME = "example";
private static final String BINARY_FIELD_NAME = "and some binary";
private static final String NUMBER_FIELD_NAME = "some numbers";
private static final String IGNORED_FIELD_NAME = "leave me";

public static void main(String[] args) throws GeneralSecurityException {
final String cmkArn = args[0];
final String region = args[1];
Expand All @@ -63,20 +79,21 @@ public static void main(String[] args) throws GeneralSecurityException {

public static void encryptRecord(final String cmkArn,
final String newEncryptionContextTableName,
AmazonDynamoDB ddb,
AWSKMS kms) throws GeneralSecurityException {
AmazonDynamoDB ddbClient,
AWSKMS kmsClient) throws GeneralSecurityException {
// Sample object to be encrypted
ExampleItem record = new ExampleItem();
record.setPartitionAttribute("is this");
record.setSortAttribute(55);
record.setExample("my data");

// Set up our configuration and clients
final DirectKmsMaterialProvider cmp = new DirectKmsMaterialProvider(kms, cmkArn);
// This example assumes we already have a DynamoDB client `ddbClient` and AWS KMS client `kmsClient`
final DirectKmsMaterialProvider cmp = new DirectKmsMaterialProvider(kmsClient, cmkArn);
final DynamoDBEncryptor encryptor = DynamoDBEncryptor.getInstance(cmp);

Map<String, String> tableNameEncryptionContextOverrides = new HashMap<>();
tableNameEncryptionContextOverrides.put("ExampleTableForEncryptionContextOverrides", newEncryptionContextTableName);
tableNameEncryptionContextOverrides.put(TABLE_NAME_TO_OVERRIDE, newEncryptionContextTableName);
tableNameEncryptionContextOverrides.put("AnotherExampleTableForEncryptionContextOverrides", "this table doesn't exist");

// Supply an operator to override the table name used in the encryption context
Expand All @@ -89,7 +106,7 @@ public static void encryptRecord(final String cmkArn,
// Omitting this can result in data-corruption.
DynamoDBMapperConfig mapperConfig = DynamoDBMapperConfig.builder()
.withSaveBehavior(DynamoDBMapperConfig.SaveBehavior.PUT).build();
DynamoDBMapper mapper = new DynamoDBMapper(ddb, mapperConfig, new AttributeEncryptor(encryptor));
DynamoDBMapper mapper = new DynamoDBMapper(ddbClient, mapperConfig, new AttributeEncryptor(encryptor));

System.out.println("Plaintext Record: " + record.toString());
// Save the record to the DynamoDB table
Expand All @@ -99,41 +116,47 @@ public static void encryptRecord(final String cmkArn,
ExampleItem decrypted_record = mapper.load(ExampleItem.class, "is this", 55);
System.out.println("Decrypted Record: " + decrypted_record.toString());

// The decrypted field matches the original field before encryption
assert record.getExample().equals(decrypted_record.getExample());

// Setup new configuration to decrypt without using an overridden EncryptionContext
final Map<String, AttributeValue> itemKey = new HashMap<>();
itemKey.put("partition_attribute", new AttributeValue().withS("is this"));
itemKey.put("sort_attribute", new AttributeValue().withN("55"));
itemKey.put(PARTITION_ATTRIBUTE, new AttributeValue().withS("is this"));
itemKey.put(SORT_ATTRIBUTE, new AttributeValue().withN("55"));

final EnumSet<EncryptionFlags> signOnly = EnumSet.of(EncryptionFlags.SIGN);
final EnumSet<EncryptionFlags> encryptAndSign = EnumSet.of(EncryptionFlags.ENCRYPT, EncryptionFlags.SIGN);
final Map<String, AttributeValue> encryptedItem = ddb.getItem("ExampleTableForEncryptionContextOverrides", itemKey)
final Map<String, AttributeValue> encryptedItem = ddbClient.getItem(TABLE_NAME_TO_OVERRIDE, itemKey)
.getItem();
System.out.println("Encrypted Record: " + encryptedItem);

Map<String, Set<EncryptionFlags>> encryptionFlags = new HashMap<>();
encryptionFlags.put("partition_attribute", signOnly);
encryptionFlags.put("sort_attribute", signOnly);
encryptionFlags.put("example", encryptAndSign);
encryptionFlags.put(PARTITION_ATTRIBUTE, signOnly);
encryptionFlags.put(SORT_ATTRIBUTE, signOnly);
encryptionFlags.put(STRING_FIELD_NAME, encryptAndSign);

final DynamoDBEncryptor encryptorWithoutOverrides = DynamoDBEncryptor.getInstance(cmp);

// Decrypt the record without using an overridden EncryptionContext
encryptorWithoutOverrides.decryptRecord(encryptedItem,
Map<String, AttributeValue> decrypted_without_override_record = encryptorWithoutOverrides.decryptRecord(encryptedItem,
encryptionFlags,
new EncryptionContext.Builder().withHashKeyName("partition_attribute")
.withRangeKeyName("sort_attribute")
new EncryptionContext.Builder().withHashKeyName(PARTITION_ATTRIBUTE)
.withRangeKeyName(SORT_ATTRIBUTE)
.withTableName(newEncryptionContextTableName)
.build());
System.out.printf("The example item was encrypted using the table name '%s' in the EncryptionContext%n", newEncryptionContextTableName);

// The decrypted field matches the original field before encryption
assert record.getExample().equals(decrypted_without_override_record.get(STRING_FIELD_NAME).getS());
}

@DynamoDBTable(tableName = "ExampleTableForEncryptionContextOverrides")
@DynamoDBTable(tableName = TABLE_NAME_TO_OVERRIDE)
public static final class ExampleItem {
private String partitionAttribute;
private int sortAttribute;
private String example;

@DynamoDBHashKey(attributeName = "partition_attribute")
@DynamoDBHashKey(attributeName = PARTITION_ATTRIBUTE)
public String getPartitionAttribute() {
return partitionAttribute;
}
Expand All @@ -142,7 +165,7 @@ public void setPartitionAttribute(String partitionAttribute) {
this.partitionAttribute = partitionAttribute;
}

@DynamoDBRangeKey(attributeName = "sort_attribute")
@DynamoDBRangeKey(attributeName = SORT_ATTRIBUTE)
public int getSortAttribute() {
return sortAttribute;
}
Expand All @@ -151,7 +174,7 @@ public void setSortAttribute(int sortAttribute) {
this.sortAttribute = sortAttribute;
}

@DynamoDBAttribute(attributeName = "example")
@DynamoDBAttribute(attributeName = STRING_FIELD_NAME)
public String getExample() {
return example;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -26,69 +26,88 @@
import com.amazonaws.services.dynamodbv2.datamodeling.encryption.DynamoDBEncryptor;
import com.amazonaws.services.dynamodbv2.datamodeling.encryption.EncryptionContext;
import com.amazonaws.services.dynamodbv2.datamodeling.encryption.EncryptionFlags;
import com.amazonaws.services.dynamodbv2.datamodeling.encryption.providers.CachingMostRecentProvider;
import com.amazonaws.services.dynamodbv2.datamodeling.encryption.providers.DirectKmsMaterialProvider;
import com.amazonaws.services.dynamodbv2.datamodeling.encryption.providers.MostRecentProvider;
import com.amazonaws.services.dynamodbv2.datamodeling.encryption.providers.store.MetaStore;
import com.amazonaws.services.dynamodbv2.model.AttributeValue;
import com.amazonaws.services.dynamodbv2.model.ProvisionedThroughput;
import com.amazonaws.services.kms.AWSKMS;
import com.amazonaws.services.kms.AWSKMSClientBuilder;

/**
* This demonstrates how to use the {@link MostRecentProvider} backed by a
* This demonstrates how to use the {@link CachingMostRecentProvider} backed by a
* {@link MetaStore} and the {@link DirectKmsMaterialProvider} to encrypt
* your data. Before you can use this, you need to set up a table to hold the
* intermediate keys.
* intermediate keys or use --setup mode to construct the table once
* and then re-run the example without the --setup mode
*/
public class MostRecentEncryptedItem {

public static final String PARTITION_ATTRIBUTE = "partition_attribute";
public static final String SORT_ATTRIBUTE = "sort_attribute";

private static final String STRING_FIELD_NAME = "example";
private static final String BINARY_FIELD_NAME = "and some binary";
private static final String NUMBER_FIELD_NAME = "some numbers";
private static final String IGNORED_FIELD_NAME = "leave me";

public static void main(String[] args) throws GeneralSecurityException {
final String mode = args[0];
final String region = args[1];
final String tableName = args[2];
final String keyTableName = args[3];
final String cmkArn = args[4];
final String materialName = args[5];

if (mode.equalsIgnoreCase("--setup")) {
AmazonDynamoDB ddb = AmazonDynamoDBClientBuilder.standard().withRegion(region).build();
MetaStore.createTable(ddb, keyTableName, new ProvisionedThroughput(1L, 1L));
return;
}

encryptRecord(tableName, keyTableName, region, cmkArn, materialName);

AmazonDynamoDB ddb = null;
AWSKMS kms = null;
try {
ddb = AmazonDynamoDBClientBuilder.standard().withRegion(region).build();
kms = AWSKMSClientBuilder.standard().withRegion(region).build();
encryptRecord(tableName, keyTableName, cmkArn, materialName, ddb, kms);
} finally {
if (ddb != null) {
ddb.shutdown();
}
if (kms != null) {
kms.shutdown();
}
}
}

private static void encryptRecord(String tableName, String keyTableName, String region, String cmkArn, String materialName) throws GeneralSecurityException {
public static void encryptRecord(String tableName, String keyTableName, String cmkArn, String materialName,
AmazonDynamoDB ddbClient, AWSKMS kmsClient) throws GeneralSecurityException {
// Sample record to be encrypted
final String partitionKeyName = "partition_attribute";
final String sortKeyName = "sort_attribute";
final Map<String, AttributeValue> record = new HashMap<>();
record.put(partitionKeyName, new AttributeValue().withS("is this"));
record.put(sortKeyName, new AttributeValue().withN("55"));
record.put("example", new AttributeValue().withS("data"));
record.put("some numbers", new AttributeValue().withN("99"));
record.put("and some binary", new AttributeValue().withB(ByteBuffer.wrap(new byte[]{0x00, 0x01, 0x02})));
record.put("leave me", new AttributeValue().withS("alone")); // We want to ignore this attribute
record.put(PARTITION_ATTRIBUTE, new AttributeValue().withS("is this"));
record.put(SORT_ATTRIBUTE, new AttributeValue().withN("55"));
record.put(STRING_FIELD_NAME, new AttributeValue().withS("data"));
record.put(NUMBER_FIELD_NAME, new AttributeValue().withN("99"));
record.put(BINARY_FIELD_NAME, new AttributeValue().withB(ByteBuffer.wrap(new byte[]{0x00, 0x01, 0x02})));
record.put(IGNORED_FIELD_NAME, new AttributeValue().withS("alone")); // We want to ignore this attribute

// Set up our configuration and clients. All of this is thread-safe and can be reused across calls.
// Provider Configuration to protect the data keys
final AmazonDynamoDB ddb = AmazonDynamoDBClientBuilder.standard().withRegion(region).build();
final AWSKMS kms = AWSKMSClientBuilder.standard().withRegion(region).build();
final DirectKmsMaterialProvider kmsProv = new DirectKmsMaterialProvider(kms, cmkArn);
// This example assumes we already have a DynamoDB client `ddbClient` and AWS KMS client `kmsClient`
final DirectKmsMaterialProvider kmsProv = new DirectKmsMaterialProvider(kmsClient, cmkArn);
final DynamoDBEncryptor keyEncryptor = DynamoDBEncryptor.getInstance(kmsProv);
final MetaStore metaStore = new MetaStore(ddb, keyTableName, keyEncryptor);
final MetaStore metaStore = new MetaStore(ddbClient, keyTableName, keyEncryptor);
//Provider configuration to protect the data
final MostRecentProvider cmp = new MostRecentProvider(metaStore, materialName, 60_000);
final CachingMostRecentProvider cmp = new CachingMostRecentProvider(metaStore, materialName, 60_000);

// Encryptor creation
final DynamoDBEncryptor encryptor = DynamoDBEncryptor.getInstance(cmp);

// Information about the context of our data (normally just Table information)
final EncryptionContext encryptionContext = new EncryptionContext.Builder()
.withTableName(tableName)
.withHashKeyName(partitionKeyName)
.withRangeKeyName(sortKeyName)
.withHashKeyName(PARTITION_ATTRIBUTE)
.withRangeKeyName(SORT_ATTRIBUTE)
.build();

// Describe what actions need to be taken for each attribute
Expand All @@ -97,12 +116,12 @@ private static void encryptRecord(String tableName, String keyTableName, String
final Map<String, Set<EncryptionFlags>> actions = new HashMap<>();
for (final String attributeName : record.keySet()) {
switch (attributeName) {
case partitionKeyName: // fall through
case sortKeyName:
case PARTITION_ATTRIBUTE: // fall through
case SORT_ATTRIBUTE:
// Partition and sort keys must not be encrypted but should be signed
actions.put(attributeName, signOnly);
break;
case "leave me":
case IGNORED_FIELD_NAME:
// For this example, we are neither signing nor encrypting this field
break;
default:
Expand All @@ -116,6 +135,12 @@ private static void encryptRecord(String tableName, String keyTableName, String
// Encrypt the plaintext record directly
final Map<String, AttributeValue> encrypted_record = encryptor.encryptRecord(record, actions, encryptionContext);

// Encrypted record fields change as expected
assert encrypted_record.get(STRING_FIELD_NAME).getB() != null; // the encrypted string is stored as bytes
assert encrypted_record.get(NUMBER_FIELD_NAME).getB() != null; // the encrypted number is stored as bytes
assert !record.get(BINARY_FIELD_NAME).getB().equals(encrypted_record.get(BINARY_FIELD_NAME).getB()); // the encrypted bytes have updated
assert record.get(IGNORED_FIELD_NAME).getS().equals(encrypted_record.get(IGNORED_FIELD_NAME).getS()); // ignored field is left as is

// We could now put the encrypted item to DynamoDB just as we would any other item.
// We're skipping it to keep the example simpler.

Expand All @@ -125,5 +150,10 @@ private static void encryptRecord(String tableName, String keyTableName, String
// Decryption is identical. We'll pretend that we retrieved the record from DynamoDB.
final Map<String, AttributeValue> decrypted_record = encryptor.decryptRecord(encrypted_record, actions, encryptionContext);
System.out.println("Decrypted Record: " + decrypted_record);
}

// The decrypted fields match the original fields before encryption
assert record.get(STRING_FIELD_NAME).getS().equals(decrypted_record.get(STRING_FIELD_NAME).getS());
assert record.get(NUMBER_FIELD_NAME).getN().equals(decrypted_record.get(NUMBER_FIELD_NAME).getN());
assert record.get(BINARY_FIELD_NAME).getB().equals(decrypted_record.get(BINARY_FIELD_NAME).getB());
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,12 @@
* For ease of the example, we create new random ones every time.
*/
public class SymmetricEncryptedItem {


private static final String STRING_FIELD_NAME = "example";
private static final String BINARY_FIELD_NAME = "and some binary";
private static final String NUMBER_FIELD_NAME = "some numbers";
private static final String IGNORED_FIELD_NAME = "leave me";

public static void main(String[] args) throws GeneralSecurityException {
final String tableName = args[0];
// Both AES and HMAC keys are just random bytes.
Expand All @@ -48,21 +53,21 @@ public static void main(String[] args) throws GeneralSecurityException {
secureRandom.nextBytes(rawHmac);
final SecretKey wrappingKey = new SecretKeySpec(rawAes, "AES");
final SecretKey signingKey = new SecretKeySpec(rawHmac, "HmacSHA256");

encryptRecord(tableName, wrappingKey, signingKey);
}

private static void encryptRecord(String tableName, SecretKey wrappingKey, SecretKey signingKey) throws GeneralSecurityException {
public static void encryptRecord(String tableName, SecretKey wrappingKey, SecretKey signingKey) throws GeneralSecurityException {
// Sample record to be encrypted
final String partitionKeyName = "partition_attribute";
final String sortKeyName = "sort_attribute";
final Map<String, AttributeValue> record = new HashMap<>();
record.put(partitionKeyName, new AttributeValue().withS("is this"));
record.put(sortKeyName, new AttributeValue().withN("55"));
record.put("example", new AttributeValue().withS("data"));
record.put("some numbers", new AttributeValue().withN("99"));
record.put("and some binary", new AttributeValue().withB(ByteBuffer.wrap(new byte[]{0x00, 0x01, 0x02})));
record.put("leave me", new AttributeValue().withS("alone")); // We want to ignore this attribute
record.put(STRING_FIELD_NAME, new AttributeValue().withS("data"));
record.put(NUMBER_FIELD_NAME, new AttributeValue().withN("99"));
record.put(BINARY_FIELD_NAME, new AttributeValue().withB(ByteBuffer.wrap(new byte[]{0x00, 0x01, 0x02})));
record.put(IGNORED_FIELD_NAME, new AttributeValue().withS("alone")); // We want to ignore this attribute

// Set up our configuration and clients. All of this is thread-safe and can be reused across calls.
// Provider Configuration
Expand Down Expand Up @@ -91,7 +96,7 @@ private static void encryptRecord(String tableName, SecretKey wrappingKey, Secre
// Partition and sort keys must not be encrypted but should be signed
actions.put(attributeName, signOnly);
break;
case "leave me":
case IGNORED_FIELD_NAME:
// For this example, we are neither signing nor encrypting this field
break;
default:
Expand All @@ -105,6 +110,12 @@ private static void encryptRecord(String tableName, SecretKey wrappingKey, Secre
// Encrypt the plaintext record directly
final Map<String, AttributeValue> encrypted_record = encryptor.encryptRecord(record, actions, encryptionContext);

// Encrypted record fields change as expected
assert encrypted_record.get(STRING_FIELD_NAME).getB() != null; // the encrypted string is stored as bytes
assert encrypted_record.get(NUMBER_FIELD_NAME).getB() != null; // the encrypted number is stored as bytes
assert !record.get(BINARY_FIELD_NAME).getB().equals(encrypted_record.get(BINARY_FIELD_NAME).getB()); // the encrypted bytes have updated
assert record.get(IGNORED_FIELD_NAME).getS().equals(encrypted_record.get(IGNORED_FIELD_NAME).getS()); // ignored field is left as is

// We could now put the encrypted item to DynamoDB just as we would any other item.
// We're skipping it to keep the example simpler.

Expand All @@ -114,5 +125,10 @@ private static void encryptRecord(String tableName, SecretKey wrappingKey, Secre
// Decryption is identical. We'll pretend that we retrieved the record from DynamoDB.
final Map<String, AttributeValue> decrypted_record = encryptor.decryptRecord(encrypted_record, actions, encryptionContext);
System.out.println("Decrypted Record: " + decrypted_record);
}

// The decrypted fields match the original fields before encryption
assert record.get(STRING_FIELD_NAME).getS().equals(decrypted_record.get(STRING_FIELD_NAME).getS());
assert record.get(NUMBER_FIELD_NAME).getN().equals(decrypted_record.get(NUMBER_FIELD_NAME).getN());
assert record.get(BINARY_FIELD_NAME).getB().equals(decrypted_record.get(BINARY_FIELD_NAME).getB());
}
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package com.amazonaws.examples;

import org.testng.annotations.Test;

import java.security.GeneralSecurityException;
import java.security.KeyPair;
import java.security.KeyPairGenerator;

/**
 * Exercises {@link AsymmetricEncryptedItem} end to end: generates fresh RSA
 * wrapping and signing key pairs, then runs the example's encrypt/decrypt flow.
 */
public class AsymmetricEncryptedItemTest {
    private static final String TABLE_NAME = "java-ddbec-test-table-asym-example";
    private static final String KEY_ALGORITHM = "RSA";
    private static final int KEY_SIZE_BITS = 2048;

    @Test
    public void testEncryptAndDecrypt() throws GeneralSecurityException {
        // Two independent key pairs: one wraps the record key, one signs the record.
        final KeyPair wrappingKeys = generateKeyPair();
        final KeyPair signingKeys = generateKeyPair();

        AsymmetricEncryptedItem.encryptRecord(TABLE_NAME, wrappingKeys, signingKeys);
    }

    // Builds a fresh 2048-bit RSA key pair for the example run.
    private static KeyPair generateKeyPair() throws GeneralSecurityException {
        final KeyPairGenerator generator = KeyPairGenerator.getInstance(KEY_ALGORITHM);
        generator.initialize(KEY_SIZE_BITS);
        return generator.generateKeyPair();
    }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package com.amazonaws.examples;

import com.amazonaws.services.kms.AWSKMS;
import com.amazonaws.services.kms.AWSKMSClientBuilder;
import org.testng.annotations.Test;

import java.security.GeneralSecurityException;

import static com.amazonaws.examples.TestUtils.US_WEST_2;
import static com.amazonaws.examples.TestUtils.US_WEST_2_KEY_ID;

public class AwsKmsEncryptedItemIT {
    private static final String TABLE_NAME = "java-ddbec-test-table-kms-item-example";

    /**
     * Integration test: runs the KMS item-encryptor example end to end against the
     * shared, publicly usable test CMK in us-west-2.
     *
     * @throws GeneralSecurityException if encryption or decryption fails
     */
    @Test
    public void testEncryptAndDecrypt() throws GeneralSecurityException {
        final AWSKMS kms = AWSKMSClientBuilder.standard().withRegion(US_WEST_2).build();
        try {
            AwsKmsEncryptedItem.encryptRecord(TABLE_NAME, US_WEST_2_KEY_ID, kms);
        } finally {
            // Release the client's underlying resources even if the example throws;
            // mirrors the cleanup AwsKmsEncryptedItem.main() performs.
            kms.shutdown();
        }
    }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package com.amazonaws.examples;

import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
import com.amazonaws.services.dynamodbv2.local.embedded.DynamoDBEmbedded;
import com.amazonaws.services.kms.AWSKMS;
import com.amazonaws.services.kms.AWSKMSClientBuilder;
import org.testng.annotations.Test;

import static com.amazonaws.examples.AwsKmsEncryptedObject.EXAMPLE_TABLE_NAME;
import static com.amazonaws.examples.AwsKmsEncryptedObject.PARTITION_ATTRIBUTE;
import static com.amazonaws.examples.AwsKmsEncryptedObject.SORT_ATTRIBUTE;
import static com.amazonaws.examples.TestUtils.US_WEST_2;
import static com.amazonaws.examples.TestUtils.US_WEST_2_KEY_ID;
import static com.amazonaws.examples.TestUtils.createDDBTable;

public class AwsKmsEncryptedObjectIT {

    /**
     * Integration test: creates the example table in embedded DynamoDB and runs the
     * KMS DynamoDBMapper example end to end against the shared test CMK in us-west-2.
     */
    @Test
    public void testEncryptAndDecrypt() {
        final AWSKMS kms = AWSKMSClientBuilder.standard().withRegion(US_WEST_2).build();
        final AmazonDynamoDB ddb = DynamoDBEmbedded.create();
        try {
            // Create the table under test
            createDDBTable(ddb, EXAMPLE_TABLE_NAME, PARTITION_ATTRIBUTE, SORT_ATTRIBUTE);

            AwsKmsEncryptedObject.encryptRecord(US_WEST_2_KEY_ID, ddb, kms);
        } finally {
            // Release both clients' resources even if the example throws.
            ddb.shutdown();
            kms.shutdown();
        }
    }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package com.amazonaws.examples;

import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
import com.amazonaws.services.dynamodbv2.local.embedded.DynamoDBEmbedded;

import com.amazonaws.services.kms.AWSKMS;
import com.amazonaws.services.kms.AWSKMSClientBuilder;
import org.testng.annotations.Test;

import java.security.GeneralSecurityException;

import static com.amazonaws.examples.EncryptionContextOverridesWithDynamoDBMapper.PARTITION_ATTRIBUTE;
import static com.amazonaws.examples.EncryptionContextOverridesWithDynamoDBMapper.SORT_ATTRIBUTE;
import static com.amazonaws.examples.EncryptionContextOverridesWithDynamoDBMapper.TABLE_NAME_TO_OVERRIDE;
import static com.amazonaws.examples.TestUtils.US_WEST_2;
import static com.amazonaws.examples.TestUtils.US_WEST_2_KEY_ID;
import static com.amazonaws.examples.TestUtils.createDDBTable;

public class EncryptionContextOverridesWithDynamoDBMapperIT {
    private static final String OVERRIDE_TABLE_NAME = "java-ddbec-test-table-encctx-override-example";

    /**
     * Integration test: creates the table whose name the example overrides in its
     * encryption context, then runs the mapper example end to end against the shared
     * test CMK in us-west-2.
     *
     * @throws GeneralSecurityException if encryption or decryption fails
     */
    @Test
    public void testEncryptAndDecrypt() throws GeneralSecurityException {
        final AWSKMS kms = AWSKMSClientBuilder.standard().withRegion(US_WEST_2).build();
        final AmazonDynamoDB ddb = DynamoDBEmbedded.create();
        try {
            // Create the table under test
            createDDBTable(ddb, TABLE_NAME_TO_OVERRIDE, PARTITION_ATTRIBUTE, SORT_ATTRIBUTE);

            EncryptionContextOverridesWithDynamoDBMapper.encryptRecord(
                    US_WEST_2_KEY_ID, OVERRIDE_TABLE_NAME, ddb, kms);
        } finally {
            // Release both clients' resources even if the example throws.
            ddb.shutdown();
            kms.shutdown();
        }
    }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package com.amazonaws.examples;

import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
import com.amazonaws.services.dynamodbv2.datamodeling.encryption.providers.store.MetaStore;
import com.amazonaws.services.dynamodbv2.local.embedded.DynamoDBEmbedded;
import com.amazonaws.services.dynamodbv2.model.ProvisionedThroughput;
import com.amazonaws.services.kms.AWSKMS;
import com.amazonaws.services.kms.AWSKMSClientBuilder;
import org.testng.annotations.Test;

import java.security.GeneralSecurityException;

import static com.amazonaws.examples.MostRecentEncryptedItem.PARTITION_ATTRIBUTE;
import static com.amazonaws.examples.MostRecentEncryptedItem.SORT_ATTRIBUTE;
import static com.amazonaws.examples.TestUtils.*;

public class MostRecentEncryptedItemIT {
    private static final String TABLE_NAME = "java-ddbec-test-table-mostrecent-example";
    private static final String KEY_TABLE_NAME = "java-ddbec-test-table-mostrecent-example-keys";
    private static final String MATERIAL_NAME = "testMaterial";

    /**
     * Integration test: provisions the MetaStore key table and the data table in
     * embedded DynamoDB, then runs the most-recent-provider example end to end
     * against the shared test CMK in us-west-2.
     *
     * @throws GeneralSecurityException if encryption or decryption fails
     */
    @Test
    public void testEncryptAndDecrypt() throws GeneralSecurityException {
        final AWSKMS kms = AWSKMSClientBuilder.standard().withRegion(US_WEST_2).build();
        final AmazonDynamoDB ddb = DynamoDBEmbedded.create();
        try {
            // Create the key table under test
            MetaStore.createTable(ddb, KEY_TABLE_NAME, new ProvisionedThroughput(1L, 1L));

            // Create the table under test
            createDDBTable(ddb, TABLE_NAME, PARTITION_ATTRIBUTE, SORT_ATTRIBUTE);

            MostRecentEncryptedItem.encryptRecord(
                    TABLE_NAME, KEY_TABLE_NAME, US_WEST_2_KEY_ID, MATERIAL_NAME, ddb, kms);
        } finally {
            // Release both clients' resources even if the example throws.
            ddb.shutdown();
            kms.shutdown();
        }
    }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package com.amazonaws.examples;

import org.testng.annotations.Test;

import javax.crypto.SecretKey;
import javax.crypto.spec.SecretKeySpec;
import java.security.GeneralSecurityException;
import java.security.SecureRandom;

public class SymmetricEncryptedItemTest {
    private static final String TABLE_NAME = "java-ddbec-test-table-sym-example";

    /**
     * Round-trip exercise of the symmetric example using freshly generated random
     * 256-bit AES wrapping and HmacSHA256 signing keys.
     *
     * @throws GeneralSecurityException if the example's crypto fails
     */
    @Test
    public void testEncryptAndDecrypt() throws GeneralSecurityException {
        final SecureRandom secureRandom = new SecureRandom();

        SymmetricEncryptedItem.encryptRecord(
                TABLE_NAME,
                randomKey(secureRandom, "AES"),
                randomKey(secureRandom, "HmacSHA256"));
    }

    /** Draws 32 random bytes and wraps them as a key for the given algorithm. */
    private static SecretKey randomKey(final SecureRandom rng, final String algorithm) {
        final byte[] raw = new byte[32];
        rng.nextBytes(raw);
        return new SecretKeySpec(raw, algorithm);
    }
}
41 changes: 41 additions & 0 deletions examples/src/test/java/com/amazonaws/examples/TestUtils.java
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
package com.amazonaws.examples;

import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
import com.amazonaws.services.dynamodbv2.model.*;

import java.util.ArrayList;

import static com.amazonaws.examples.AwsKmsEncryptedObject.*;

public class TestUtils {
    private TestUtils() {
        throw new UnsupportedOperationException(
                "This class exists to hold static resources and cannot be instantiated."
        );
    }

    /**
     * These special test keys have been configured to allow Encrypt, Decrypt, and GenerateDataKey operations from any
     * AWS principal and should be used when adding new KMS tests.
     *
     * This should go without saying, but never use these keys for production purposes (as anyone in the world can
     * decrypt data encrypted using them).
     */
    public static final String US_WEST_2_KEY_ID = "arn:aws:kms:us-west-2:658956600833:key/b3537ef1-d8dc-4780-9f5a-55776cbb2f7f";
    public static final String US_WEST_2 = "us-west-2";

    /**
     * Creates a DynamoDB table with a string (HASH) partition key and a numeric
     * (RANGE) sort key, matching the schema the example records use.
     *
     * @param ddb           client for the (typically embedded) DynamoDB instance
     * @param tableName     name of the table to create
     * @param partitionName attribute name of the string partition key
     * @param sortName      attribute name of the numeric sort key
     */
    public static void createDDBTable(AmazonDynamoDB ddb, String tableName, String partitionName, String sortName) {
        ArrayList<AttributeDefinition> attrDef = new ArrayList<>();
        attrDef.add(new AttributeDefinition().withAttributeName(partitionName).withAttributeType(ScalarAttributeType.S));
        attrDef.add(new AttributeDefinition().withAttributeName(sortName).withAttributeType(ScalarAttributeType.N));

        ArrayList<KeySchemaElement> keySchema = new ArrayList<>();
        keySchema.add(new KeySchemaElement().withAttributeName(partitionName).withKeyType(KeyType.HASH));
        keySchema.add(new KeySchemaElement().withAttributeName(sortName).withKeyType(KeyType.RANGE));

        ddb.createTable(new CreateTableRequest().withTableName(tableName)
                .withAttributeDefinitions(attrDef)
                .withKeySchema(keySchema)
                .withProvisionedThroughput(new ProvisionedThroughput(100L, 100L)));
    }
}
258 changes: 7 additions & 251 deletions pom.xml
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
<modelVersion>4.0.0</modelVersion>
<groupId>software.amazon.cryptools</groupId>
<artifactId>dynamodbencryptionclient-pom</artifactId>
<version>0.1.0-SNAPSHOT</version>
<version>1.15.0</version>
<packaging>pom</packaging>

<name>aws-dynamodb-encryption-java :: POM</name>
Expand All @@ -20,8 +20,6 @@
</scm>

<modules>
<module>ddej-build-tools</module>
<module>common</module>
<module>sdk1</module>
<module>examples</module>
</modules>
Expand All @@ -34,20 +32,6 @@
</license>
</licenses>

<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<sqlite4java.version>1.0.392</sqlite4java.version>
<checkstyle.version>8.29</checkstyle.version>
<maven-checkstyle-plugin.version>3.1.0</maven-checkstyle-plugin.version>
<ddej-build-tools.version>0.1.0</ddej-build-tools.version>
<jacoco-maven-plugin.version>0.8.3</jacoco-maven-plugin.version>
<maven-source-plugin.version>3.0.1</maven-source-plugin.version>
<maven-jxr-plugin.version>3.0.0</maven-jxr-plugin.version>
<maven-failsafe-plugin.version>3.0.0-M3</maven-failsafe-plugin.version>
<maven-surefire-plugin.version>3.0.0-M3</maven-surefire-plugin.version>
<maven-dependency-plugin.version>3.1.1</maven-dependency-plugin.version>
</properties>

<developers>
<developer>
<id>amazonwebservices</id>
Expand All @@ -59,244 +43,16 @@
</developer>
</developers>

<dependencies>
<dependency>
<groupId>org.testng</groupId>
<artifactId>testng</artifactId>
<version>6.10</version>
<scope>test</scope>
</dependency>

<dependency>
<groupId>org.quicktheories</groupId>
<artifactId>quicktheories</artifactId>
<version>0.25</version>
<scope>test</scope>
</dependency>

<dependency>
<groupId>org.hamcrest</groupId>
<artifactId>hamcrest-all</artifactId>
<version>1.3</version>
<scope>test</scope>
</dependency>

<dependency>
<groupId>org.bouncycastle</groupId>
<artifactId>bcprov-ext-jdk15on</artifactId>
<version>1.65</version>
<scope>test</scope>
</dependency>

<dependency>
<groupId>com.amazonaws</groupId>
<artifactId>DynamoDBLocal</artifactId>
<version>1.10.5.1</version>
<scope>test</scope>
</dependency>

<dependency>
<groupId>com.almworks.sqlite4java</groupId>
<artifactId>sqlite4java</artifactId>
<version>${sqlite4java.version}</version>
<scope>test</scope>
</dependency>

<dependency>
<groupId>com.almworks.sqlite4java</groupId>
<artifactId>libsqlite4java-osx</artifactId>
<version>${sqlite4java.version}</version>
<type>dylib</type>
<scope>test</scope>
</dependency>

<dependency>
<groupId>com.almworks.sqlite4java</groupId>
<artifactId>sqlite4java-win32-x64</artifactId>
<version>${sqlite4java.version}</version>
<type>dll</type>
<scope>test</scope>
</dependency>

<dependency>
<groupId>com.almworks.sqlite4java</groupId>
<artifactId>libsqlite4java-linux-amd64</artifactId>
<type>so</type>
<version>${sqlite4java.version}</version>
<scope>test</scope>
</dependency>
</dependencies>

<!--Custom repository:-->
<repositories>
<repository>
<id>dynamodb-local</id>
<name>DynamoDB Local Release Repository</name>
<url>https://s3-us-west-2.amazonaws.com/dynamodb-local/release</url>
</repository>
</repositories>

<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>3.8.0</version>
<configuration>
<source>1.8</source>
<target>1.8</target>
</configuration>
</plugin>

<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<version>3.0.1</version>
</plugin>

<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<version>3.0.1</version>
<configuration>
<excludePackageNames>*.internal:*.transform</excludePackageNames>
<minmemory>128m</minmemory>
<maxmemory>1024m</maxmemory>
</configuration>
</plugin>

<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
<version>3.1.1</version>
<executions>
<execution>
<id>copy</id>
<phase>test-compile</phase>
<goals>
<goal>copy-dependencies</goal>
</goals>
<configuration>
<includeScope>test</includeScope>
<includeTypes>so,dll,dylib</includeTypes>
<outputDirectory>${project.build.directory}/test-lib</outputDirectory>
</configuration>
</execution>
</executions>
</plugin>

<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>${maven-surefire-plugin.version}</version>
<configuration>
<useSystemClassLoader>false</useSystemClassLoader>
<includes>
<include>**/Test*.java</include>
<include>**/*Test.java</include>
<include>**/*TestCase.java</include>
<include>**/*Tests.java</include>
<include>**/*TestCases.java</include>
</includes>
<systemProperties>
<property>
<name>sqlite4java.library.path</name>
<value>${project.build.directory}/test-lib</value>
</property>
</systemProperties>
</configuration>
</plugin>

<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
<version>${maven-checkstyle-plugin.version}</version>
<dependencies>
<dependency>
<groupId>com.puppycrawl.tools</groupId>
<artifactId>checkstyle</artifactId>
<version>${checkstyle.version}</version>
</dependency>
<dependency>
<groupId>software.amazon.cryptools</groupId>
<artifactId>ddej-build-tools</artifactId>
<version>${ddej-build-tools.version}</version>
</dependency>
</dependencies>
<executions>
<execution>
<id>checkstyle</id>
<phase>validate</phase>
<goals>
<goal>check</goal>
</goals>
</execution>
</executions>
<configuration>
<configLocation>software/amazon/cryptools/ddej-build-tools/checkstyle/checkstyle.xml</configLocation>
<suppressionsLocation>software/amazon/cryptools/ddej-build-tools/checkstyle/checkstyle-suppressions.xml</suppressionsLocation>
<consoleOutput>true</consoleOutput>
<failsOnError>true</failsOnError>
<logViolationsToConsole>true</logViolationsToConsole>
<failOnViolation>true</failOnViolation>
</configuration>
</plugin>

<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<version>${maven-failsafe-plugin.version}</version>
<configuration>
<useSystemClassLoader>false</useSystemClassLoader>
<systemProperties>
<property>
<name>sqlite4java.library.path</name>
<value>${project.build.directory}/test-lib</value>
</property>
</systemProperties>
<includes>
<include>**/*ITCase.java</include>
</includes>
</configuration>
<executions>
<execution>
<goals>
<goal>integration-test</goal>
<goal>verify</goal>
</goals>
</execution>
</executions>
</plugin>

<plugin>
<groupId>org.jacoco</groupId>
<artifactId>jacoco-maven-plugin</artifactId>
<version>${jacoco-maven-plugin.version}</version>
<executions>
<execution>
<goals>
<goal>prepare-agent</goal>
</goals>
</execution>
<execution>
<id>report</id>
<phase>test</phase>
<goals>
<goal>report</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>

<reporting>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-jxr-plugin</artifactId>
<version>${maven-jxr-plugin.version}</version>
<artifactId>maven-deploy-plugin</artifactId>
<version>2.8.2</version>
<configuration>
<skip>true</skip>
</configuration>
</plugin>
</plugins>
</reporting>
</build>
</project>
File renamed without changes.
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,6 @@
<property name="fileExtensions" value="java"/>
<module name="RegexpHeader">
<property name="header"
value="^/*\n * Copyright \d{4}([-]\d{4})? Amazon\.com, Inc\. or its affiliates\. All Rights Reserved\.$"/>
value="^(/*|// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.)\n( * Copyright \d{4}([-]\d{4})? Amazon\.com, Inc\. or its affiliates\. All Rights Reserved\.)?$"/>
</module>
</module>
97 changes: 86 additions & 11 deletions sdk1/pom.xml
Original file line number Diff line number Diff line change
Expand Up @@ -4,16 +4,60 @@
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
http://maven.apache.org/maven-v4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>

<groupId>com.amazonaws</groupId>
<artifactId>aws-dynamodb-encryption-java</artifactId>
<version>1.14.1</version>
<version>1.15.0</version>
<packaging>jar</packaging>
<name>aws-dynamodb-encryption-java :: SDK1</name>
<description>AWS DynamoDB Encryption Client for AWS Java SDK v1</description>
<url>https://github.com/aws/aws-dynamodb-encryption-java</url>

<profiles>
<profile>
<id>publishingCodeArtifact</id>

<distributionManagement>
<!--
Registers an alternate repository for testing the publishing process using CodeArtifact
before moving to the production sonatype repository.
This can be used with a mvn invocation like:
mvn deploy -PpublishingCodeArtifact \
-DaltDeploymentRepository=codeartifact::default::$CODEARTIFACT_REPO_URL \
...
-->
<repository>
<id>codeartifact</id>
<name>codeartifact</name>
<!-- url specified using -DaltDeploymentRepository to avoid hardcoding it here -->
</repository>
</distributionManagement>

<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-gpg-plugin</artifactId>
<version>1.6</version>
<executions>
<execution>
<id>sign-artifacts</id>
<phase>verify</phase>
<goals>
<goal>sign</goal>
</goals>
<configuration>
<gpgArguments>
<arg>--pinentry-mode</arg>
<arg>loopback</arg>
</gpgArguments>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</profile>

<profile>
<id>publishing</id>

Expand Down Expand Up @@ -81,7 +125,6 @@
<sqlite4java.version>1.0.392</sqlite4java.version>
<checkstyle.version>8.29</checkstyle.version>
<maven-checkstyle-plugin.version>3.1.0</maven-checkstyle-plugin.version>
<ddej-build-tools.version>0.1.0</ddej-build-tools.version>
<jacoco-maven-plugin.version>0.8.3</jacoco-maven-plugin.version>
<maven-source-plugin.version>3.0.1</maven-source-plugin.version>
<maven-jxr-plugin.version>3.0.0</maven-jxr-plugin.version>
Expand Down Expand Up @@ -148,7 +191,7 @@
<dependency>
<groupId>org.bouncycastle</groupId>
<artifactId>bcprov-ext-jdk15on</artifactId>
<version>1.60</version>
<version>1.68</version>
<scope>test</scope>
</dependency>

Expand Down Expand Up @@ -189,6 +232,34 @@
<version>${sqlite4java.version}</version>
<scope>test</scope>
</dependency>

<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-annotations</artifactId>
<version>2.9.0</version>
<scope>test</scope>
</dependency>

<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-core</artifactId>
<version>2.9.8</version>
<scope>test</scope>
</dependency>

<dependency>
<groupId>com.googlecode.multithreadedtc</groupId>
<artifactId>multithreadedtc</artifactId>
<version>1.01</version>
<scope>test</scope>
</dependency>

<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.13.1</version>
<scope>test</scope>
</dependency>
</dependencies>

<!--Custom repository:-->
Expand Down Expand Up @@ -281,11 +352,6 @@
<artifactId>checkstyle</artifactId>
<version>${checkstyle.version}</version>
</dependency>
<dependency>
<groupId>software.amazon.cryptools</groupId>
<artifactId>ddej-build-tools</artifactId>
<version>${ddej-build-tools.version}</version>
</dependency>
</dependencies>
<executions>
<execution>
Expand All @@ -297,8 +363,8 @@
</execution>
</executions>
<configuration>
<configLocation>software/amazon/cryptools/ddej-build-tools/checkstyle/checkstyle.xml</configLocation>
<suppressionsLocation>software/amazon/cryptools/ddej-build-tools/checkstyle/checkstyle-suppressions.xml</suppressionsLocation>
<configLocation>checkstyle/checkstyle.xml</configLocation>
<suppressionsLocation>checkstyle/checkstyle-suppressions.xml</suppressionsLocation>
<consoleOutput>true</consoleOutput>
<failsOnError>true</failsOnError>
<logViolationsToConsole>true</logViolationsToConsole>
Expand All @@ -320,6 +386,7 @@
</systemProperties>
<includes>
<include>**/*ITCase.java</include>
<include>**/*HolisticIT.java</include>
</includes>
</configuration>
<executions>
Expand Down Expand Up @@ -351,6 +418,14 @@
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-deploy-plugin</artifactId>
<version>2.8.2</version>
<configuration>
<skip>false</skip>
</configuration>
</plugin>
</plugins>
</build>

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -162,9 +162,15 @@ protected SecretKey unwrapKey(Map<String, String> description, byte[] encryptedK
description.get(CONTENT_KEY_ALGORITHM), Cipher.SECRET_KEY, null, wrappingAlgorithm);
} else {
Cipher cipher = Cipher.getInstance(wrappingAlgorithm);

// This can be of the form "AES/256" as well as "AES" e.g.,
// but we want to set the SecretKey with just "AES" in either case
String[] algPieces = description.get(CONTENT_KEY_ALGORITHM).split("/", 2);
String contentKeyAlgorithm = algPieces[0];

cipher.init(Cipher.UNWRAP_MODE, unwrappingKey, Utils.getRng());
return (SecretKey) cipher.unwrap(encryptedKey,
description.get(CONTENT_KEY_ALGORITHM), Cipher.SECRET_KEY);
contentKeyAlgorithm, Cipher.SECRET_KEY);
}
}

Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,183 @@
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.services.dynamodbv2.datamodeling.encryption.providers;

import com.amazonaws.services.dynamodbv2.datamodeling.encryption.EncryptionContext;
import com.amazonaws.services.dynamodbv2.datamodeling.encryption.materials.DecryptionMaterials;
import com.amazonaws.services.dynamodbv2.datamodeling.encryption.materials.EncryptionMaterials;
import com.amazonaws.services.dynamodbv2.datamodeling.encryption.providers.store.ProviderStore;
import com.amazonaws.services.dynamodbv2.datamodeling.internal.TTLCache;
import com.amazonaws.services.dynamodbv2.datamodeling.internal.TTLCache.EntryLoader;

import java.util.concurrent.TimeUnit;

import static com.amazonaws.services.dynamodbv2.datamodeling.internal.Utils.checkNotNull;

/**
* This meta-Provider encrypts data with the most recent version of keying materials from a
* {@link ProviderStore} and decrypts using whichever version is appropriate. It also caches the
* results from the {@link ProviderStore} to avoid excessive load on the backing systems.
*/
public class CachingMostRecentProvider implements EncryptionMaterialsProvider {
    private static final long INITIAL_VERSION = 0;
    private static final String PROVIDER_CACHE_KEY_DELIM = "#";
    private static final int DEFAULT_CACHE_MAX_SIZE = 1000;

    private final long ttlInNanos;
    private final ProviderStore keystore;
    protected final String defaultMaterialName;
    private final TTLCache<EncryptionMaterialsProvider> providerCache;
    private final TTLCache<Long> versionCache;

    // Loads the newest available version number for a material from the keystore.
    private final EntryLoader<Long> versionLoader = new EntryLoader<Long>() {
        @Override
        public Long load(String entryKey) {
            return keystore.getMaxVersion(entryKey);
        }
    };

    // Loads a provider for a specific "materialName#version" cache key.
    private final EntryLoader<EncryptionMaterialsProvider> providerLoader = new EntryLoader<EncryptionMaterialsProvider>() {
        @Override
        public EncryptionMaterialsProvider load(String entryKey) {
            final String[] parts = entryKey.split(PROVIDER_CACHE_KEY_DELIM, 2);
            if (parts.length != 2) {
                throw new IllegalStateException("Invalid cache key for provider cache: " + entryKey);
            }
            return keystore.getProvider(parts[0], Long.parseLong(parts[1]));
        }
    };


    /**
     * Creates a new {@link CachingMostRecentProvider}.
     *
     * @param keystore
     *            The key store that this provider will use to determine which material and which version of material to use
     * @param materialName
     *            The name of the materials associated with this provider
     * @param ttlInMillis
     *            The length of time in milliseconds to cache the most recent provider
     */
    public CachingMostRecentProvider(final ProviderStore keystore, final String materialName, final long ttlInMillis) {
        this(keystore, materialName, ttlInMillis, DEFAULT_CACHE_MAX_SIZE);
    }

    /**
     * Creates a new {@link CachingMostRecentProvider}.
     *
     * @param keystore
     *            The key store that this provider will use to determine which material and which version of material to use
     * @param materialName
     *            The name of the materials associated with this provider
     * @param ttlInMillis
     *            The length of time in milliseconds to cache the most recent provider
     * @param maxCacheSize
     *            The maximum size of the underlying caches this provider uses. Entries will be evicted from the cache
     *            once this size is exceeded.
     */
    public CachingMostRecentProvider(final ProviderStore keystore, final String materialName, final long ttlInMillis, final int maxCacheSize) {
        this.keystore = checkNotNull(keystore, "keystore must not be null");
        this.defaultMaterialName = materialName;
        this.ttlInNanos = TimeUnit.MILLISECONDS.toNanos(ttlInMillis);

        this.providerCache = new TTLCache<>(maxCacheSize, ttlInMillis, providerLoader);
        this.versionCache = new TTLCache<>(maxCacheSize, ttlInMillis, versionLoader);
    }

    /**
     * Returns encryption materials from the most recent version of the material in the
     * keystore, creating the initial version if the material does not exist yet.
     */
    @Override
    public EncryptionMaterials getEncryptionMaterials(EncryptionContext context) {
        final String materialName = getMaterialName(context);
        final long currentVersion = versionCache.load(materialName);

        if (currentVersion < 0) {
            // The material hasn't been created yet, so specify a loading function
            // to create the first version of materials and update both caches.
            // We want this to be done as part of the cache load to ensure that this logic
            // only happens once in a multithreaded environment,
            // in order to limit calls to the keystore's dependencies.
            final String cacheKey = buildCacheKey(materialName, INITIAL_VERSION);
            EncryptionMaterialsProvider newProvider = providerCache.load(
                    cacheKey,
                    s -> {
                        // Create the new material in the keystore
                        final String[] parts = s.split(PROVIDER_CACHE_KEY_DELIM, 2);
                        if (parts.length != 2) {
                            throw new IllegalStateException("Invalid cache key for provider cache: " + s);
                        }
                        EncryptionMaterialsProvider provider = keystore.getOrCreate(parts[0], Long.parseLong(parts[1]));

                        // We now should have version 0 in our keystore.
                        // Update the version cache for this material as a side effect
                        versionCache.put(materialName, INITIAL_VERSION);

                        // Return the new materials to be put into the cache
                        return provider;
                    }
            );

            return newProvider.getEncryptionMaterials(context);
        } else {
            final String cacheKey = buildCacheKey(materialName, currentVersion);
            return providerCache.load(cacheKey).getEncryptionMaterials(context);
        }
    }

    /**
     * Returns decryption materials for whichever material version is recorded in the
     * context's material description, loading (and caching) that version's provider.
     */
    @Override
    public DecryptionMaterials getDecryptionMaterials(EncryptionContext context) {
        final long version = keystore.getVersionFromMaterialDescription(
                context.getMaterialDescription());
        final String materialName = getMaterialName(context);
        final String cacheKey = buildCacheKey(materialName, version);

        EncryptionMaterialsProvider provider = providerCache.load(cacheKey);
        return provider.getDecryptionMaterials(context);
    }

    /**
     * Completely empties the cache of both the current and old versions.
     */
    @Override
    public void refresh() {
        versionCache.clear();
        providerCache.clear();
    }

    /** Returns the material name used when the encryption context does not supply one. */
    public String getMaterialName() {
        return defaultMaterialName;
    }

    /** Returns the configured cache TTL, converted back to milliseconds. */
    public long getTtlInMills() {
        return TimeUnit.NANOSECONDS.toMillis(ttlInNanos);
    }

    /**
     * The current version of the materials being used for encryption. Returns -1 if we do not
     * currently have a current version.
     */
    public long getCurrentVersion() {
        return versionCache.load(getMaterialName());
    }

    /**
     * The last time the current version was updated. Returns 0 if we do not currently have a
     * current version.
     */
    public long getLastUpdated() {
        // We cache a version of -1 to mean that there is not a current version
        if (versionCache.load(getMaterialName()) < 0) {
            return 0;
        }
        // Otherwise, return the last update time of that entry
        return TimeUnit.NANOSECONDS.toMillis(versionCache.getLastUpdated(getMaterialName()));
    }

    /** Hook for subclasses to select a material name from the context; defaults to the configured name. */
    protected String getMaterialName(final EncryptionContext context) {
        return defaultMaterialName;
    }

    /** Joins the material name and version into the "name#version" provider-cache key. */
    private static String buildCacheKey(final String materialName, final long version) {
        StringBuilder result = new StringBuilder(materialName);
        result.append(PROVIDER_CACHE_KEY_DELIM);
        result.append(version);
        return result.toString();
    }
}
Original file line number Diff line number Diff line change
Expand Up @@ -263,6 +263,7 @@ protected GenerateDataKeyResult generateDataKey(final GenerateDataKeyRequest req
* <dd>{@code RangeKeyValue}</dd>
* <dt>{@link #TABLE_NAME_EC_KEY}</dt>
* <dd>{@code TableName}</dd>
* </dl>
*/
protected void populateKmsEcFromEc(EncryptionContext context, Map<String, String> kmsEc) {
final String hashKeyName = context.getHashKeyName();
Expand Down
Original file line number Diff line number Diff line change
@@ -1,15 +1,5 @@
/*
* Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except
* in compliance with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.services.dynamodbv2.datamodeling.encryption.providers;

import java.util.concurrent.atomic.AtomicReference;
Expand All @@ -21,12 +11,24 @@
import com.amazonaws.services.dynamodbv2.datamodeling.encryption.providers.store.ProviderStore;
import com.amazonaws.services.dynamodbv2.datamodeling.internal.LRUCache;

import static com.amazonaws.services.dynamodbv2.datamodeling.internal.Utils.checkNotNull;

/**
* This meta-Provider encrypts data with the most recent version of keying materials from a
* {@link ProviderStore} and decrypts using whichever version is appropriate. It also caches the
* results from the {@link ProviderStore} to avoid excessive load on the backing systems. The cache
* is not currently configurable.
*
* @deprecated This provider uses a TTL value to determine when to ping the keystore
* to get the current materials version, instead of using the TTL value to determine
* when to expire cached materials. This is unintuitive behavior for users of this provider
* who may wish to use a TTL to force the keystore to re-obtain materials.
*
* Use the CachingMostRecentProvider, which uses a user defined TTL value to
* also expire the cached materials themselves, forcing
* the keystore to regularly re-obtain materials.
*/
@Deprecated
public class MostRecentProvider implements EncryptionMaterialsProvider {
private static final long MILLI_TO_NANO = 1000000L;
private static final long TTL_GRACE_IN_NANO = 500 * MILLI_TO_NANO;
Expand Down Expand Up @@ -165,14 +167,6 @@ private static String buildCacheKey(final String materialName, final long versio
return result.toString();
}

private static <V> V checkNotNull(final V ref, final String errMsg) {
if (ref == null) {
throw new NullPointerException(errMsg);
} else {
return ref;
}
}

private static class LockedState {
private final ReentrantLock lock = new ReentrantLock(true);
private volatile AtomicReference<State> state = new AtomicReference<>(new State());
Expand Down
Original file line number Diff line number Diff line change
@@ -1,26 +1,11 @@
/*
* Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.services.dynamodbv2.datamodeling.internal;

import com.amazonaws.annotation.ThreadSafe;

import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;

Expand All @@ -36,10 +21,7 @@ public final class LRUCache<T> {
* Used for the internal cache.
*/
private final Map<String, T> map;
/**
* Listener for cache entry eviction.
*/
private final RemovalListener<T> listener;

/**
* Maximum size of the cache.
*/
Expand All @@ -48,25 +30,13 @@ public final class LRUCache<T> {
/**
* @param maxSize
* the maximum number of entries of the cache
* @param listener
* object which is notified immediately prior to the removal of
* any objects from the cache
*/
public LRUCache(final int maxSize, final RemovalListener<T> listener) {
public LRUCache(final int maxSize) {
if (maxSize < 1) {
throw new IllegalArgumentException("maxSize " + maxSize + " must be at least 1");
}
this.maxSize = maxSize;
this.listener = listener;
map = Collections.synchronizedMap(new LRUHashMap<T>(maxSize, listener));
}

/**
* @param maxSize
* the maximum number of entries of the cache
*/
public LRUCache(final int maxSize) {
this(maxSize, null);
map = Collections.synchronizedMap(new LRUHashMap<>(maxSize));
}

/**
Expand Down Expand Up @@ -96,23 +66,11 @@ public int getMaxSize() {
}

public void clear() {
// The more complicated logic is to ensure that the listener is
// actually called for all entries.
if (listener != null) {
List<Entry<String, T>> removedEntries = new ArrayList<Entry<String, T>>();
synchronized (map) {
Iterator<Entry<String, T>> it = map.entrySet().iterator();
while(it.hasNext()) {
removedEntries.add(it.next());
it.remove();
}
}
for (Entry<String, T> entry : removedEntries) {
listener.onRemoval(entry);
}
} else {
map.clear();
}
map.clear();
}

    /**
     * Removes the entry at {@code key} from the cache, if present.
     *
     * @param key the cache key to remove
     * @return the value previously cached at {@code key}, or null if there was none
     */
    public T remove(String key) {
        return map.remove(key);
    }

@Override
Expand All @@ -123,27 +81,15 @@ public String toString() {
@SuppressWarnings("serial")
private static class LRUHashMap<T> extends LinkedHashMap<String, T> {
private final int maxSize;
private final RemovalListener<T> listener;

private LRUHashMap(final int maxSize, final RemovalListener<T> listener) {
private LRUHashMap(final int maxSize) {
super(10, 0.75F, true);
this.maxSize = maxSize;
this.listener = listener;
}

@Override
protected boolean removeEldestEntry(final Entry<String, T> eldest) {
if (size() > maxSize) {
if (listener != null) {
listener.onRemoval(eldest);
}
return true;
}
return false;
return size() > maxSize;
}
}

public static interface RemovalListener<T> {
public void onRemoval(Entry<String, T> entry);
}
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except
* in compliance with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/

package com.amazonaws.services.dynamodbv2.datamodeling.internal;

interface MsClock {
MsClock WALLCLOCK = System::nanoTime;

public long timestampNano();
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,259 @@
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.services.dynamodbv2.datamodeling.internal;

import com.amazonaws.annotation.ThreadSafe;

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Function;

import static com.amazonaws.services.dynamodbv2.datamodeling.internal.Utils.checkNotNull;

/**
* A cache, backed by an LRUCache, that uses a loader to calculate values on cache miss
* or expired TTL.
*
* Note that this cache does not proactively evict expired entries,
* however will immediately evict entries discovered to be expired on load.
*
* @param <T>
* value type
*/
@ThreadSafe
public final class TTLCache<T> {
    /**
     * Used for the internal cache.
     */
    private final LRUCache<LockedState<T>> cache;

    /**
     * Time to live for entries in the cache, in nanoseconds.
     */
    private final long ttlInNanos;

    /**
     * Used for loading new values into the cache on cache miss or expiration.
     */
    private final EntryLoader<T> defaultLoader;

    // Mockable time source, to allow us to test TTL behavior.
    // package access for tests
    MsClock clock = MsClock.WALLCLOCK;

    // Grace period past the TTL during which a stale value may still be served
    // while a single thread refreshes the entry.
    private static final long TTL_GRACE_IN_NANO = TimeUnit.MILLISECONDS.toNanos(500);

    /**
     * @param maxSize
     *            the maximum number of entries of the cache; must be at least 1
     * @param ttlInMillis
     *            the time to live value for entries of the cache, in milliseconds; must be at least 1
     * @param loader
     *            used to compute values on cache miss or expiration; must not be null
     */
    public TTLCache(final int maxSize, final long ttlInMillis, final EntryLoader<T> loader) {
        if (maxSize < 1) {
            throw new IllegalArgumentException("maxSize " + maxSize + " must be at least 1");
        }
        if (ttlInMillis < 1) {
            // Bug fix: this message previously interpolated maxSize instead of ttlInMillis,
            // producing a misleading exception for callers.
            throw new IllegalArgumentException("ttlInMillis " + ttlInMillis + " must be at least 1");
        }
        this.ttlInNanos = TimeUnit.MILLISECONDS.toNanos(ttlInMillis);
        this.cache = new LRUCache<>(maxSize);
        this.defaultLoader = checkNotNull(loader, "loader must not be null");
    }

    /**
     * Uses the default loader to calculate the value at key and insert it into the cache,
     * if it doesn't already exist or is expired according to the TTL.
     *
     * This immediately evicts entries past the TTL such that a load failure results
     * in the removal of the entry.
     *
     * Entries that are not expired according to the TTL are returned without recalculating the value.
     *
     * Within a grace period past the TTL, the cache may either return the cached value without recalculating
     * or use the loader to recalculate the value. This is implemented such that, in a multi-threaded environment,
     * only one thread per cache key uses the loader to recalculate the value at one time.
     *
     * @param key
     *            The cache key to load the value at
     * @return
     *            The value of the given value (already existing or re-calculated).
     */
    public T load(final String key) {
        return load(key, defaultLoader::load);
    }

    /**
     * Uses the inputted function to calculate the value at key and insert it into the cache,
     * if it doesn't already exist or is expired according to the TTL.
     *
     * This immediately evicts entries past the TTL such that a load failure results
     * in the removal of the entry.
     *
     * Entries that are not expired according to the TTL are returned without recalculating the value.
     *
     * Within a grace period past the TTL, the cache may either return the cached value without recalculating
     * or use the loader to recalculate the value. This is implemented such that, in a multi-threaded environment,
     * only one thread per cache key uses the loader to recalculate the value at one time.
     *
     * Returns the value of the given key (already existing or re-calculated).
     *
     * @param key
     *            The cache key to load the value at
     * @param f
     *            The function to use to load the value, given key as input
     * @return
     *            The value of the given value (already existing or re-calculated).
     */
    public T load(final String key, Function<String, T> f) {
        final LockedState<T> ls = cache.get(key);

        if (ls == null) {
            // The entry doesn't exist yet, so load a new one.
            return loadNewEntryIfAbsent(key, f);
        } else if (clock.timestampNano() - ls.getState().lastUpdatedNano > ttlInNanos + TTL_GRACE_IN_NANO) {
            // The data has expired past the grace period.
            // Evict the old entry and load a new entry.
            cache.remove(key);
            return loadNewEntryIfAbsent(key, f);
        } else if (clock.timestampNano() - ls.getState().lastUpdatedNano <= ttlInNanos) {
            // The data hasn't expired. Return as-is from the cache.
            return ls.getState().data;
        } else if (!ls.tryLock()) {
            // We are in the TTL grace period. If we couldn't grab the lock, then some other
            // thread is currently loading the new value. Because we are in the grace period,
            // use the cached data instead of waiting for the lock.
            return ls.getState().data;
        }

        // We are in the grace period and have acquired a lock.
        // Update the cache with the value determined by the loading function.
        try {
            T loadedData = f.apply(key);
            ls.update(loadedData, clock.timestampNano());
            return ls.getState().data;
        } finally {
            ls.unlock();
        }
    }

    // Synchronously calculate the value for a new entry in the cache if it doesn't already exist.
    // Otherwise return the cached value.
    // It is important that this is the only place where we use the loader for a new entry,
    // given that we don't have the entry yet to lock on.
    // This ensures that the loading function is only called once if multiple threads
    // attempt to add a new entry for the same key at the same time.
    private synchronized T loadNewEntryIfAbsent(final String key, Function<String, T> f) {
        // If the entry already exists in the cache, return it
        final LockedState<T> cachedState = cache.get(key);
        if (cachedState != null) {
            return cachedState.getState().data;
        }

        // Otherwise, load the data and create a new entry
        T loadedData = f.apply(key);
        LockedState<T> ls = new LockedState<>(loadedData, clock.timestampNano());
        cache.add(key, ls);
        return loadedData;
    }

    /**
     * Put a new entry in the cache.
     * Returns the value previously at that key in the cache,
     * or null if the entry previously didn't exist or
     * is expired.
     */
    public synchronized T put(final String key, final T value) {
        LockedState<T> ls = new LockedState<>(value, clock.timestampNano());
        LockedState<T> oldLockedState = cache.add(key, ls);
        if (oldLockedState == null || clock.timestampNano() - oldLockedState.getState().lastUpdatedNano > ttlInNanos + TTL_GRACE_IN_NANO) {
            return null;
        }
        return oldLockedState.getState().data;
    }

    /**
     * Get when the entry at this key was last updated.
     * Returns 0 if the entry doesn't exist at key.
     */
    public long getLastUpdated(String key) {
        LockedState<T> ls = cache.get(key);
        if (ls == null) {
            return 0;
        }
        return ls.getState().lastUpdatedNano;
    }

    /**
     * Returns the current size of the cache.
     */
    public int size() {
        return cache.size();
    }

    /**
     * Returns the maximum size of the cache.
     */
    public int getMaxSize() {
        return cache.getMaxSize();
    }

    /**
     * Clears all entries from the cache.
     */
    public void clear() {
        cache.clear();
    }

    @Override
    public String toString() {
        return cache.toString();
    }

    /**
     * Computes the value to cache for a given key on cache miss or expiration.
     */
    public interface EntryLoader<T> {
        T load(String entryKey);
    }

    // An object which stores a state alongside a lock,
    // and performs updates to that state atomically.
    // The state may only be updated if the lock is acquired by the current thread.
    private static class LockedState<T> {
        private final ReentrantLock lock = new ReentrantLock(true);
        private final AtomicReference<State<T>> state;

        public LockedState(T data, long createTimeNano) {
            state = new AtomicReference<>(new State<>(data, createTimeNano));
        }

        public State<T> getState() {
            return state.get();
        }

        public void unlock() {
            lock.unlock();
        }

        public boolean tryLock() {
            return lock.tryLock();
        }

        public void update(T data, long createTimeNano) {
            if (!lock.isHeldByCurrentThread()) {
                throw new IllegalStateException("Lock not held by current thread");
            }
            state.set(new State<>(data, createTimeNano));
        }
    }

    // An object that holds some data and the time at which this object was created
    private static class State<T> {
        public final T data;
        public final long lastUpdatedNano;

        public State(T data, long lastUpdatedNano) {
            this.data = data;
            this.lastUpdatedNano = lastUpdatedNano;
        }
    }
}
Original file line number Diff line number Diff line change
Expand Up @@ -39,4 +39,12 @@ public static byte[] getRandom(int len) {
getRng().nextBytes(result);
return result;
}

public static <V> V checkNotNull(final V ref, final String errMsg) {
if (ref == null) {
throw new NullPointerException(errMsg);
} else {
return ref;
}
}
}
Loading