Skip to content

Commit

Permalink
Merge pull request #76 from HotSushi/storage
Browse files Browse the repository at this point in the history
Add cluster.yaml, StorageProperties, StorageType enum for supporting multiple storages
  • Loading branch information
HotSushi committed Apr 22, 2024
2 parents 8312af7 + e965e04 commit 57d03fe
Show file tree
Hide file tree
Showing 5 changed files with 209 additions and 0 deletions.
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
package com.linkedin.openhouse.cluster.storage;

import lombok.*;

/**
 * Enum-like registry of supported storage types.
 *
 * <p>This is intentionally a class rather than a Java {@code enum} so it can be subclassed to
 * register additional types. New types should be added here as public static final fields, and
 * their corresponding implementations should be added to the {@link #fromString(String)} method.
 */
public class StorageType {
  public static final Type HDFS = new Type("hdfs");
  public static final Type LOCAL = new Type("local");

  /**
   * Immutable value object wrapping a storage type's string identifier.
   *
   * <p>Equality is based solely on the wrapped value, so a constant declared in a subclass with
   * the same value compares equal to one declared here.
   */
  public static class Type {
    private final String value;

    public Type(String value) {
      this.value = value;
    }

    public String getValue() {
      return value;
    }

    @Override
    public boolean equals(Object o) {
      if (this == o) {
        return true;
      }
      if (!(o instanceof Type)) {
        return false;
      }
      Type that = (Type) o;
      // Null-safe value comparison (value is never null for the constants declared above,
      // but the public constructor does not forbid it).
      return value == null ? that.value == null : value.equals(that.value);
    }

    @Override
    public int hashCode() {
      return value == null ? 0 : value.hashCode();
    }
  }

  /**
   * Resolves a string identifier to its canonical {@link Type} constant.
   *
   * <p>Comparison is constant-first so a null argument falls through to the exception below
   * rather than raising a NullPointerException.
   *
   * @param type string identifier, e.g. "hdfs" or "local"; may be null
   * @return the matching canonical Type constant
   * @throws IllegalArgumentException if the identifier (including null) matches no known type
   */
  public Type fromString(String type) {
    if (HDFS.getValue().equals(type)) {
      return HDFS;
    } else if (LOCAL.getValue().equals(type)) {
      return LOCAL;
    }
    throw new IllegalArgumentException("Unknown storage type: " + type);
  }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
package com.linkedin.openhouse.cluster.storage.configs;

import com.linkedin.openhouse.cluster.configs.YamlPropertySourceFactory;
import java.util.HashMap;
import java.util.Map;
import lombok.*;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.PropertySource;

/**
 * This class represents the storage properties for the cluster. It includes the default storage
 * type and a map of different storage types. Each storage type has its own properties such as root
 * path, endpoint, and parameters. For list of supported storage types, see {@link
 * com.linkedin.openhouse.cluster.storage.StorageType}.
 *
 * <p>Bound from the "cluster.storages" prefix of the cluster YAML located at
 * OPENHOUSE_CLUSTER_CONFIG_PATH (default: /var/config/cluster.yaml). The file is optional
 * (ignoreResourceNotFound = true); when absent, the fields below remain null.
 */
@Configuration
@ConfigurationProperties(prefix = "cluster.storages")
@PropertySource(
    name = "clusterStorage",
    value = "file:${OPENHOUSE_CLUSTER_CONFIG_PATH:/var/config/cluster.yaml}",
    factory = YamlPropertySourceFactory.class,
    ignoreResourceNotFound = true)
@Getter
@Setter
public class StorageProperties {
  // Key into the "types" map identifying the cluster's default storage (e.g. "hdfs").
  private String defaultType;

  // Storage type name -> its configuration. NOTE(review): stays null when the "storages"
  // section is absent from the config file — callers should guard against that.
  private Map<String, StorageTypeProperties> types;

  /** Configuration for a single storage type (one entry under "types" in the YAML). */
  @Getter
  @Setter
  @AllArgsConstructor
  @NoArgsConstructor
  @Builder(toBuilder = true)
  public static class StorageTypeProperties {
    // Root path within the storage, e.g. "/tmp/unittest" for HDFS or a bucket name.
    private String rootPath;
    // Storage endpoint URI, e.g. "hdfs://localhost:9000".
    private String endpoint;
    // Free-form storage-specific key/value settings; defaults to an empty map via the builder.
    @Builder.Default private Map<String, String> parameters = new HashMap<>();
  }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
package com.linkedin.openhouse.tables.mock.storage;

import com.linkedin.openhouse.cluster.storage.configs.StorageProperties;
import com.linkedin.openhouse.tables.mock.properties.CustomClusterPropertiesInitializer;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ContextConfiguration;

@SpringBootTest
@ContextConfiguration(initializers = CustomClusterPropertiesInitializer.class)
public class StoragePropertiesConfigTest {
@Autowired private StorageProperties storageProperties;

private static final String DEFAULT_TYPE = "hdfs";

private static final String DEFAULT_ENDPOINT = "hdfs://localhost:9000";

private static final String ANOTHER_TYPE = "objectstore";

private static final String ANOTHER_ENDPOINT = "http://localhost:9000";
private static final String NON_EXISTING_TYPE = "non-existing-type";

@Test
public void testDefaultType() {
Assertions.assertEquals(DEFAULT_TYPE, storageProperties.getDefaultType());
}

@Test
public void testStorageTypeEndpoint() {
Assertions.assertEquals(
DEFAULT_ENDPOINT, storageProperties.getTypes().get(DEFAULT_TYPE).getEndpoint());
}

@Test
public void testStorageTypeLookup() {
Assertions.assertEquals(
ANOTHER_ENDPOINT, storageProperties.getTypes().get(ANOTHER_TYPE).getEndpoint());
}

@Test
public void testStorageTypeVariableProperties() {
Assertions.assertFalse(
storageProperties.getTypes().get(DEFAULT_TYPE).getParameters().isEmpty());
}

@Test
public void testUnsetPropertiesAreNull() {
Assertions.assertNull(storageProperties.getTypes().get(NON_EXISTING_TYPE));
}

@AfterAll
static void unsetSysProp() {
System.clearProperty("OPENHOUSE_CLUSTER_CONFIG_PATH");
}
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
package com.linkedin.openhouse.tables.mock.storage;

import com.linkedin.openhouse.cluster.storage.StorageType;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;

public class StorageTypeEnumTest {

  /**
   * This test demonstrates the ability to extend the StorageType class to add new storage types.
   */
  public static class ExtendedStorageType extends StorageType {
    public static final Type GCS = new Type("gcs");
    public static final Type S3 = new Type("s3");

    /**
     * Resolves the extended types first, then falls back to the base types.
     *
     * <p>Comparison is constant-first (e.g. {@code GCS.getValue().equals(type)}), consistent
     * with the base class, so a null argument falls through to {@code super.fromString} and
     * yields IllegalArgumentException instead of a NullPointerException here.
     */
    @Override
    public Type fromString(String type) {
      if (GCS.getValue().equals(type)) {
        return GCS;
      } else if (S3.getValue().equals(type)) {
        return S3;
      }
      return super.fromString(type);
    }
  }

  /** Type constants are canonical singletons; equality follows the wrapped string value. */
  @Test
  public void testEnumEquality() {
    Assertions.assertSame(StorageType.LOCAL, StorageType.LOCAL);
    Assertions.assertNotSame(StorageType.HDFS, StorageType.LOCAL);
    Assertions.assertNotSame(ExtendedStorageType.GCS, StorageType.HDFS);
    Assertions.assertSame(ExtendedStorageType.HDFS, StorageType.HDFS);
    Assertions.assertTrue(StorageType.LOCAL.equals(ExtendedStorageType.LOCAL));
  }

  /** String values of base and extended constants are as declared. */
  @Test
  public void testValueEquality() {
    Assertions.assertEquals("local", StorageType.LOCAL.getValue());
    Assertions.assertEquals("hdfs", StorageType.HDFS.getValue());
    Assertions.assertEquals("gcs", ExtendedStorageType.GCS.getValue());
    Assertions.assertEquals("s3", ExtendedStorageType.S3.getValue());
    Assertions.assertEquals(StorageType.LOCAL.getValue(), ExtendedStorageType.LOCAL.getValue());
  }

  /** fromString on an extended type resolves both inherited and newly added types. */
  @Test
  public void testTypeFromString() {
    // Allows StorageType to be extended with new types. A primary bean can be provided.
    StorageType storageType = new ExtendedStorageType();
    Assertions.assertSame(StorageType.LOCAL, storageType.fromString("local"));
    Assertions.assertSame(StorageType.HDFS, storageType.fromString("hdfs"));
    Assertions.assertSame(ExtendedStorageType.GCS, storageType.fromString("gcs"));
    Assertions.assertSame(ExtendedStorageType.S3, storageType.fromString("s3"));
  }

  /** Unknown identifiers and null are rejected with IllegalArgumentException. */
  @Test
  public void testExceptionForInvalidString() {
    StorageType storageType = new StorageType();
    Assertions.assertThrows(
        IllegalArgumentException.class, () -> storageType.fromString("non-existing-type"));
    Assertions.assertThrows(IllegalArgumentException.class, () -> storageType.fromString(null));
    // The extended type must also reject null (no NPE from the overridden fromString).
    StorageType extendedType = new ExtendedStorageType();
    Assertions.assertThrows(IllegalArgumentException.class, () -> extendedType.fromString(null));
  }
}
14 changes: 14 additions & 0 deletions services/tables/src/test/resources/cluster-test-properties.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,20 @@ cluster:
name: "TestCluster"
storage:
root-path: "/tmp/unittest"
storages:
default-type: "hdfs"
types:
hdfs:
root-path: "/tmp/unittest"
endpoint: "hdfs://localhost:9000"
parameters:
key1: value1
objectstore:
root-path: "tmpbucket"
endpoint: "http://localhost:9000"
parameters:
key2: value2
token: xyz
housetables:
base-uri: "http://localhost:8080"
security:
Expand Down

0 comments on commit 57d03fe

Please sign in to comment.