From 1b9bea09737dadfc2128c9398fc1d362027543f7 Mon Sep 17 00:00:00 2001 From: Takanobu Asanuma Date: Wed, 3 Oct 2018 16:38:41 +0900 Subject: [PATCH 01/60] HADOOP-15814. Maven 3.3.3 unable to parse pom file. Contributed by Wei-Chiu Chuang. (cherry picked from commit 2626f46691e1e1ad09967d0931a79b95e308c8b8) --- hadoop-project/pom.xml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index e7baee003cba1..ffa2646134469 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -1656,7 +1656,9 @@ maven-javadoc-plugin ${maven-javadoc-plugin.version} - -Xmaxwarns 10000 + + -Xmaxwarns 10000 + From e5e9d7b595fd44eb36db2c2846792fed82d7757c Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Wed, 3 Oct 2018 12:53:56 +0100 Subject: [PATCH 02/60] HADOOP-15795. Make HTTPS the default protocol for ABFS. Contributed by Da Zhou. (cherry picked from commit 7051bd78b17b2666c2fa0f61823920285a060a76) --- .../hadoop/fs/azurebfs/AbfsConfiguration.java | 8 ++ .../fs/azurebfs/AzureBlobFileSystem.java | 4 +- .../fs/azurebfs/AzureBlobFileSystemStore.java | 12 +-- .../azurebfs/SecureAzureBlobFileSystem.java | 2 +- .../azurebfs/constants/ConfigurationKeys.java | 1 + .../constants/FileSystemConfigurations.java | 2 + .../ITestAzureBlobFileSystemFileStatus.java | 2 +- .../fs/azurebfs/ITestClientUrlScheme.java | 101 ++++++++++++++++++ .../fs/azurebfs/ITestOauthOverAbfsScheme.java | 63 +++++++++++ .../contract/AbfsFileSystemContract.java | 2 +- .../services/TestOauthFailOverHttp.java | 55 ---------- 11 files changed, 185 insertions(+), 67 deletions(-) create mode 100644 hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestClientUrlScheme.java create mode 100644 hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestOauthOverAbfsScheme.java delete mode 100644 hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestOauthFailOverHttp.java diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java index c57c34097c6c1..58e12a84ab25f 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java @@ -162,6 +162,10 @@ public class AbfsConfiguration{ DefaultValue = "") private String abfsExternalAuthorizationClass; + @BooleanConfigurationValidatorAnnotation(ConfigurationKey = FS_AZURE_ALWAYS_USE_HTTPS, + DefaultValue = DEFAULT_ENABLE_HTTPS) + private boolean alwaysUseHttps; + private Map storageAccountKeys; public AbfsConfiguration(final Configuration rawConfig, String accountName) @@ -433,6 +437,10 @@ public AbfsDelegationTokenManager getDelegationTokenManager() throws IOException return new AbfsDelegationTokenManager(getRawConfiguration()); } + public boolean isHttpsAlwaysUsed() { + return this.alwaysUseHttps; + } + public AccessTokenProvider getTokenProvider() throws TokenAccessProviderException { AuthType authType = getEnum(FS_AZURE_ACCOUNT_AUTH_TYPE_PROPERTY_NAME, AuthType.SharedKey); if (authType == AuthType.OAuth) { diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java index 4b521e13771c5..7c0af860e29b4 100644 --- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java @@ -105,7 +105,7 @@ public void initialize(URI uri, Configuration configuration) this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority()); this.userGroupInformation = UserGroupInformation.getCurrentUser(); this.user = userGroupInformation.getUserName(); - this.abfsStore = new AzureBlobFileSystemStore(uri, this.isSecure(), configuration, userGroupInformation); + this.abfsStore = new AzureBlobFileSystemStore(uri, this.isSecureScheme(), configuration, userGroupInformation); final AbfsConfiguration abfsConfiguration = abfsStore.getAbfsConfiguration(); this.setWorkingDirectory(this.getHomeDirectory()); @@ -154,7 +154,7 @@ public String toString() { return sb.toString(); } - public boolean isSecure() { + public boolean isSecureScheme() { return false; } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java index a735ce094ea4a..1ac1761352e3a 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java @@ -115,7 +115,7 @@ public class AzureBlobFileSystemStore { private boolean isNamespaceEnabledSet; private boolean isNamespaceEnabled; - public AzureBlobFileSystemStore(URI uri, boolean isSecure, Configuration configuration, UserGroupInformation userGroupInformation) + public AzureBlobFileSystemStore(URI uri, boolean isSecureScheme, Configuration configuration, UserGroupInformation userGroupInformation) throws AzureBlobFileSystemException, IOException { this.uri = uri; @@ -142,13 +142,11 @@ public AzureBlobFileSystemStore(URI uri, boolean isSecure, Configuration configu this.azureAtomicRenameDirSet = new HashSet<>(Arrays.asList( abfsConfiguration.getAzureAtomicRenameDirs().split(AbfsHttpConstants.COMMA))); - if (AuthType.OAuth == abfsConfiguration.getEnum(FS_AZURE_ACCOUNT_AUTH_TYPE_PROPERTY_NAME, AuthType.SharedKey) - && !FileSystemUriSchemes.ABFS_SECURE_SCHEME.equals(uri.getScheme())) { - throw new IllegalArgumentException( - String.format("Incorrect URI %s, URI scheme must be abfss when authenticating using Oauth.", uri)); - } + boolean usingOauth = (AuthType.OAuth == abfsConfiguration.getEnum( + FS_AZURE_ACCOUNT_AUTH_TYPE_PROPERTY_NAME, AuthType.SharedKey)); - initializeClient(uri, fileSystemName, accountName, isSecure); + boolean useHttps = (usingOauth || abfsConfiguration.isHttpsAlwaysUsed()) ? 
true : isSecureScheme; + initializeClient(uri, fileSystemName, accountName, useHttps); } private String[] authorityParts(URI uri) throws InvalidUriAuthorityException, InvalidUriException { diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/SecureAzureBlobFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/SecureAzureBlobFileSystem.java index 15fe5427252fb..bc9530c66d207 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/SecureAzureBlobFileSystem.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/SecureAzureBlobFileSystem.java @@ -28,7 +28,7 @@ @InterfaceStability.Evolving public class SecureAzureBlobFileSystem extends AzureBlobFileSystem { @Override - public boolean isSecure() { + public boolean isSecureScheme() { return true; } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java index 3e4ae21aeba73..97217f7f47ad8 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java @@ -48,6 +48,7 @@ public final class ConfigurationKeys { public static final String AZURE_CREATE_REMOTE_FILESYSTEM_DURING_INITIALIZATION = "fs.azure.createRemoteFileSystemDuringInitialization"; public static final String AZURE_SKIP_USER_GROUP_METADATA_DURING_INITIALIZATION = "fs.azure.skipUserGroupMetadataDuringInitialization"; public static final String FS_AZURE_ENABLE_AUTOTHROTTLING = "fs.azure.enable.autothrottling"; + public static final String FS_AZURE_ALWAYS_USE_HTTPS = "fs.azure.always.use.https"; public static final String FS_AZURE_ATOMIC_RENAME_KEY = "fs.azure.atomic.rename.key"; public static final String FS_AZURE_READ_AHEAD_QUEUE_DEPTH = "fs.azure.readaheadqueue.depth"; public static final String FS_AZURE_ENABLE_FLUSH = "fs.azure.enable.flush"; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java index a9412a961c061..4949d59170a69 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java @@ -63,5 +63,7 @@ public final class FileSystemConfigurations { = SSLSocketFactoryEx.SSLChannelMode.Default; public static final boolean DEFAULT_ENABLE_DELEGATION_TOKEN = false; + public static final boolean DEFAULT_ENABLE_HTTPS = true; + private FileSystemConfigurations() {} } \ No newline at end of file diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFileStatus.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFileStatus.java index 02f938f19f401..707a1452d50db 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFileStatus.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFileStatus.java @@ -70,7 +70,7 @@ private FileStatus validateStatus(final AzureBlobFileSystem fs, final Path name, String 
errorInStatus = "error in " + fileStatus + " from " + fs; // When running with Oauth, the owner and group info retrieved from server will be digit ids. - if (this.getAuthType() != AuthType.OAuth && !fs.isSecure()) { + if (this.getAuthType() != AuthType.OAuth && !fs.isSecureScheme()) { assertEquals(errorInStatus + ": owner", fs.getOwnerUser(), fileStatus.getOwner()); assertEquals(errorInStatus + ": group", diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestClientUrlScheme.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestClientUrlScheme.java new file mode 100644 index 0000000000000..ad889838ff1d8 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestClientUrlScheme.java @@ -0,0 +1,101 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs; + +import java.lang.reflect.Field; +import java.net.URL; +import java.util.Arrays; + +import org.junit.Assert; +import org.junit.Assume; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.azurebfs.services.AbfsClient; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.fs.azurebfs.constants.FileSystemUriSchemes; +import org.apache.hadoop.fs.azurebfs.services.AuthType; + +import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ALWAYS_USE_HTTPS; + +/** + * Parameterized test of ABFS CLIENT URL scheme verification. 
+ */ + +@RunWith(Parameterized.class) +public class ITestClientUrlScheme extends AbstractAbfsIntegrationTest{ + + @Parameterized.Parameter + public boolean useSecureScheme; + + @Parameterized.Parameter(1) + public boolean alwaysUseHttps; + + @Parameterized.Parameters + public static Iterable params() { + return Arrays.asList( + new Object[][]{ + {false, false}, + {false, true}, + {true, true}, + {true, false} + }); + } + + public ITestClientUrlScheme() throws Exception { + super(); + // authentication like OAUTH must use HTTPS + Assume.assumeTrue("ITestClientUrlScheme is skipped because auth type is not SharedKey", + getAuthType() == AuthType.SharedKey); + } + + @Test + public void testClientUrlScheme() throws Exception { + String[] urlWithoutScheme = this.getTestUrl().split(":"); + String fsUrl; + // update filesystem scheme + if (useSecureScheme) { + fsUrl = FileSystemUriSchemes.ABFS_SECURE_SCHEME + ":" + urlWithoutScheme[1]; + } else { + fsUrl = FileSystemUriSchemes.ABFS_SCHEME + ":" + urlWithoutScheme[1]; + } + + Configuration config = getRawConfiguration(); + config.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsUrl.toString()); + config.setBoolean(FS_AZURE_ALWAYS_USE_HTTPS, alwaysUseHttps); + + AbfsClient client = this.getFileSystem(config).getAbfsClient(); + + Field baseUrlField = AbfsClient.class. + getDeclaredField("baseUrl"); + baseUrlField.setAccessible(true); + + String url = ((URL) baseUrlField.get(client)).toString(); + + // HTTP is enabled only when "abfs://XXX" is used and FS_AZURE_ALWAYS_USE_HTTPS + // is set as false, otherwise HTTPS should be used. + if (!useSecureScheme && !alwaysUseHttps) { + Assert.assertTrue(url.startsWith(FileSystemUriSchemes.HTTP_SCHEME)); + } else { + Assert.assertTrue(url.startsWith(FileSystemUriSchemes.HTTPS_SCHEME)); + } + } +} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestOauthOverAbfsScheme.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestOauthOverAbfsScheme.java new file mode 100644 index 0000000000000..2c80ce85f4e77 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestOauthOverAbfsScheme.java @@ -0,0 +1,63 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.azurebfs; + +import java.lang.reflect.Field; +import java.net.URL; + +import org.junit.Assume; +import org.junit.Test; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.azurebfs.constants.FileSystemUriSchemes; +import org.apache.hadoop.fs.azurebfs.services.AbfsClient; +import org.apache.hadoop.fs.azurebfs.services.AuthType; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; + +/** + * Test Oauth fail fast when uri scheme is incorrect. 
+ */ +public class ITestOauthOverAbfsScheme extends AbstractAbfsIntegrationTest { + + public ITestOauthOverAbfsScheme() throws Exception { + Assume.assumeTrue("ITestOauthOverAbfsScheme is skipped because auth type is not OAuth", + getAuthType() == AuthType.OAuth); + } + + @Test + public void testOauthOverSchemeAbfs() throws Exception { + String[] urlWithoutScheme = this.getTestUrl().split(":"); + String fsUrl; + // update filesystem scheme to use abfs:// + fsUrl = FileSystemUriSchemes.ABFS_SCHEME + ":" + urlWithoutScheme[1]; + + Configuration config = getRawConfiguration(); + config.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsUrl.toString()); + + AbfsClient client = this.getFileSystem(config).getAbfsClient(); + + Field baseUrlField = AbfsClient.class. + getDeclaredField("baseUrl"); + baseUrlField.setAccessible(true); + String url = ((URL) baseUrlField.get(client)).toString(); + + Assume.assumeTrue("OAuth authentication over scheme abfs must use HTTPS", + url.startsWith(FileSystemUriSchemes.HTTPS_SCHEME)); + + } +} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/AbfsFileSystemContract.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/AbfsFileSystemContract.java index c0c5f91fabc65..62bcca174ef8d 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/AbfsFileSystemContract.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/AbfsFileSystemContract.java @@ -56,7 +56,7 @@ public Path getTestPath() { public String toString() { final StringBuilder sb = new StringBuilder( "AbfsFileSystemContract{"); - sb.append("isSecure=").append(isSecure); + sb.append("isSecureScheme=").append(isSecure); sb.append(super.toString()); sb.append('}'); return sb.toString(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestOauthFailOverHttp.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestOauthFailOverHttp.java deleted file mode 100644 index de07c4b2b9134..0000000000000 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestOauthFailOverHttp.java +++ /dev/null @@ -1,55 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.fs.azurebfs.services; - -import java.net.URI; - -import org.junit.Test; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.CommonConfigurationKeysPublic; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.azurebfs.constants.FileSystemUriSchemes; - -import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ACCOUNT_AUTH_TYPE_PROPERTY_NAME; -import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_ABFS_ACCOUNT_NAME; -import static org.apache.hadoop.test.LambdaTestUtils.intercept; - -/** - * Test Oauth fail fast when uri scheme is incorrect. - */ -public class TestOauthFailOverHttp { - - @Test - public void testOauthFailWithSchemeAbfs() throws Exception { - Configuration conf = new Configuration(); - final String account = "fakeaccount.dfs.core.windows.net"; - conf.set(FS_AZURE_ABFS_ACCOUNT_NAME, account); - conf.setEnum(FS_AZURE_ACCOUNT_AUTH_TYPE_PROPERTY_NAME, AuthType.OAuth); - URI defaultUri = new URI(FileSystemUriSchemes.ABFS_SCHEME, - "fakecontainer@" + account, - null, - null, - null); - conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, defaultUri.toString()); - // IllegalArgumentException is expected - // when authenticating using Oauth and scheme is not abfss - intercept(IllegalArgumentException.class, "Incorrect URI", - () -> FileSystem.get(conf)); - } -} From c6942a315b72ab6ea5ced4f2385616f0df0270eb Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Wed, 3 Oct 2018 12:59:16 +0100 Subject: [PATCH 03/60] HADOOP-15792. typo in AzureBlobFileSystem.getIsNamespaceEnabeld. Contributed by Abhishek Modi. (cherry picked from commit e8b8604314a2ea894b1f24939c42e782b83335aa) --- .../fs/azurebfs/AzureBlobFileSystem.java | 18 +-- ...ITestAzureBlobFileSystemAuthorization.java | 32 ++--- .../ITestAzureBlobFileSystemBackCompat.java | 2 +- .../ITestAzureBlobFileSystemPermission.java | 4 +- .../ITestAzureBlobFileSystemRandomRead.java | 2 +- .../azurebfs/ITestAzureBlobFilesystemAcl.java | 116 +++++++++--------- .../azurebfs/ITestWasbAbfsCompatibility.java | 8 +- 7 files changed, 91 insertions(+), 91 deletions(-) diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java index 7c0af860e29b4..7d805421d944c 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java @@ -553,7 +553,7 @@ public void setOwner(final Path path, final String owner, final String group) throws IOException { LOG.debug( "AzureBlobFileSystem.setOwner path: {}", path); - if (!getIsNamespaceEnabeld()) { + if (!getIsNamespaceEnabled()) { super.setOwner(path, owner, group); return; } @@ -584,7 +584,7 @@ public void setOwner(final Path path, final String owner, final String group) public void setPermission(final Path path, final FsPermission permission) throws IOException { LOG.debug("AzureBlobFileSystem.setPermission path: {}", path); - if (!getIsNamespaceEnabeld()) { + if (!getIsNamespaceEnabled()) { super.setPermission(path, permission); return; } @@ -619,7 +619,7 @@ public void modifyAclEntries(final Path path, final List aclSpec) throws IOException { LOG.debug("AzureBlobFileSystem.modifyAclEntries path: {}", path.toString()); - if (!getIsNamespaceEnabeld()) { + if (!getIsNamespaceEnabled()) { throw new 
UnsupportedOperationException( "modifyAclEntries is only supported by storage accounts with the " + "hierarchical namespace enabled."); @@ -653,7 +653,7 @@ public void removeAclEntries(final Path path, final List aclSpec) throws IOException { LOG.debug("AzureBlobFileSystem.removeAclEntries path: {}", path); - if (!getIsNamespaceEnabeld()) { + if (!getIsNamespaceEnabled()) { throw new UnsupportedOperationException( "removeAclEntries is only supported by storage accounts with the " + "hierarchical namespace enabled."); @@ -683,7 +683,7 @@ public void removeAclEntries(final Path path, final List aclSpec) public void removeDefaultAcl(final Path path) throws IOException { LOG.debug("AzureBlobFileSystem.removeDefaultAcl path: {}", path); - if (!getIsNamespaceEnabeld()) { + if (!getIsNamespaceEnabled()) { throw new UnsupportedOperationException( "removeDefaultAcl is only supported by storage accounts with the " + "hierarchical namespace enabled."); @@ -711,7 +711,7 @@ public void removeDefaultAcl(final Path path) throws IOException { public void removeAcl(final Path path) throws IOException { LOG.debug("AzureBlobFileSystem.removeAcl path: {}", path); - if (!getIsNamespaceEnabeld()) { + if (!getIsNamespaceEnabled()) { throw new UnsupportedOperationException( "removeAcl is only supported by storage accounts with the " + "hierarchical namespace enabled."); @@ -742,7 +742,7 @@ public void setAcl(final Path path, final List aclSpec) throws IOException { LOG.debug("AzureBlobFileSystem.setAcl path: {}", path); - if (!getIsNamespaceEnabeld()) { + if (!getIsNamespaceEnabled()) { throw new UnsupportedOperationException( "setAcl is only supported by storage accounts with the hierarchical " + "namespace enabled."); @@ -773,7 +773,7 @@ public void setAcl(final Path path, final List aclSpec) public AclStatus getAclStatus(final Path path) throws IOException { LOG.debug("AzureBlobFileSystem.getAclStatus path: {}", path.toString()); - if (!getIsNamespaceEnabeld()) { + if (!getIsNamespaceEnabled()) { throw new UnsupportedOperationException( "getAclStatus is only supported by storage account with the " + "hierarchical namespace enabled."); @@ -1005,7 +1005,7 @@ AbfsClient getAbfsClient() { } @VisibleForTesting - boolean getIsNamespaceEnabeld() throws AzureBlobFileSystemException { + boolean getIsNamespaceEnabled() throws AzureBlobFileSystemException { return abfsStore.getIsNamespaceEnabled(); } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAuthorization.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAuthorization.java index 17055169773b2..d73f5348d4a0e 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAuthorization.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAuthorization.java @@ -203,7 +203,7 @@ public void testGetFileStatusUnauthorized() throws Exception { @Test public void testSetOwnerAuthorized() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - assumeTrue("This test case only runs when namespace is enabled", fs.getIsNamespaceEnabeld()); + assumeTrue("This test case only runs when namespace is enabled", fs.getIsNamespaceEnabled()); fs.create(TEST_WRITE_ONLY_FILE_PATH_0).close(); fs.setOwner(TEST_WRITE_ONLY_FILE_PATH_0, "testUser", "testGroup"); } @@ -211,7 +211,7 @@ public void testSetOwnerAuthorized() throws Exception { @Test public void 
testSetOwnerUnauthorized() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - assumeTrue("This test case only runs when namespace is enabled", fs.getIsNamespaceEnabeld()); + assumeTrue("This test case only runs when namespace is enabled", fs.getIsNamespaceEnabled()); fs.create(TEST_WRITE_THEN_READ_ONLY_PATH).close(); intercept(AbfsAuthorizationException.class, ()-> { @@ -222,7 +222,7 @@ public void testSetOwnerUnauthorized() throws Exception { @Test public void testSetPermissionAuthorized() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - assumeTrue("This test case only runs when namespace is enabled", fs.getIsNamespaceEnabeld()); + assumeTrue("This test case only runs when namespace is enabled", fs.getIsNamespaceEnabled()); fs.create(TEST_WRITE_ONLY_FILE_PATH_0).close(); fs.setPermission(TEST_WRITE_ONLY_FILE_PATH_0, new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE)); } @@ -230,7 +230,7 @@ public void testSetPermissionAuthorized() throws Exception { @Test public void testSetPermissionUnauthorized() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - assumeTrue("This test case only runs when namespace is enabled", fs.getIsNamespaceEnabeld()); + assumeTrue("This test case only runs when namespace is enabled", fs.getIsNamespaceEnabled()); fs.create(TEST_WRITE_THEN_READ_ONLY_PATH).close(); intercept(AbfsAuthorizationException.class, ()-> { @@ -241,7 +241,7 @@ public void testSetPermissionUnauthorized() throws Exception { @Test public void testModifyAclEntriesAuthorized() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - assumeTrue("This test case only runs when namespace is enabled", fs.getIsNamespaceEnabeld()); + assumeTrue("This test case only runs when namespace is enabled", fs.getIsNamespaceEnabled()); fs.create(TEST_WRITE_ONLY_FILE_PATH_0).close(); List aclSpec = Arrays.asList(aclEntry(ACCESS, GROUP, "bar", FsAction.ALL)); fs.modifyAclEntries(TEST_WRITE_ONLY_FILE_PATH_0, aclSpec); @@ -250,7 +250,7 @@ public void testModifyAclEntriesAuthorized() throws Exception { @Test public void testModifyAclEntriesUnauthorized() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - assumeTrue("This test case only runs when namespace is enabled", fs.getIsNamespaceEnabeld()); + assumeTrue("This test case only runs when namespace is enabled", fs.getIsNamespaceEnabled()); fs.create(TEST_WRITE_THEN_READ_ONLY_PATH).close(); List aclSpec = Arrays.asList(aclEntry(ACCESS, GROUP, "bar", FsAction.ALL)); intercept(AbfsAuthorizationException.class, @@ -262,7 +262,7 @@ public void testModifyAclEntriesUnauthorized() throws Exception { @Test public void testRemoveAclEntriesAuthorized() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - assumeTrue("This test case only runs when namespace is enabled", fs.getIsNamespaceEnabeld()); + assumeTrue("This test case only runs when namespace is enabled", fs.getIsNamespaceEnabled()); fs.create(TEST_WRITE_ONLY_FILE_PATH_0).close(); List aclSpec = Arrays.asList(aclEntry(ACCESS, GROUP, "bar", FsAction.ALL)); fs.removeAclEntries(TEST_WRITE_ONLY_FILE_PATH_0, aclSpec); @@ -271,7 +271,7 @@ public void testRemoveAclEntriesAuthorized() throws Exception { @Test public void testRemoveAclEntriesUnauthorized() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - assumeTrue("This test case only runs when namespace is enabled", fs.getIsNamespaceEnabeld()); + assumeTrue("This test case only runs when namespace is enabled", fs.getIsNamespaceEnabled()); 
fs.create(TEST_WRITE_THEN_READ_ONLY_PATH).close(); List aclSpec = Arrays.asList(aclEntry(ACCESS, GROUP, "bar", FsAction.ALL)); intercept(AbfsAuthorizationException.class, @@ -283,7 +283,7 @@ public void testRemoveAclEntriesUnauthorized() throws Exception { @Test public void testRemoveDefaultAclAuthorized() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - assumeTrue("This test case only runs when namespace is enabled", fs.getIsNamespaceEnabeld()); + assumeTrue("This test case only runs when namespace is enabled", fs.getIsNamespaceEnabled()); fs.create(TEST_WRITE_ONLY_FILE_PATH_0).close(); fs.removeDefaultAcl(TEST_WRITE_ONLY_FILE_PATH_0); } @@ -291,7 +291,7 @@ public void testRemoveDefaultAclAuthorized() throws Exception { @Test public void testRemoveDefaultAclUnauthorized() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - assumeTrue("This test case only runs when namespace is enabled", fs.getIsNamespaceEnabeld()); + assumeTrue("This test case only runs when namespace is enabled", fs.getIsNamespaceEnabled()); fs.create(TEST_WRITE_THEN_READ_ONLY_PATH).close(); intercept(AbfsAuthorizationException.class, ()-> { @@ -302,7 +302,7 @@ public void testRemoveDefaultAclUnauthorized() throws Exception { @Test public void testRemoveAclAuthorized() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - assumeTrue("This test case only runs when namespace is enabled", fs.getIsNamespaceEnabeld()); + assumeTrue("This test case only runs when namespace is enabled", fs.getIsNamespaceEnabled()); fs.create(TEST_WRITE_ONLY_FILE_PATH_0).close(); fs.removeAcl(TEST_WRITE_ONLY_FILE_PATH_0); } @@ -310,7 +310,7 @@ public void testRemoveAclAuthorized() throws Exception { @Test public void testRemoveAclUnauthorized() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - assumeTrue("This test case only runs when namespace is enabled", fs.getIsNamespaceEnabeld()); + assumeTrue("This test case only runs when namespace is enabled", fs.getIsNamespaceEnabled()); fs.create(TEST_WRITE_THEN_READ_ONLY_PATH).close(); intercept(AbfsAuthorizationException.class, ()-> { @@ -321,7 +321,7 @@ public void testRemoveAclUnauthorized() throws Exception { @Test public void testSetAclAuthorized() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - assumeTrue("This test case only runs when namespace is enabled", fs.getIsNamespaceEnabeld()); + assumeTrue("This test case only runs when namespace is enabled", fs.getIsNamespaceEnabled()); fs.create(TEST_WRITE_ONLY_FILE_PATH_0).close(); List aclSpec = Arrays.asList(aclEntry(ACCESS, GROUP, "bar", FsAction.ALL)); fs.setAcl(TEST_WRITE_ONLY_FILE_PATH_0, aclSpec); @@ -330,7 +330,7 @@ public void testSetAclAuthorized() throws Exception { @Test public void testSetAclUnauthorized() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - assumeTrue("This test case only runs when namespace is enabled", fs.getIsNamespaceEnabeld()); + assumeTrue("This test case only runs when namespace is enabled", fs.getIsNamespaceEnabled()); fs.create(TEST_WRITE_THEN_READ_ONLY_PATH).close(); List aclSpec = Arrays.asList(aclEntry(ACCESS, GROUP, "bar", FsAction.ALL)); intercept(AbfsAuthorizationException.class, @@ -342,7 +342,7 @@ public void testSetAclUnauthorized() throws Exception { @Test public void testGetAclStatusAuthorized() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - assumeTrue("This test case only runs when namespace is enabled", fs.getIsNamespaceEnabeld()); + assumeTrue("This test case only runs 
when namespace is enabled", fs.getIsNamespaceEnabled()); fs.create(TEST_WRITE_THEN_READ_ONLY_PATH).close(); List aclSpec = Arrays.asList(aclEntry(ACCESS, GROUP, "bar", FsAction.ALL)); fs.getAclStatus(TEST_WRITE_THEN_READ_ONLY_PATH); @@ -351,7 +351,7 @@ public void testGetAclStatusAuthorized() throws Exception { @Test public void testGetAclStatusUnauthorized() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - assumeTrue("This test case only runs when namespace is enabled", fs.getIsNamespaceEnabeld()); + assumeTrue("This test case only runs when namespace is enabled", fs.getIsNamespaceEnabled()); fs.create(TEST_WRITE_ONLY_FILE_PATH_0).close(); List aclSpec = Arrays.asList(aclEntry(ACCESS, GROUP, "bar", FsAction.ALL)); intercept(AbfsAuthorizationException.class, diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemBackCompat.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemBackCompat.java index d8940f7d75301..22d4990cc42db 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemBackCompat.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemBackCompat.java @@ -45,7 +45,7 @@ public ITestAzureBlobFileSystemBackCompat() throws Exception { public void testBlobBackCompat() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); // test only valid for non-namespace enabled account - Assume.assumeFalse(fs.getIsNamespaceEnabeld()); + Assume.assumeFalse(fs.getIsNamespaceEnabled()); String storageConnectionString = getBlobConnectionString(); CloudStorageAccount storageAccount = CloudStorageAccount.parse(storageConnectionString); CloudBlobClient blobClient = storageAccount.createCloudBlobClient(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemPermission.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemPermission.java index bbb2e240bee55..257fb4fdbd2ab 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemPermission.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemPermission.java @@ -76,7 +76,7 @@ public static Collection abfsCreateNonRecursiveTestData() public void testFilePermission() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - Assume.assumeTrue(fs.getIsNamespaceEnabeld()); + Assume.assumeTrue(fs.getIsNamespaceEnabled()); fs.getConf().set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, DEFAULT_UMASK_VALUE); path = new Path(testRoot, UUID.randomUUID().toString()); @@ -92,7 +92,7 @@ public void testFilePermission() throws Exception { @Test public void testFolderPermission() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - Assume.assumeTrue(fs.getIsNamespaceEnabeld()); + Assume.assumeTrue(fs.getIsNamespaceEnabled()); fs.getConf().set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "027"); path = new Path(testRoot, UUID.randomUUID().toString()); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRandomRead.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRandomRead.java index 38e7133ed8e5b..768f4bbc5d988 100644 --- 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRandomRead.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRandomRead.java @@ -525,7 +525,7 @@ private static double toMbps(long bytes, long milliseconds) { private void createTestFile() throws Exception { final AzureBlobFileSystem abFs = this.getFileSystem(); // test only valid for non-namespace enabled account - Assume.assumeFalse(abFs.getIsNamespaceEnabeld()); + Assume.assumeFalse(abFs.getIsNamespaceEnabled()); FileSystem fs = this.getWasbFileSystem(); if (fs.exists(TEST_FILE_PATH)) { diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFilesystemAcl.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFilesystemAcl.java index acafe03be0d2d..7377132e2a0a9 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFilesystemAcl.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFilesystemAcl.java @@ -80,7 +80,7 @@ public ITestAzureBlobFilesystemAcl() throws Exception { @Test public void testModifyAclEntries() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); fs.mkdirs(path, FsPermission.createImmutable((short) RWX_RX)); @@ -113,7 +113,7 @@ public void testModifyAclEntries() throws Exception { @Test public void testModifyAclEntriesOnlyAccess() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); fs.create(path).close(); fs.setPermission(path, FsPermission.createImmutable((short) RW_R)); @@ -137,7 +137,7 @@ public void testModifyAclEntriesOnlyAccess() throws Exception { @Test public void testModifyAclEntriesOnlyDefault() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -160,7 +160,7 @@ public void testModifyAclEntriesOnlyDefault() throws Exception { @Test public void testModifyAclEntriesMinimal() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); fs.create(path).close(); fs.setPermission(path, FsPermission.createImmutable((short) RW_R)); @@ -178,7 +178,7 @@ public void testModifyAclEntriesMinimal() throws Exception { @Test public void testModifyAclEntriesMinimalDefault() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -198,7 +198,7 @@ public void testModifyAclEntriesMinimalDefault() throws Exception { @Test public void testModifyAclEntriesCustomMask() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - 
assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); fs.create(path).close(); fs.setPermission(path, FsPermission.createImmutable((short) RW_R)); @@ -217,7 +217,7 @@ public void testModifyAclEntriesCustomMask() throws Exception { @Test public void testModifyAclEntriesStickyBit() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 01750)); List aclSpec = Lists.newArrayList( @@ -247,7 +247,7 @@ public void testModifyAclEntriesStickyBit() throws Exception { @Test(expected=FileNotFoundException.class) public void testModifyAclEntriesPathNotFound() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); // Path has not been created. List aclSpec = Lists.newArrayList( @@ -261,7 +261,7 @@ public void testModifyAclEntriesPathNotFound() throws Exception { @Test (expected=Exception.class) public void testModifyAclEntriesDefaultOnFile() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); fs.create(path).close(); fs.setPermission(path, FsPermission.createImmutable((short) RW_R)); @@ -273,7 +273,7 @@ public void testModifyAclEntriesDefaultOnFile() throws Exception { @Test public void testRemoveAclEntries() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -301,7 +301,7 @@ public void testRemoveAclEntries() throws Exception { @Test public void testRemoveAclEntriesOnlyAccess() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); fs.create(path).close(); fs.setPermission(path, FsPermission.createImmutable((short) RW_R)); @@ -326,7 +326,7 @@ public void testRemoveAclEntriesOnlyAccess() throws Exception { @Test public void testRemoveAclEntriesOnlyDefault() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -353,7 +353,7 @@ public void testRemoveAclEntriesOnlyDefault() throws Exception { @Test public void testRemoveAclEntriesMinimal() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); fs.create(path).close(); fs.setPermission(path, FsPermission.createImmutable((short) RWX_RW)); @@ -376,7 +376,7 @@ public void testRemoveAclEntriesMinimal() throws Exception { @Test public void testRemoveAclEntriesMinimalDefault() throws Exception { 
final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -404,7 +404,7 @@ public void testRemoveAclEntriesMinimalDefault() throws Exception { @Test public void testRemoveAclEntriesStickyBit() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 01750)); List aclSpec = Lists.newArrayList( @@ -432,7 +432,7 @@ public void testRemoveAclEntriesStickyBit() throws Exception { @Test(expected=FileNotFoundException.class) public void testRemoveAclEntriesPathNotFound() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); // Path has not been created. List aclSpec = Lists.newArrayList( @@ -443,7 +443,7 @@ public void testRemoveAclEntriesPathNotFound() throws Exception { @Test public void testRemoveDefaultAcl() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -465,7 +465,7 @@ public void testRemoveDefaultAcl() throws Exception { @Test public void testRemoveDefaultAclOnlyAccess() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); fs.create(path).close(); fs.setPermission(path, FsPermission.createImmutable((short) RW_R)); @@ -487,7 +487,7 @@ public void testRemoveDefaultAclOnlyAccess() throws Exception { @Test public void testRemoveDefaultAclOnlyDefault() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -503,7 +503,7 @@ public void testRemoveDefaultAclOnlyDefault() throws Exception { @Test public void testRemoveDefaultAclMinimal() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); fs.removeDefaultAcl(path); @@ -516,7 +516,7 @@ public void testRemoveDefaultAclMinimal() throws Exception { @Test public void testRemoveDefaultAclStickyBit() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 01750)); List aclSpec = Lists.newArrayList( @@ -538,7 +538,7 @@ public void testRemoveDefaultAclStickyBit() throws Exception { 
@Test(expected=FileNotFoundException.class) public void testRemoveDefaultAclPathNotFound() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); // Path has not been created. fs.removeDefaultAcl(path); @@ -547,7 +547,7 @@ public void testRemoveDefaultAclPathNotFound() throws Exception { @Test public void testRemoveAcl() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -569,7 +569,7 @@ public void testRemoveAcl() throws Exception { @Test public void testRemoveAclMinimalAcl() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); fs.create(path).close(); fs.setPermission(path, FsPermission.createImmutable((short) RW_R)); @@ -583,7 +583,7 @@ public void testRemoveAclMinimalAcl() throws Exception { @Test public void testRemoveAclStickyBit() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 01750)); List aclSpec = Lists.newArrayList( @@ -603,7 +603,7 @@ public void testRemoveAclStickyBit() throws Exception { @Test public void testRemoveAclOnlyDefault() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -622,7 +622,7 @@ public void testRemoveAclOnlyDefault() throws Exception { @Test(expected=FileNotFoundException.class) public void testRemoveAclPathNotFound() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); // Path has not been created. 
fs.removeAcl(path); @@ -631,7 +631,7 @@ public void testRemoveAclPathNotFound() throws Exception { @Test public void testSetAcl() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -657,7 +657,7 @@ public void testSetAcl() throws Exception { @Test public void testSetAclOnlyAccess() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); fs.create(path).close(); fs.setPermission(path, FsPermission.createImmutable((short) RW_R)); @@ -678,7 +678,7 @@ public void testSetAclOnlyAccess() throws Exception { @Test public void testSetAclOnlyDefault() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -698,7 +698,7 @@ public void testSetAclOnlyDefault() throws Exception { @Test public void testSetAclMinimal() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); fs.create(path).close(); fs.setPermission(path, FsPermission.createImmutable((short) RW_R_R)); @@ -722,7 +722,7 @@ public void testSetAclMinimal() throws Exception { @Test public void testSetAclMinimalDefault() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -742,7 +742,7 @@ public void testSetAclMinimalDefault() throws Exception { @Test public void testSetAclCustomMask() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); fs.create(path).close(); fs.setPermission(path, FsPermission.createImmutable((short) RW_R)); @@ -764,7 +764,7 @@ public void testSetAclCustomMask() throws Exception { @Test public void testSetAclStickyBit() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 01750)); List aclSpec = Lists.newArrayList( @@ -790,7 +790,7 @@ public void testSetAclStickyBit() throws Exception { @Test(expected=FileNotFoundException.class) public void testSetAclPathNotFound() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); // Path has not been created. 
List aclSpec = Lists.newArrayList( @@ -804,7 +804,7 @@ public void testSetAclPathNotFound() throws Exception { @Test(expected=Exception.class) public void testSetAclDefaultOnFile() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); fs.create(path).close(); fs.setPermission(path, FsPermission.createImmutable((short) RW_R)); @@ -816,7 +816,7 @@ public void testSetAclDefaultOnFile() throws Exception { @Test public void testSetPermission() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -843,7 +843,7 @@ public void testSetPermission() throws Exception { @Test public void testSetPermissionOnlyAccess() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); fs.create(path).close(); fs.setPermission(path, FsPermission.createImmutable((short) RW_R)); @@ -865,7 +865,7 @@ public void testSetPermissionOnlyAccess() throws Exception { @Test public void testSetPermissionOnlyDefault() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -889,7 +889,7 @@ public void testSetPermissionOnlyDefault() throws Exception { @Test public void testDefaultAclNewFile() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -909,7 +909,7 @@ public void testDefaultAclNewFile() throws Exception { @Ignore // wait umask fix to be deployed public void testOnlyAccessAclNewFile() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -927,7 +927,7 @@ public void testOnlyAccessAclNewFile() throws Exception { @Ignore // wait investigation in service public void testDefaultMinimalAclNewFile() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -946,7 +946,7 @@ public void testDefaultMinimalAclNewFile() throws Exception { @Test public void testDefaultAclNewDir() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); 
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -973,7 +973,7 @@ public void testDefaultAclNewDir() throws Exception { @Ignore // wait umask fix to be deployed public void testOnlyAccessAclNewDir() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -991,7 +991,7 @@ public void testOnlyAccessAclNewDir() throws Exception { @Ignore // wait investigation in service public void testDefaultMinimalAclNewDir() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -1013,7 +1013,7 @@ public void testDefaultMinimalAclNewDir() throws Exception { @Test public void testDefaultAclNewFileWithMode() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX_RX)); List aclSpec = Lists.newArrayList( @@ -1035,7 +1035,7 @@ public void testDefaultAclNewFileWithMode() throws Exception { @Test public void testDefaultAclNewDirWithMode() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX_RX)); List aclSpec = Lists.newArrayList( @@ -1059,7 +1059,7 @@ public void testDefaultAclNewDirWithMode() throws Exception { @Test public void testDefaultAclRenamedFile() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); Path dirPath = new Path(path, "dir"); FileSystem.mkdirs(fs, dirPath, FsPermission.createImmutable((short) RWX_RX)); @@ -1081,7 +1081,7 @@ public void testDefaultAclRenamedFile() throws Exception { @Test public void testDefaultAclRenamedDir() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); path = new Path(testRoot, UUID.randomUUID().toString()); Path dirPath = new Path(path, "dir"); FileSystem.mkdirs(fs, dirPath, FsPermission.createImmutable((short) RWX_RX)); @@ -1102,7 +1102,7 @@ public void testDefaultAclRenamedDir() throws Exception { @Test public void testEnsureAclOperationWorksForRoot() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabeld()); + assumeTrue(fs.getIsNamespaceEnabled()); Path rootPath = new Path("/"); @@ -1127,7 +1127,7 @@ public void testEnsureAclOperationWorksForRoot() throws Exception { @Test public void testSetOwnerForNonNamespaceEnabledAccount() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - Assume.assumeTrue(!fs.getIsNamespaceEnabeld()); + Assume.assumeTrue(!fs.getIsNamespaceEnabled()); final 
Path filePath = new Path(methodName.getMethodName()); fs.create(filePath); @@ -1144,7 +1144,7 @@ public void testSetOwnerForNonNamespaceEnabledAccount() throws Exception { @Test public void testSetPermissionForNonNamespaceEnabledAccount() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - Assume.assumeTrue(!fs.getIsNamespaceEnabeld()); + Assume.assumeTrue(!fs.getIsNamespaceEnabled()); final Path filePath = new Path(methodName.getMethodName()); fs.create(filePath); @@ -1163,7 +1163,7 @@ public void testSetPermissionForNonNamespaceEnabledAccount() throws Exception { @Test public void testModifyAclEntriesForNonNamespaceEnabledAccount() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - Assume.assumeTrue(!fs.getIsNamespaceEnabeld()); + Assume.assumeTrue(!fs.getIsNamespaceEnabled()); final Path filePath = new Path(methodName.getMethodName()); fs.create(filePath); try { @@ -1180,7 +1180,7 @@ public void testModifyAclEntriesForNonNamespaceEnabledAccount() throws Exception @Test public void testRemoveAclEntriesEntriesForNonNamespaceEnabledAccount() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - Assume.assumeTrue(!fs.getIsNamespaceEnabeld()); + Assume.assumeTrue(!fs.getIsNamespaceEnabled()); final Path filePath = new Path(methodName.getMethodName()); fs.create(filePath); try { @@ -1197,7 +1197,7 @@ public void testRemoveAclEntriesEntriesForNonNamespaceEnabledAccount() throws Ex @Test public void testRemoveDefaultAclForNonNamespaceEnabledAccount() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - Assume.assumeTrue(!fs.getIsNamespaceEnabeld()); + Assume.assumeTrue(!fs.getIsNamespaceEnabled()); final Path filePath = new Path(methodName.getMethodName()); fs.create(filePath); try { @@ -1211,7 +1211,7 @@ public void testRemoveDefaultAclForNonNamespaceEnabledAccount() throws Exception @Test public void testRemoveAclForNonNamespaceEnabledAccount() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - Assume.assumeTrue(!fs.getIsNamespaceEnabeld()); + Assume.assumeTrue(!fs.getIsNamespaceEnabled()); final Path filePath = new Path(methodName.getMethodName()); fs.create(filePath); try { @@ -1225,7 +1225,7 @@ public void testRemoveAclForNonNamespaceEnabledAccount() throws Exception { @Test public void testSetAclForNonNamespaceEnabledAccount() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - Assume.assumeTrue(!fs.getIsNamespaceEnabeld()); + Assume.assumeTrue(!fs.getIsNamespaceEnabled()); final Path filePath = new Path(methodName.getMethodName()); fs.create(filePath); try { @@ -1242,7 +1242,7 @@ public void testSetAclForNonNamespaceEnabledAccount() throws Exception { @Test public void testGetAclStatusForNonNamespaceEnabledAccount() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - Assume.assumeTrue(!fs.getIsNamespaceEnabeld()); + Assume.assumeTrue(!fs.getIsNamespaceEnabled()); final Path filePath = new Path(methodName.getMethodName()); fs.create(filePath); try { diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestWasbAbfsCompatibility.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestWasbAbfsCompatibility.java index 33a5805ec98aa..fdf9788c36fd9 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestWasbAbfsCompatibility.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestWasbAbfsCompatibility.java @@ -59,7 +59,7 @@ 
public void testListFileStatus() throws Exception { // create file using abfs AzureBlobFileSystem fs = getFileSystem(); // test only valid for non-namespace enabled account - Assume.assumeFalse(fs.getIsNamespaceEnabeld()); + Assume.assumeFalse(fs.getIsNamespaceEnabled()); NativeAzureFileSystem wasb = getWasbFileSystem(); @@ -93,7 +93,7 @@ public void testReadFile() throws Exception { AzureBlobFileSystem abfs = getFileSystem(); // test only valid for non-namespace enabled account - Assume.assumeFalse(abfs.getIsNamespaceEnabeld()); + Assume.assumeFalse(abfs.getIsNamespaceEnabled()); NativeAzureFileSystem wasb = getWasbFileSystem(); @@ -132,7 +132,7 @@ public void testDir() throws Exception { AzureBlobFileSystem abfs = getFileSystem(); // test only valid for non-namespace enabled account - Assume.assumeFalse(abfs.getIsNamespaceEnabeld()); + Assume.assumeFalse(abfs.getIsNamespaceEnabled()); NativeAzureFileSystem wasb = getWasbFileSystem(); @@ -166,7 +166,7 @@ public void testSetWorkingDirectory() throws Exception { //create folders AzureBlobFileSystem abfs = getFileSystem(); // test only valid for non-namespace enabled account - Assume.assumeFalse(abfs.getIsNamespaceEnabeld()); + Assume.assumeFalse(abfs.getIsNamespaceEnabled()); NativeAzureFileSystem wasb = getWasbFileSystem(); From 4de3cf1968a693a9ea5330773f93b1e52b717358 Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Thu, 4 Oct 2018 09:51:42 +0900 Subject: [PATCH 04/60] YARN-8840. Add missing cleanupSSLConfig() call for TestTimelineClient test. Contributed by Aki Tanaka. (cherry picked from commit 39b35036ba47064149003046a7b59feb01575d1e) --- .../client/api/impl/TestTimelineClient.java | 25 +++++++++++++------ 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClient.java index e3fffef614341..715f7e5ff204d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClient.java @@ -25,7 +25,6 @@ import static org.mockito.Mockito.spy; import static org.mockito.Mockito.when; -import java.io.File; import java.io.IOException; import java.net.ConnectException; import java.net.SocketTimeoutException; @@ -62,6 +61,8 @@ public class TestTimelineClient { private TimelineClientImpl client; private TimelineWriter spyTimelineWriter; + private String keystoresDir; + private String sslConfDir; @Before public void setup() { @@ -72,10 +73,13 @@ public void setup() { } @After - public void tearDown() { + public void tearDown() throws Exception { if (client != null) { client.stop(); } + if (isSSLConfigured()) { + KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir); + } } @Test @@ -454,11 +458,7 @@ public void testTimelineClientCleanup() throws Exception { conf.setInt(YarnConfiguration.TIMELINE_SERVICE_CLIENT_MAX_RETRIES, 0); conf.set(YarnConfiguration.YARN_HTTP_POLICY_KEY, Policy.HTTPS_ONLY.name()); - File testDir = TestGenericTestUtils.getTestDir(); - String sslConfDir = - KeyStoreTestUtil.getClasspathDir(TestTimelineClient.class); - KeyStoreTestUtil.setupSSLConfig(testDir.getAbsolutePath(), - sslConfDir, conf, false); + setupSSLConfig(conf); client = createTimelineClient(conf); ThreadGroup
threadGroup = Thread.currentThread().getThreadGroup(); @@ -492,6 +492,17 @@ public void testTimelineClientCleanup() throws Exception { Assert.assertFalse("Reloader is still alive", reloaderStillAlive); } + private void setupSSLConfig(YarnConfiguration conf) throws Exception { + keystoresDir = TestGenericTestUtils.getTestDir().getAbsolutePath(); + sslConfDir = + KeyStoreTestUtil.getClasspathDir(TestTimelineClient.class); + KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false); + } + + private boolean isSSLConfigured() { + return keystoresDir != null && sslConfDir != null; + } + private static class TestTimelineDelegationTokenSecretManager extends AbstractDelegationTokenSecretManager<TimelineDelegationTokenIdentifier> { From e185ae2d17e1ac4e432549fde077a5ee21041d8f Mon Sep 17 00:00:00 2001 From: Yiqun Lin Date: Thu, 4 Oct 2018 10:30:30 +0800 Subject: [PATCH 05/60] HADOOP-15817. Reuse Object Mapper in KMSJSONReader. Contributed by Jonathan Eagles. (cherry picked from commit 81f635f47f0737eb551bef1aa55afdf7b268253d) --- .../apache/hadoop/crypto/key/kms/server/KMSJSONReader.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJSONReader.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJSONReader.java index f6f670be0ae8c..af781f5277850 100644 --- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJSONReader.java +++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJSONReader.java @@ -38,6 +38,7 @@ @Consumes(MediaType.APPLICATION_JSON) @InterfaceAudience.Private public class KMSJSONReader implements MessageBodyReader<Object> { + private static final ObjectMapper MAPPER = new ObjectMapper(); @Override public boolean isReadable(Class<?> type, Type genericType, @@ -51,7 +52,6 @@ public Object readFrom(Class<Object> type, Type genericType, Annotation[] annotations, MediaType mediaType, MultivaluedMap<String, String> httpHeaders, InputStream entityStream) throws IOException, WebApplicationException { - ObjectMapper mapper = new ObjectMapper(); - return mapper.readValue(entityStream, type); + return MAPPER.readValue(entityStream, type); } } From abe4a8e5d82e70ce991e2830f20eba9f25a2491a Mon Sep 17 00:00:00 2001 From: Wangda Tan Date: Thu, 4 Oct 2018 10:31:33 -0700 Subject: [PATCH 06/60] YARN-8758. Support getting PreemptionMessage when using AMRMClientAsync.
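The KMSJSONReader change in PATCH 05 above works because a Jackson ObjectMapper is safe for concurrent use once configured, while building one per request repeatedly pays the cost of populating its internal serializer and deserializer caches. A minimal sketch of the same pattern outside KMS; the class name is illustrative and not part of the patch:

import java.io.IOException;
import java.io.InputStream;
import java.util.Map;

import com.fasterxml.jackson.databind.ObjectMapper;

/** Hypothetical reader showing the shared-ObjectMapper pattern. */
public final class SharedMapperExample {

  // One mapper, built once at class load and reused for every request.
  // ObjectMapper is thread-safe for reads once configured, so sharing it
  // avoids rebuilding its caches on each call.
  private static final ObjectMapper MAPPER = new ObjectMapper();

  private SharedMapperExample() {
  }

  public static Map<?, ?> readJson(InputStream in) throws IOException {
    return MAPPER.readValue(in, Map.class);
  }
}

The static final field gives the mapper the same lifetime the patch relies on: one instance per loaded class, shared across threads.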
(Zian Chen via wangda) Change-Id: Ibf5d165f49957b582eeadeb41dc285c84d2f05e7 (cherry picked from commit 6926fd0ec634df2576bbc9f45e9636b99260db72) --- .../hadoop/yarn/client/api/async/AMRMClientAsync.java | 11 +++++++++++ .../client/api/async/impl/AMRMClientAsyncImpl.java | 9 +++++++++ 2 files changed, 20 insertions(+) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java index 3dd2f718ba69b..c9f4e5f79a06b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java @@ -39,6 +39,7 @@ import org.apache.hadoop.yarn.api.records.ExecutionType; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.NodeReport; +import org.apache.hadoop.yarn.api.records.PreemptionMessage; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.RejectedSchedulingRequest; import org.apache.hadoop.yarn.api.records.Resource; @@ -553,6 +554,16 @@ public void onContainersReceivedFromPreviousAttempts( public void onRequestsRejected( List rejectedSchedulingRequests) { } + + /** + * Called when the RM responds to a heartbeat with preemption message + * @param preemptionMessage + */ + @Public + @Unstable + public void onPreemptionMessageReceived( + PreemptionMessage preemptionMessage) { + } } /** diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java index 3cf2c3496ef50..922b185a1a2dd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java @@ -37,6 +37,7 @@ import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.NodeReport; +import org.apache.hadoop.yarn.api.records.PreemptionMessage; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.RejectedSchedulingRequest; import org.apache.hadoop.yarn.api.records.Resource; @@ -401,6 +402,14 @@ public void run() { handler.onContainersAllocated(allocated); } + PreemptionMessage preemptionMessage = response.getPreemptionMessage(); + if (preemptionMessage != null) { + if (handler instanceof AMRMClientAsync.AbstractCallbackHandler) { + ((AMRMClientAsync.AbstractCallbackHandler) handler) + .onPreemptionMessageReceived(preemptionMessage); + } + } + if (!response.getContainersFromPreviousAttempts().isEmpty()) { if (handler instanceof AMRMClientAsync.AbstractCallbackHandler) { ((AMRMClientAsync.AbstractCallbackHandler) handler) From b3ac8869338faeabedbc27e95e3166c830a9a761 Mon Sep 17 00:00:00 2001 From: Wangda Tan Date: Thu, 4 Oct 2018 10:48:47 -0700 Subject: [PATCH 07/60] YARN-8844. TestNMProxy unit test is failing. 
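The YARN-8758 change above dispatches the new callback only when the registered handler extends AMRMClientAsync.AbstractCallbackHandler. A hedged sketch of an application master handler consuming it; the class name and the reactions in the comments are illustrative, and the set of stubbed callbacks assumes the abstract methods present on this branch:

import java.util.List;

import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.PreemptionMessage;
import org.apache.hadoop.yarn.api.records.UpdatedContainer;
import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync;

/** Hypothetical AM callback handler reacting to RM preemption warnings. */
public class PreemptionAwareCallbackHandler
    extends AMRMClientAsync.AbstractCallbackHandler {

  @Override
  public void onPreemptionMessageReceived(PreemptionMessage message) {
    // The RM is warning that resources will be reclaimed; checkpoint work
    // or release containers before they are taken forcefully.
    if (message.getStrictContract() != null) {
      // Strict contract: the listed resources will be reclaimed regardless.
    }
    if (message.getContract() != null) {
      // Negotiable contract: the AM may pick which containers to give back.
    }
  }

  // Remaining required callbacks, stubbed out for the sketch.
  @Override public void onContainersCompleted(List<ContainerStatus> statuses) { }
  @Override public void onContainersAllocated(List<Container> containers) { }
  @Override public void onContainersUpdated(List<UpdatedContainer> containers) { }
  @Override public void onShutdownRequest() { }
  @Override public void onNodesUpdated(List<NodeReport> updatedNodes) { }
  @Override public float getProgress() { return 0.5f; }
  @Override public void onError(Throwable e) { }
}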
(Eric Yang via wangda) Change-Id: I241fa8701b6f1dbcad87fd2e9a429e32e7aa40f5 --- .../yarn/server/nodemanager/containermanager/TestNMProxy.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java index e6a7a02e462d9..5f023f02df1d9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java @@ -161,7 +161,7 @@ public void testNMProxyRPCRetry() throws Exception { IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY, 100); // connect to some dummy address so that it can trigger // connection failure and RPC level retries. - newConf.set(YarnConfiguration.NM_ADDRESS, "1234"); + newConf.set(YarnConfiguration.NM_ADDRESS, "0.0.0.0:1234"); ContainerManagementProtocol proxy = getNMProxy(newConf); try { proxy.startContainers(allRequests); From 62d02eecd0079a9f1fbfb18743c5324a61a03a7c Mon Sep 17 00:00:00 2001 From: Yiqun Lin Date: Fri, 5 Oct 2018 09:55:08 +0800 Subject: [PATCH 08/60] HDFS-13957. Fix incorrect option used in description of InMemoryAliasMap. (cherry picked from commit 619e490333fa89601fd476dedac6d16610e9a52a) --- .../hadoop-hdfs/src/site/markdown/HdfsProvidedStorage.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsProvidedStorage.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsProvidedStorage.md index b8d5321534392..21145e6908ea5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsProvidedStorage.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsProvidedStorage.md @@ -152,7 +152,7 @@ Currently, the following two types of alias maps are supported. This is a LevelDB-based alias map that runs as a separate server in Namenode. The alias map itself can be created using the `fs2img` tool using the option -`-Ddfs.provided.aliasmap.leveldb.path=file:///path/to/leveldb/map/dingos.db -o org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.LevelDBFileRegionAliasMap` +`-Ddfs.provided.aliasmap.leveldb.path=file:///path/to/leveldb/map/dingos.db -b org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.LevelDBFileRegionAliasMap` as in the example above. Datanodes contact this alias map using the `org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMapProtocol` protocol. From df189bf50eb90a8caca98fe2516bf9240296e266 Mon Sep 17 00:00:00 2001 From: Takanobu Asanuma Date: Fri, 5 Oct 2018 14:57:45 +0900 Subject: [PATCH 09/60] HADOOP-15816. Upgrade Apache Zookeeper version due to security concerns. Contributed by Akira Ajisaka.
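The TestNMProxy fix above swaps a bare port for a host:port pair because YARN resolves NM_ADDRESS through NetUtils, which requires a host authority in the value. A small sketch of that parsing assumption; the exact exception type and message may vary by branch:

import java.net.InetSocketAddress;

import org.apache.hadoop.net.NetUtils;

public class AddressParsingSketch {
  public static void main(String[] args) {
    // A host:port pair resolves cleanly into an address.
    InetSocketAddress ok = NetUtils.createSocketAddr("0.0.0.0:1234");
    System.out.println("parsed: " + ok);

    // A bare port carries no host authority; createSocketAddr is expected
    // to reject it rather than guess a host.
    try {
      NetUtils.createSocketAddr("1234");
    } catch (IllegalArgumentException e) {
      System.out.println("rejected: " + e.getMessage());
    }
  }
}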
(cherry picked from commit 241cbec2dab3c3d49b48f42b86e8bd85cd1f08f3) --- hadoop-project/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index ffa2646134469..c6d51c099002a 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -86,7 +86,7 @@ 2.5.0 ${env.HADOOP_PROTOC_PATH} - 3.4.9 + 3.4.13 2.12.0 3.0.0 3.1.0-RC1 From 8957a61ed9670bd4c61d77a4f43bf18116cb81a2 Mon Sep 17 00:00:00 2001 From: Sunil G Date: Fri, 5 Oct 2018 12:28:09 +0530 Subject: [PATCH 10/60] YARN-7957. [UI2] YARN service delete option disappears after stopping application. Contributed by Akhil PB. (cherry picked from commit 751f626e506b6cf8cc87ca65b65550871dca74fb) --- .../main/webapp/app/adapters/yarn-service.js | 32 ++++++++++++++ .../app/controllers/app-table-columns.js | 14 ++++-- .../main/webapp/app/controllers/yarn-app.js | 20 ++++++++- .../src/main/webapp/app/models/yarn-app.js | 15 ++++++- .../main/webapp/app/models/yarn-service.js | 30 +++++++++++++ .../src/main/webapp/app/routes/yarn-app.js | 12 +++++ .../main/webapp/app/serializers/yarn-app.js | 4 +- .../webapp/app/serializers/yarn-service.js | 44 +++++++++++++++++++ .../main/webapp/app/templates/yarn-app.hbs | 28 ++++++++---- .../webapp/app/templates/yarn-services.hbs | 4 +- 10 files changed, 185 insertions(+), 18 deletions(-) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-service.js create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-service.js create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-service.js diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-service.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-service.js new file mode 100644 index 0000000000000..221281047496a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-service.js @@ -0,0 +1,32 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import RESTAbstractAdapter from './restabstract'; + +export default RESTAbstractAdapter.extend({ + address: "rmWebAddress", + restNameSpace: "dashService", + serverName: "DASH", + + urlForQueryRecord(query/*, modelName*/) { + var url = this.buildURL(); + url += '/' + query.serviceName; + delete query.serviceName; + return url; + } +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js index 552a157dd1f00..c2cb0bd075e06 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js @@ -85,7 +85,7 @@ export default Ember.Controller.extend({ contentPath: 'startTime', facetType: null, getCellContent: function(row) { - return Converter.timeStampToDate(row.get('startTime')); + return row.get('formattedStartTime'); } }, { id: 'elTime', @@ -100,7 +100,10 @@ export default Ember.Controller.extend({ headerTitle: 'Finished Time', contentPath: 'validatedFinishedTs', facetType: null, - observePath: true + observePath: true, + getCellContent: function(row) { + return row.get('formattedFinishedTime'); + } }, { id: 'priority', headerTitle: 'Priority', @@ -174,14 +177,17 @@ export default Ember.Controller.extend({ contentPath: 'startTime', facetType: null, getCellContent: function(row) { - return Converter.timeStampToDate(row.get('startTime')); + return row.get('formattedStartTime'); } }, { id: 'finishTime', headerTitle: 'Finished Time', contentPath: 'validatedFinishedTs', facetType: null, - observePath: true + observePath: true, + getCellContent: function(row) { + return row.get('formattedFinishedTime'); + } }); return ColumnDef.make(colums); }.property(), diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js index 799c8d201834b..8b48347e68db3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js @@ -160,11 +160,29 @@ export default Ember.Controller.extend({ return amHostAddress; }), - isKillable: Ember.computed("model.app.state", function () { + isAppKillable: Ember.computed("model.app.state", function () { if (this.get("model.app.applicationType") === 'yarn-service') { return false; } const killableStates = ['NEW', 'NEW_SAVING', 'SUBMITTED', 'ACCEPTED', 'RUNNING']; return killableStates.indexOf(this.get("model.app.state")) > -1; + }), + + isServiceDeployedOrRunning: Ember.computed('model.serviceInfo', function() { + const serviceInfo = this.get('model.serviceInfo'); + const stoppedStates = ['STOPPED', 'SUCCEEDED', 'FAILED']; + if (serviceInfo) { + return stoppedStates.indexOf(serviceInfo.get('state')) === -1; + } + return false; + }), + + isServiceStoppped: Ember.computed('model.serviceInfo', function() { + const serviceInfo = this.get('model.serviceInfo'); + const stoppedStates = ['STOPPED', 'SUCCEEDED']; + if (serviceInfo) { + return stoppedStates.indexOf(serviceInfo.get('state')) > -1; + } + return false; }) }); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app.js 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app.js index 8f4a8993b78aa..f0d6a72d9d89b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app.js @@ -24,10 +24,10 @@ export default DS.Model.extend({ user: DS.attr("string"), queue: DS.attr("string"), state: DS.attr("string"), - startTime: DS.attr("string"), + startTime: DS.attr("number"), elapsedTime: DS.attr("string"), finalStatus: DS.attr("string"), - finishedTime: DS.attr("finishedTime"), + finishedTime: DS.attr("number"), progress: DS.attr("number"), diagnostics: DS.attr("string"), amHostHttpAddress: DS.attr("string"), @@ -71,6 +71,17 @@ export default DS.Model.extend({ return this.get("finishedTime") >= this.get("startTime"); }.property("hasFinishedTime"), + formattedStartTime: function() { + return Converter.timeStampToDate(this.get('startTime')); + }.property('startTime'), + + formattedFinishedTime: function() { + if (this.get("finishedTime") < this.get("startTime")) { + return "N/A"; + } + return Converter.timeStampToDate(this.get("finishedTime")); + }.property('finishedTime'), + formattedElapsedTime: function() { return Converter.msToElapsedTimeUnit(this.get("elapsedTime")); }.property("elapsedTime"), diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-service.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-service.js new file mode 100644 index 0000000000000..f7d114eb88511 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-service.js @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import DS from 'ember-data'; + +export default DS.Model.extend({ + appId: DS.attr('string'), + name: DS.attr('string'), + state: DS.attr('string'), + version: DS.attr('string'), + lifetime: DS.attr('string'), + components: DS.attr(), + configuration: DS.attr(), + quicklinks: DS.attr() +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app.js index 8cd44bd59b357..21f5c951f9937 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app.js @@ -37,6 +37,18 @@ export default AbstractRoute.extend(AppAttemptMixin, { return []; }, function () { return []; + }), + + serviceInfo: new Ember.RSVP.Promise(resolve => { + if (service) { + this.store.queryRecord('yarn-service', {serviceName: service}).then(function(info) { + resolve(info); + }, function() { + resolve(null); + }); + } else { + resolve(null); + } }) }); }, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app.js index f4de7257c00cd..b3d9f192c70a2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app.js @@ -42,9 +42,9 @@ export default DS.JSONAPISerializer.extend({ user: payload.user, queue: payload.queue, state: payload.state, - startTime: payload.startedTime, // will be formatted in em-table + startTime: payload.startedTime, // will be formatted in yarn-app model elapsedTime: payload.elapsedTime, - finishedTime: Converter.timeStampToDate(payload.finishedTime), + finishedTime: payload.finishedTime, // will be formatted in yarn-app model finalStatus: payload.finalStatus, progress: payload.progress, applicationType: payload.applicationType, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-service.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-service.js new file mode 100644 index 0000000000000..a96b28ba86bf5 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-service.js @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import DS from 'ember-data'; + +export default DS.JSONAPISerializer.extend({ + internalNormalizeSingleResponse(store, primaryModelClass, payload) { + const fixedPayload = { + id: 'yarn_service_' + (payload.id || Date.now()), + type: primaryModelClass.modelName, + attributes: { + appId: payload.id, + name: payload.name, + state: payload.state, + version: payload.version, + lifetime: payload.lifetime, + components: payload.components, + configuration: payload.configuration, + quicklinks: payload.quicklinks + } + }; + return fixedPayload; + }, + + normalizeSingleResponse(store, primaryModelClass, payload/*, id, requestType*/) { + const pl = this.internalNormalizeSingleResponse(store, primaryModelClass, payload); + return {data: pl}; + } +}); \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs index 13d14e892aa8f..6e9bc08862758 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs @@ -60,14 +60,14 @@ {{model.app.user}} {{#if model.app.hasFinishedTime}} -
+
- Finished at {{model.app.validatedFinishedTs}} + Finished at {{model.app.formattedFinishedTime}}
{{else}}
- Started at {{model.app.startTime}} + Started at {{model.app.formattedStartTime}}
{{/if}}
@@ -75,21 +75,33 @@
' - }) - .component('pane', { - transclude: true, - require: { - tabsCtrl: '^tabs' - }, - bindings: { - title: '@' - }, - controller: function() { - this.$onInit = function() { - this.tabsCtrl.addPane(this); - }; - }, - template: '
' - }); - - angular.module('ozone').component('navmenu', { - bindings: { - metrics: '<' - }, - templateUrl: 'static/templates/menu.html', - controller: function($http) { - var ctrl = this; - ctrl.docs = false; - $http.head("docs/index.html") - .then(function(result) { - ctrl.docs = true; - }, function() { - ctrl.docs = false; - }); - } - }); - - angular.module('ozone').component('config', { - templateUrl: 'static/templates/config.html', - controller: function($scope, $http) { - var ctrl = this; - ctrl.selectedTags = []; - ctrl.configArray = []; - - $http.get("conf?cmd=getOzoneTags") - .then(function(response) { - ctrl.tags = response.data; - var excludedTags = ['CBLOCK', 'OM', 'SCM']; - for (var i = 0; i < excludedTags.length; i++) { - var idx = ctrl.tags.indexOf(excludedTags[i]); - // Remove CBLOCK related properties - if (idx > -1) { - ctrl.tags.splice(idx, 1); - } - } - ctrl.loadAll(); - }); - - ctrl.convertToArray = function(srcObj) { - ctrl.keyTagMap = {}; - for (var idx in srcObj) { - //console.log("Adding keys for "+idx) - for (var key in srcObj[idx]) { - - if (ctrl.keyTagMap.hasOwnProperty(key)) { - ctrl.keyTagMap[key]['tag'].push(idx); - } else { - var newProp = {}; - newProp['name'] = key; - newProp['value'] = srcObj[idx][key]; - newProp['tag'] = []; - newProp['tag'].push(idx); - ctrl.keyTagMap[key] = newProp; - } - } - } - } - - ctrl.loadAll = function() { - $http.get("conf?cmd=getPropertyByTag&tags=OM,SCM," + ctrl.tags) - .then(function(response) { - - ctrl.convertToArray(response.data); - ctrl.configs = Object.values(ctrl.keyTagMap); - ctrl.component = 'All'; - console.log("ajay -> " + JSON.stringify(ctrl.configs)); - ctrl.sortBy('name'); - }); - }; - - ctrl.filterTags = function() { - if (!ctrl.selectedTags) { - return true; - } - - if (ctrl.selectedTags.length < 1 && ctrl.component == 'All') { - return true; - } - - ctrl.configs = ctrl.configs.filter(function(item) { - - if (ctrl.component != 'All' && (item['tag'].indexOf(ctrl - .component) < 0)) { - console.log(item['name'] + " false tag " + item['tag']); - return false; - } - - if (ctrl.selectedTags.length < 1) { - return true; - } - for (var tag in item['tag']) { - tag = item['tag'][tag]; - if (ctrl.selectedTags.indexOf(tag) > -1) { - return true; - } - } - return false; - }); - - }; - ctrl.configFilter = function(config) { - return false; - }; - ctrl.selected = function(tag) { - return ctrl.selectedTags.includes(tag); - }; - - ctrl.switchto = function(tag) { - ctrl.component = tag; - ctrl.reloadConfig(); - }; - - ctrl.select = function(tag) { - var tagIdx = ctrl.selectedTags.indexOf(tag); - if (tagIdx > -1) { - ctrl.selectedTags.splice(tagIdx, 1); - } else { - ctrl.selectedTags.push(tag); - } - ctrl.reloadConfig(); - }; - - ctrl.reloadConfig = function() { - ctrl.configs = []; - ctrl.configs = Object.values(ctrl.keyTagMap); - ctrl.filterTags(); - }; - - ctrl.sortBy = function(field) { - ctrl.reverse = (ctrl.propertyName === field) ? !ctrl.reverse : false; - ctrl.propertyName = field; - }; - - ctrl.allSelected = function(comp) { - //console.log("Adding key for compo ->"+comp) - return ctrl.component == comp; - }; - - } - }); - -})(); \ No newline at end of file diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/config.html b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/config.html deleted file mode 100644 index b52f6533fc900..0000000000000 --- a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/config.html +++ /dev/null @@ -1,91 +0,0 @@ - - -
- [config.html body elided: the HTML markup of this deleted template was lost in extraction. Recoverable content: a component filter bar (All / OM / SCM), a tag checklist table (Tag / {{tag}}), and a sortable property table with Property / Value / Description columns rendering {{config.name}} / {{config.value}} / {{config.description}}.]
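The deleted config template above was fed by tag-based configuration lookups (the conf?cmd=getPropertyByTag requests issued from the accompanying JavaScript, filtered by OM and SCM). A hedged sketch of the server-side equivalent, assuming the Configuration#getAllPropertiesByTag API available on 3.x branches; the tag name is illustrative:

import java.util.Properties;

import org.apache.hadoop.conf.Configuration;

public class TagLookupSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Properties carrying a matching <tag> element in their *-default.xml
    // or *-site.xml entries come back as one group; the conf servlet
    // filters the same way for the OM/SCM buttons in the deleted UI.
    Properties tagged = conf.getAllPropertiesByTag("OZONE");
    tagged.forEach((k, v) -> System.out.println(k + " = " + v));
  }
}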
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/jvm.html deleted file mode 100644 index c1f7d16aefa8b..0000000000000 --- a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/jvm.html +++ /dev/null @@ -1,26 +0,0 @@
- [jvm.html body elided: markup lost in extraction; the deleted table showed "JVM: {{$ctrl.jmx.SystemProperties.java_vm_name}} {{$ctrl.jmx.SystemProperties.java_vm_version}}" and "Input arguments: {{$ctrl.jmx.InputArguments}}".]
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/menu.html deleted file mode 100644 index 95f1b4842f162..0000000000000 --- a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/menu.html +++ /dev/null @@ -1,60 +0,0 @@
- [menu.html body elided: markup lost in extraction; no readable text survived.]
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/overview.html deleted file mode 100644 index 30e2d26f56f76..0000000000000 --- a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/overview.html +++ /dev/null @@ -1,39 +0,0 @@
- [overview.html body elided: markup lost in extraction; the deleted page showed an "Overview" table (Started: {{$ctrl.jmx.StartedTimeInMillis | date : 'medium'}}, Version: {{$ctrl.jmx.Version}}, Compiled: {{$ctrl.jmx.CompileInfo}}) followed by a "JVM parameters" section.]
\ No newline at end of file diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/rpc-metrics.html b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/rpc-metrics.html deleted file mode 100644 index facb15203043c..0000000000000 --- a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/rpc-metrics.html +++ /dev/null @@ -1,87 +0,0 @@ - -
- Please set rpc.metrics.quantile.enable to true and define the intervals in seconds with setting rpc.metrics.percentiles.intervals (eg. set to 60,300) in your hdfs-site.xml to display Hadoop RPC related graphs.
- [remaining rpc-metrics.html body elided: markup lost in extraction. Recoverable content: per-interval "{{window}} window" panels ("Quantiles based on a fixed {{window}} window. Calculated once at every {{window}}"), "{{metric}}" chart blocks with "{{percentiles.numOps}} sample" counts, a "Number of ops / Averages" table (Metric name / Number of ops / Average time (ms)), a "Success / Failures" table (Metric name / Success / Failures), and an "Other JMX Metrics" key/value table.]
\ No newline at end of file diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestBaseHttpServer.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestBaseHttpServer.java deleted file mode 100644 index c6eae0e5fa631..0000000000000 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestBaseHttpServer.java +++ /dev/null @@ -1,98 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.server; - -import org.apache.hadoop.conf.Configuration; -import org.junit.Assert; -import org.junit.Test; - -/** - * Test Common ozone/hdds web methods. - */ -public class TestBaseHttpServer { - @Test - public void getBindAddress() throws Exception { - Configuration conf = new Configuration(); - conf.set("enabled", "false"); - - BaseHttpServer baseHttpServer = new BaseHttpServer(conf, "test") { - @Override - protected String getHttpAddressKey() { - return null; - } - - @Override - protected String getHttpsAddressKey() { - return null; - } - - @Override - protected String getHttpBindHostKey() { - return null; - } - - @Override - protected String getHttpsBindHostKey() { - return null; - } - - @Override - protected String getBindHostDefault() { - return null; - } - - @Override - protected int getHttpBindPortDefault() { - return 0; - } - - @Override - protected int getHttpsBindPortDefault() { - return 0; - } - - @Override - protected String getKeytabFile() { - return null; - } - - @Override - protected String getSpnegoPrincipal() { - return null; - } - - @Override - protected String getEnabledKey() { - return "enabled"; - } - }; - - conf.set("addresskey", "0.0.0.0:1234"); - - Assert.assertEquals("/0.0.0.0:1234", baseHttpServer - .getBindAddress("bindhostkey", "addresskey", - "default", 65).toString()); - - conf.set("bindhostkey", "1.2.3.4"); - - Assert.assertEquals("/1.2.3.4:1234", baseHttpServer - .getBindAddress("bindhostkey", "addresskey", - "default", 65).toString()); - } - -} \ No newline at end of file diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/EventHandlerStub.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/EventHandlerStub.java deleted file mode 100644 index 3f34a70e6e75a..0000000000000 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/EventHandlerStub.java +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.server.events; - -import java.util.ArrayList; -import java.util.List; - -/** - * Dummy class for testing to collect all the received events. - */ -public class EventHandlerStub implements EventHandler { - - private List receivedEvents = new ArrayList<>(); - - @Override - public void onMessage(PAYLOAD payload, EventPublisher publisher) { - receivedEvents.add(payload); - } - - public List getReceivedEvents() { - return receivedEvents; - } -} diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java deleted file mode 100644 index 0c1200f6d1413..0000000000000 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java +++ /dev/null @@ -1,83 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.server.events; - -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; - -/** - * Testing the basic functionality of the event queue. - */ -public class TestEventQueue { - - private static final Event EVENT1 = - new TypedEvent<>(Long.class, "SCM_EVENT1"); - private static final Event EVENT2 = - new TypedEvent<>(Long.class, "SCM_EVENT2"); - - private static final Event EVENT3 = - new TypedEvent<>(Long.class, "SCM_EVENT3"); - private static final Event EVENT4 = - new TypedEvent<>(Long.class, "SCM_EVENT4"); - - private EventQueue queue; - - @Before - public void startEventQueue() { - DefaultMetricsSystem.initialize(getClass().getSimpleName()); - queue = new EventQueue(); - } - - @After - public void stopEventQueue() { - DefaultMetricsSystem.shutdown(); - queue.close(); - } - - @Test - public void simpleEvent() { - - final long[] result = new long[2]; - - queue.addHandler(EVENT1, (payload, publisher) -> result[0] = payload); - - queue.fireEvent(EVENT1, 11L); - queue.processAll(1000); - Assert.assertEquals(11, result[0]); - - } - - @Test - public void multipleSubscriber() { - final long[] result = new long[2]; - queue.addHandler(EVENT2, (payload, publisher) -> result[0] = payload); - - queue.addHandler(EVENT2, (payload, publisher) -> result[1] = payload); - - queue.fireEvent(EVENT2, 23L); - queue.processAll(1000); - Assert.assertEquals(23, result[0]); - Assert.assertEquals(23, result[1]); - - } - -} \ No newline at end of file diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueueChain.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueueChain.java deleted file mode 100644 index bb05ef453e61b..0000000000000 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueueChain.java +++ /dev/null @@ -1,79 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.server.events; - -import org.junit.Test; - -/** - * More realistic event test with sending event from one listener. 
- */ -public class TestEventQueueChain { - - private static final Event DECOMMISSION = - new TypedEvent<>(FailedNode.class); - - private static final Event DECOMMISSION_START = - new TypedEvent<>(FailedNode.class); - - @Test - public void simpleEvent() { - EventQueue queue = new EventQueue(); - - queue.addHandler(DECOMMISSION, new PipelineManager()); - queue.addHandler(DECOMMISSION_START, new NodeWatcher()); - - queue.fireEvent(DECOMMISSION, new FailedNode("node1")); - - queue.processAll(5000); - } - - - static class FailedNode { - private final String nodeId; - - FailedNode(String nodeId) { - this.nodeId = nodeId; - } - - String getNodeId() { - return nodeId; - } - } - - private static class PipelineManager implements EventHandler { - - @Override - public void onMessage(FailedNode message, EventPublisher publisher) { - - System.out.println( - "Closing pipelines for all pipelines including node: " + message - .getNodeId()); - - publisher.fireEvent(DECOMMISSION_START, message); - } - - } - - private static class NodeWatcher implements EventHandler { - - @Override - public void onMessage(FailedNode message, EventPublisher publisher) { - System.out.println("Clear timer"); - } - } -} \ No newline at end of file diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java deleted file mode 100644 index b72d2ae7680bb..0000000000000 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java +++ /dev/null @@ -1,292 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.server.events; - -import java.util.List; -import java.util.Objects; -import org.apache.hadoop.hdds.HddsIdFactory; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.ozone.lease.LeaseManager; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -/** - * Test the basic functionality of event watcher. - */ -public class TestEventWatcher { - - private static final TypedEvent WATCH_UNDER_REPLICATED = - new TypedEvent<>(UnderreplicatedEvent.class); - - private static final TypedEvent UNDER_REPLICATED = - new TypedEvent<>(UnderreplicatedEvent.class); - - private static final TypedEvent - REPLICATION_COMPLETED = new TypedEvent<>(ReplicationCompletedEvent.class); - - LeaseManager leaseManager; - - @Before - public void startLeaseManager() { - DefaultMetricsSystem.instance(); - leaseManager = new LeaseManager<>("Test", 2000L); - leaseManager.start(); - } - - @After - public void stopLeaseManager() { - leaseManager.shutdown(); - DefaultMetricsSystem.shutdown(); - } - - - @Test - public void testEventHandling() throws InterruptedException { - EventQueue queue = new EventQueue(); - - EventWatcher - replicationWatcher = createEventWatcher(); - - EventHandlerStub underReplicatedEvents = - new EventHandlerStub<>(); - - queue.addHandler(UNDER_REPLICATED, underReplicatedEvents); - - replicationWatcher.start(queue); - - long id1 = HddsIdFactory.getLongId(); - long id2 = HddsIdFactory.getLongId(); - - queue.fireEvent(WATCH_UNDER_REPLICATED, - new UnderreplicatedEvent(id1, "C1")); - - queue.fireEvent(WATCH_UNDER_REPLICATED, - new UnderreplicatedEvent(id2, "C2")); - - Assert.assertEquals(0, underReplicatedEvents.getReceivedEvents().size()); - - Thread.sleep(1000); - - queue.fireEvent(REPLICATION_COMPLETED, - new ReplicationCompletedEvent(id1, "C2", "D1")); - - Assert.assertEquals(0, underReplicatedEvents.getReceivedEvents().size()); - - Thread.sleep(1500); - - queue.processAll(1000L); - - Assert.assertEquals(1, underReplicatedEvents.getReceivedEvents().size()); - Assert.assertEquals(id2, - underReplicatedEvents.getReceivedEvents().get(0).id); - - } - - @Test - public void testInprogressFilter() throws InterruptedException { - - EventQueue queue = new EventQueue(); - - EventWatcher - replicationWatcher = createEventWatcher(); - - EventHandlerStub underReplicatedEvents = - new EventHandlerStub<>(); - - queue.addHandler(UNDER_REPLICATED, underReplicatedEvents); - - replicationWatcher.start(queue); - - UnderreplicatedEvent event1 = - new UnderreplicatedEvent(HddsIdFactory.getLongId(), "C1"); - - queue.fireEvent(WATCH_UNDER_REPLICATED, event1); - - queue.fireEvent(WATCH_UNDER_REPLICATED, - new UnderreplicatedEvent(HddsIdFactory.getLongId(), "C2")); - - queue.fireEvent(WATCH_UNDER_REPLICATED, - new UnderreplicatedEvent(HddsIdFactory.getLongId(), "C1")); - - queue.processAll(1000L); - Thread.sleep(1000L); - List c1todo = replicationWatcher - .getTimeoutEvents(e -> e.containerId.equalsIgnoreCase("C1")); - - Assert.assertEquals(2, c1todo.size()); - Assert.assertTrue(replicationWatcher.contains(event1)); - Thread.sleep(1500L); - - c1todo = replicationWatcher - .getTimeoutEvents(e 
-> e.containerId.equalsIgnoreCase("C1")); - Assert.assertEquals(0, c1todo.size()); - Assert.assertFalse(replicationWatcher.contains(event1)); - - } - - @Test - public void testMetrics() throws InterruptedException { - - DefaultMetricsSystem.initialize("test"); - - EventQueue queue = new EventQueue(); - - EventWatcher - replicationWatcher = createEventWatcher(); - - EventHandlerStub underReplicatedEvents = - new EventHandlerStub<>(); - - queue.addHandler(UNDER_REPLICATED, underReplicatedEvents); - - replicationWatcher.start(queue); - - //send 3 event to track 3 in-progress activity - UnderreplicatedEvent event1 = - new UnderreplicatedEvent(HddsIdFactory.getLongId(), "C1"); - - UnderreplicatedEvent event2 = - new UnderreplicatedEvent(HddsIdFactory.getLongId(), "C2"); - - UnderreplicatedEvent event3 = - new UnderreplicatedEvent(HddsIdFactory.getLongId(), "C1"); - - queue.fireEvent(WATCH_UNDER_REPLICATED, event1); - - queue.fireEvent(WATCH_UNDER_REPLICATED, event2); - - queue.fireEvent(WATCH_UNDER_REPLICATED, event3); - - //1st event is completed, don't need to track any more - ReplicationCompletedEvent event1Completed = - new ReplicationCompletedEvent(event1.id, "C1", "D1"); - - queue.fireEvent(REPLICATION_COMPLETED, event1Completed); - - Thread.sleep(2200l); - - //until now: 3 in-progress activities are tracked with three - // UnderreplicatedEvents. The first one is completed, the remaining two - // are timed out (as the timeout -- defined in the leasmanager -- is 2000ms. - - EventWatcherMetrics metrics = replicationWatcher.getMetrics(); - - //3 events are received - Assert.assertEquals(3, metrics.getTrackedEvents().value()); - - //one is finished. doesn't need to be resent - Assert.assertEquals(1, metrics.getCompletedEvents().value()); - - //Other two are timed out and resent - Assert.assertEquals(2, metrics.getTimedOutEvents().value()); - - DefaultMetricsSystem.shutdown(); - } - - private EventWatcher - createEventWatcher() { - return new CommandWatcherExample(WATCH_UNDER_REPLICATED, - REPLICATION_COMPLETED, leaseManager); - } - - private class CommandWatcherExample - extends EventWatcher { - - public CommandWatcherExample(Event startEvent, - Event completionEvent, - LeaseManager leaseManager) { - super("TestCommandWatcher", startEvent, completionEvent, leaseManager); - } - - @Override - protected void onTimeout(EventPublisher publisher, UnderreplicatedEvent payload) { - publisher.fireEvent(UNDER_REPLICATED, payload); - } - - @Override - protected void onFinished(EventPublisher publisher, UnderreplicatedEvent payload) { - //Good job. We did it. 
- } - - @Override - public EventWatcherMetrics getMetrics() { - return super.getMetrics(); - } - } - - private static class ReplicationCompletedEvent - implements IdentifiableEventPayload { - - private final long id; - - private final String containerId; - - private final String datanodeId; - - public ReplicationCompletedEvent(long id, String containerId, - String datanodeId) { - this.id = id; - this.containerId = containerId; - this.datanodeId = datanodeId; - } - - public long getId() { - return id; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - ReplicationCompletedEvent that = (ReplicationCompletedEvent) o; - return Objects.equals(containerId, that.containerId) && Objects - .equals(datanodeId, that.datanodeId); - } - - @Override - public int hashCode() { - - return Objects.hash(containerId, datanodeId); - } - } - - private static class UnderreplicatedEvent - - implements IdentifiableEventPayload { - - private final long id; - - private final String containerId; - - public UnderreplicatedEvent(long id, String containerId) { - this.containerId = containerId; - this.id = id; - } - - public long getId() { - return id; - } - } - -} diff --git a/hadoop-hdds/framework/src/test/resources/ozone-site.xml b/hadoop-hdds/framework/src/test/resources/ozone-site.xml deleted file mode 100644 index 77dd7ef994026..0000000000000 --- a/hadoop-hdds/framework/src/test/resources/ozone-site.xml +++ /dev/null @@ -1,24 +0,0 @@ - - - - - - - - - diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml deleted file mode 100644 index 59dfa79fc67b7..0000000000000 --- a/hadoop-hdds/pom.xml +++ /dev/null @@ -1,203 +0,0 @@ - - - - 4.0.0 - - org.apache.hadoop - hadoop-project-dist - 3.2.0-SNAPSHOT - ../hadoop-project-dist - - - hadoop-hdds - 0.3.0-SNAPSHOT - Apache Hadoop Distributed Data Store Project - Apache Hadoop HDDS - pom - - - client - common - framework - container-service - server-scm - tools - - - - - - org.apache.hadoop - hadoop-common - - - org.apache.hadoop - hadoop-hdfs - - - org.apache.hadoop - hadoop-hdfs-client - - - org.apache.hadoop - hadoop-common - test - test-jar - - - org.apache.hadoop - hadoop-hdfs - test - test-jar - - - info.picocli - picocli - 3.5.2 - - - com.google.protobuf - protobuf-java - compile - - - com.google.guava - guava - compile - - - junit - junit - test - - - - - - org.apache.rat - apache-rat-plugin - - - **/hs_err*.log - **/target/** - .gitattributes - .idea/** - src/main/resources/webapps/static/angular-1.6.4.min.js - src/main/resources/webapps/static/angular-nvd3-1.0.9.min.js - src/main/resources/webapps/static/angular-route-1.6.4.min.js - src/main/resources/webapps/static/d3-3.5.17.min.js - src/main/resources/webapps/static/nvd3-1.8.5.min.css.map - src/main/resources/webapps/static/nvd3-1.8.5.min.css - src/main/resources/webapps/static/nvd3-1.8.5.min.js.map - src/main/resources/webapps/static/nvd3-1.8.5.min.js - src/test/resources/additionalfields.container - src/test/resources/incorrect.checksum.container - src/test/resources/incorrect.container - src/test/resources/test.db.ini - - - - - org.codehaus.mojo - findbugs-maven-plugin - - - - - - org.apache.maven.plugins - maven-dependency-plugin - - - add-classpath-descriptor - package - - build-classpath - - - true - $HDDS_LIB_JARS_DIR - true - runtime - - - - - - org.apache.maven.plugins - maven-jar-plugin - - - - test-jar - - - - - - - - - - parallel-tests - - - - org.apache.hadoop - hadoop-maven-plugins - - - 
parallel-tests-createdir - - parallel-tests-createdir - - - - - - org.apache.maven.plugins - maven-surefire-plugin - - ${testsThreadCount} - false - ${maven-surefire-plugin.argLine} -DminiClusterDedicatedDirs=true - - ${testsThreadCount} - ${test.build.data}/${surefire.forkNumber} - ${test.build.dir}/${surefire.forkNumber} - ${hadoop.tmp.dir}/${surefire.forkNumber} - - - - - - ${test.build.data} - - - - - - fork-${surefire.forkNumber} - - - - - - - - diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml deleted file mode 100644 index f34e8482515b1..0000000000000 --- a/hadoop-hdds/server-scm/pom.xml +++ /dev/null @@ -1,129 +0,0 @@ - - - - 4.0.0 - - org.apache.hadoop - hadoop-hdds - 0.3.0-SNAPSHOT - - hadoop-hdds-server-scm - 0.3.0-SNAPSHOT - Apache Hadoop Distributed Data Store Storage Container Manager Server - Apache Hadoop HDDS SCM Server - jar - - - - org.apache.hadoop - hadoop-hdds-common - - - - org.apache.hadoop - hadoop-hdds-container-service - - - - org.apache.hadoop - hadoop-hdds-client - - - - org.apache.hadoop - hadoop-hdds-server-framework - - - - org.apache.hadoop - hadoop-hdds-container-service - test - test-jar - - - - org.hamcrest - hamcrest-core - 1.3 - test - - - io.dropwizard.metrics - metrics-core - - - org.assertj - assertj-core - test - - - org.openjdk.jmh - jmh-core - test - - - org.openjdk.jmh - jmh-generator-annprocess - test - - - org.mockito - mockito-all - test - - - org.hamcrest - hamcrest-all - 1.3 - - - org.bouncycastle - bcprov-jdk16 - test - - - - - - org.apache.maven.plugins - maven-dependency-plugin - - - copy-common-html - prepare-package - - unpack - - - - - org.apache.hadoop - hadoop-hdds-server-framework - ${project.build.outputDirectory} - - webapps/static/**/*.* - - - true - - - - - - - diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java deleted file mode 100644 index 435f0a593220c..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm; - -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.server.Precheck; - -/** - * SCM utility class. - */ -public final class ScmUtils { - - private ScmUtils() { - } - - /** - * Perform all prechecks for given scm operation. - * - * @param operation - * @param preChecks prechecks to be performed - */ - public static void preCheck(ScmOps operation, Precheck... 
preChecks) - throws SCMException { - for (Precheck preCheck : preChecks) { - preCheck.check(operation); - } - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManager.java deleted file mode 100644 index f9aa0cd4f72d7..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManager.java +++ /dev/null @@ -1,78 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.block; - -import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.client.BlockID; - -import java.io.Closeable; -import java.io.IOException; -import java.util.List; - -/** - * - * Block APIs. - * Container is transparent to these APIs. - */ -public interface BlockManager extends Closeable { - /** - * Allocates a new block for a given size. - * @param size - Block Size - * @param type Replication Type - * @param factor - Replication Factor - * @return AllocatedBlock - * @throws IOException - */ - AllocatedBlock allocateBlock(long size, HddsProtos.ReplicationType type, - HddsProtos.ReplicationFactor factor, String owner) throws IOException; - - /** - * Deletes a list of blocks in an atomic operation. Internally, SCM - * writes these blocks into a {@link DeletedBlockLog} and deletes them - * from SCM DB. If this is successful, the given blocks enter the pending - * deletion state and become invisible in the SCM namespace. - * - * @param blockIDs block IDs. This is often the list of blocks of - * a particular object key. - * @throws IOException if an exception happens, none of the blocks is deleted. - */ - void deleteBlocks(List blockIDs) throws IOException; - - /** - * @return the block deletion transaction log maintained by SCM. - */ - DeletedBlockLog getDeletedBlockLog(); - - /** - * Start block manager background services. - * @throws IOException - */ - void start() throws IOException; - - /** - * Shutdown block manager background services. - * @throws IOException - */ - void stop() throws IOException; - - /** - * @return the block deleting service executed in SCM.
- */ - SCMBlockDeletingService getSCMBlockDeletingService(); -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java deleted file mode 100644 index d383c687a7607..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java +++ /dev/null @@ -1,485 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.block; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.ScmUtils; -import org.apache.hadoop.hdds.scm.container.Mapping; -import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.scm.server.ChillModePrecheck; -import org.apache.hadoop.hdds.server.events.EventHandler; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.metrics2.util.MBeans; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.Time; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.management.ObjectName; -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Random; -import java.util.concurrent.TimeUnit; - -import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes - .CHILL_MODE_EXCEPTION; -import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes - .INVALID_BLOCK_SIZE; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_BLOCK_DELETING_SERVICE_INTERVAL; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_BLOCK_DELETING_SERVICE_TIMEOUT; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT; - -/** Block Manager manages the block access for SCM. */ -public class BlockManagerImpl implements EventHandler, - BlockManager, BlockmanagerMXBean { - private static final Logger LOG = - LoggerFactory.getLogger(BlockManagerImpl.class); - // TODO : FIX ME : Hard coding the owner. - // Currently only user of the block service is Ozone, CBlock manages blocks - // by itself and does not rely on the Block service offered by SCM. - - private final NodeManager nodeManager; - private final Mapping containerManager; - - private final long containerSize; - - private final DeletedBlockLog deletedBlockLog; - private final SCMBlockDeletingService blockDeletingService; - - private final int containerProvisionBatchSize; - private final Random rand; - private ObjectName mxBean; - private ChillModePrecheck chillModePrecheck; - - /** - * Constructor. - * - * @param conf - configuration. - * @param nodeManager - node manager. - * @param containerManager - container manager. - * @param eventPublisher - event publisher. 
- * @throws IOException - */ - public BlockManagerImpl(final Configuration conf, - final NodeManager nodeManager, final Mapping containerManager, - EventPublisher eventPublisher) - throws IOException { - this.nodeManager = nodeManager; - this.containerManager = containerManager; - - this.containerSize = (long)conf.getStorageSize( - ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE, - ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT, - StorageUnit.BYTES); - - this.containerProvisionBatchSize = - conf.getInt( - ScmConfigKeys.OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE, - ScmConfigKeys.OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE_DEFAULT); - rand = new Random(); - - mxBean = MBeans.register("BlockManager", "BlockManagerImpl", this); - - // SCM block deleting transaction log and deleting service. - deletedBlockLog = new DeletedBlockLogImpl(conf, containerManager); - long svcInterval = - conf.getTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, - OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT, - TimeUnit.MILLISECONDS); - long serviceTimeout = - conf.getTimeDuration( - OZONE_BLOCK_DELETING_SERVICE_TIMEOUT, - OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT, - TimeUnit.MILLISECONDS); - blockDeletingService = - new SCMBlockDeletingService(deletedBlockLog, containerManager, - nodeManager, eventPublisher, svcInterval, serviceTimeout, conf); - chillModePrecheck = new ChillModePrecheck(); - } - - /** - * Start block manager services. - * - * @throws IOException - */ - public void start() throws IOException { - this.blockDeletingService.start(); - } - - /** - * Shutdown block manager services. - * - * @throws IOException - */ - public void stop() throws IOException { - this.blockDeletingService.shutdown(); - this.close(); - } - - /** - * Pre allocate specified count of containers for block creation. - * - * @param count - Number of containers to allocate. - * @param type - Type of containers - * @param factor - how many copies needed for this container. - * @throws IOException - */ - private synchronized void preAllocateContainers(int count, - ReplicationType type, ReplicationFactor factor, String owner) - throws IOException { - for (int i = 0; i < count; i++) { - ContainerWithPipeline containerWithPipeline; - try { - // TODO: Fix this later when Ratis is made the Default. - containerWithPipeline = containerManager.allocateContainer( - type, factor, owner); - - if (containerWithPipeline == null) { - LOG.warn("Unable to allocate container."); - } - } catch (IOException ex) { - LOG.warn("Unable to allocate container: {}", ex); - } - } - } - - /** - * Allocates a block in a container and returns that info. - * - * @param size - Block Size - * @param type Replication Type - * @param factor - Replication Factor - * @return Allocated block - * @throws IOException on failure. - */ - @Override - public AllocatedBlock allocateBlock(final long size, - ReplicationType type, ReplicationFactor factor, String owner) - throws IOException { - LOG.trace("Size;{} , type : {}, factor : {} ", size, type, factor); - ScmUtils.preCheck(ScmOps.allocateBlock, chillModePrecheck); - if (size < 0 || size > containerSize) { - LOG.warn("Invalid block size requested : {}", size); - throw new SCMException("Unsupported block size: " + size, - INVALID_BLOCK_SIZE); - } - - /* - Here is the high level logic. - - 1. First we check if there are containers in ALLOCATED state, that is - SCM has allocated them in the SCM namespace but the corresponding - container has not been created in the Datanode yet. 
If we have any in - that state, we will return that to the client, which allows client to - finish creating those containers. This is a sort of greedy algorithm, - our primary purpose is to get as many containers as possible. - - 2. If there are no allocated containers -- Then we find a Open container - that matches that pattern. - - 3. If both of them fail, the we will pre-allocate a bunch of containers - in SCM and try again. - - TODO : Support random picking of two containers from the list. So we can - use different kind of policies. - */ - - ContainerWithPipeline containerWithPipeline; - - // This is to optimize performance, if the below condition is evaluated - // to false, then we can be sure that there are no containers in - // ALLOCATED state. - // This can result in false positive, but it will never be false negative. - // How can this result in false positive? We check if there are any - // containers in ALLOCATED state, this check doesn't care about the - // USER of the containers. So there might be cases where a different - // USER has few containers in ALLOCATED state, which will result in - // false positive. - if (!containerManager.getStateManager().getContainerStateMap() - .getContainerIDsByState(HddsProtos.LifeCycleState.ALLOCATED) - .isEmpty()) { - // Since the above check can result in false positive, we have to do - // the actual check and find out if there are containers in ALLOCATED - // state matching our criteria. - synchronized (this) { - // Using containers from ALLOCATED state should be done within - // synchronized block (or) write lock. Since we already hold a - // read lock, we will end up in deadlock situation if we take - // write lock here. - containerWithPipeline = containerManager - .getMatchingContainerWithPipeline(size, owner, type, factor, - HddsProtos.LifeCycleState.ALLOCATED); - if (containerWithPipeline != null) { - containerManager.updateContainerState( - containerWithPipeline.getContainerInfo().getContainerID(), - HddsProtos.LifeCycleEvent.CREATE); - return newBlock(containerWithPipeline, - HddsProtos.LifeCycleState.ALLOCATED); - } - } - } - - // Since we found no allocated containers that match our criteria, let us - // look for OPEN containers that match the criteria. - containerWithPipeline = containerManager - .getMatchingContainerWithPipeline(size, owner, type, factor, - HddsProtos.LifeCycleState.OPEN); - if (containerWithPipeline != null) { - return newBlock(containerWithPipeline, HddsProtos.LifeCycleState.OPEN); - } - - // We found neither ALLOCATED or OPEN Containers. This generally means - // that most of our containers are full or we have not allocated - // containers of the type and replication factor. So let us go and - // allocate some. - - // Even though we have already checked the containers in ALLOCATED - // state, we have to check again as we only hold a read lock. - // Some other thread might have pre-allocated container in meantime. 
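The comments above amount to a classic cheap-check / lock / re-check pattern: the unlocked emptiness probe can give false positives but never false negatives, so the lock is taken only when an ALLOCATED container might actually exist. A hedged sketch of the same control flow, with illustrative names rather than the SCM types:

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.function.Supplier;

// Illustrative double-checked allocation: probe without the lock first,
// confirm under the lock, and only fall back to slow pre-allocation when
// the re-check also comes up empty.
class DoubleCheckedAllocator<T> {
  private final Queue<T> preAllocated = new ConcurrentLinkedQueue<>();

  T acquire(Supplier<T> slowAllocate) {
    if (!preAllocated.isEmpty()) {      // cheap check: may be a false
      T t = preAllocated.poll();        //   positive, never a false negative
      if (t != null) {
        return t;
      }
    }
    synchronized (this) {               // slow path: re-check under the lock,
      T t = preAllocated.poll();        //   another thread may have refilled
      if (t == null) {
        slowPreAllocate(slowAllocate);  // analogous to preAllocateContainers
        t = preAllocated.poll();
      }
      return t;
    }
  }

  private void slowPreAllocate(Supplier<T> slowAllocate) {
    for (int i = 0; i < 4; i++) {       // batch size is illustrative
      preAllocated.offer(slowAllocate.get());
    }
  }
}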
- synchronized (this) { - if (!containerManager.getStateManager().getContainerStateMap() - .getContainerIDsByState(HddsProtos.LifeCycleState.ALLOCATED) - .isEmpty()) { - containerWithPipeline = containerManager - .getMatchingContainerWithPipeline(size, owner, type, factor, - HddsProtos.LifeCycleState.ALLOCATED); - } - if (containerWithPipeline == null) { - preAllocateContainers(containerProvisionBatchSize, - type, factor, owner); - containerWithPipeline = containerManager - .getMatchingContainerWithPipeline(size, owner, type, factor, - HddsProtos.LifeCycleState.ALLOCATED); - } - - if (containerWithPipeline != null) { - containerManager.updateContainerState( - containerWithPipeline.getContainerInfo().getContainerID(), - HddsProtos.LifeCycleEvent.CREATE); - return newBlock(containerWithPipeline, - HddsProtos.LifeCycleState.ALLOCATED); - } - } - // we have tried all strategies we know and but somehow we are not able - // to get a container for this block. Log that info and return a null. - LOG.error( - "Unable to allocate a block for the size: {}, type: {}, factor: {}", - size, type, factor); - return null; - } - - /** - * newBlock - returns a new block assigned to a container. - * - * @param containerWithPipeline - Container Info. - * @param state - Current state of the container. - * @return AllocatedBlock - */ - private AllocatedBlock newBlock(ContainerWithPipeline containerWithPipeline, - HddsProtos.LifeCycleState state) throws IOException { - ContainerInfo containerInfo = containerWithPipeline.getContainerInfo(); - if (containerWithPipeline.getPipeline().getDatanodes().size() == 0) { - LOG.error("Pipeline Machine count is zero."); - return null; - } - - // TODO : Revisit this local ID allocation when HA is added. - long localID = UniqueId.next(); - long containerID = containerInfo.getContainerID(); - - boolean createContainer = (state == HddsProtos.LifeCycleState.ALLOCATED); - - AllocatedBlock.Builder abb = - new AllocatedBlock.Builder() - .setBlockID(new BlockID(containerID, localID)) - .setPipeline(containerWithPipeline.getPipeline()) - .setShouldCreateContainer(createContainer); - LOG.trace("New block allocated : {} Container ID: {}", localID, - containerID); - return abb.build(); - } - - /** - * Deletes a list of blocks in an atomic operation. Internally, SCM writes - * these blocks into a - * {@link DeletedBlockLog} and deletes them from SCM DB. If this is - * successful, given blocks are - * entering pending deletion state and becomes invisible from SCM namespace. - * - * @param blockIDs block IDs. This is often the list of blocks of a - * particular object key. - * @throws IOException if exception happens, non of the blocks is deleted. - */ - @Override - public void deleteBlocks(List blockIDs) throws IOException { - if (!nodeManager.isOutOfChillMode()) { - throw new SCMException("Unable to delete block while in chill mode", - CHILL_MODE_EXCEPTION); - } - - LOG.info("Deleting blocks {}", StringUtils.join(",", blockIDs)); - Map> containerBlocks = new HashMap<>(); - // TODO: track the block size info so that we can reclaim the container - // TODO: used space when the block is deleted. - for (BlockID block : blockIDs) { - // Merge blocks to a container to blocks mapping, - // prepare to persist this info to the deletedBlocksLog. 
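The merge loop around this point is the standard group-by idiom; under Java 8 it collapses to computeIfAbsent. A sketch of the same containerID-to-localID grouping (the long[] block stand-in is an assumption, not the real BlockID):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class BlockGrouping {
  // containerID -> local block IDs, built with computeIfAbsent instead of
  // the explicit containsKey/put dance in the loop nearby.
  static Map<Long, List<Long>> byContainer(List<long[]> blocks) {
    Map<Long, List<Long>> grouped = new HashMap<>();
    for (long[] block : blocks) {   // block[0]=containerID, block[1]=localID
      grouped.computeIfAbsent(block[0], k -> new ArrayList<>()).add(block[1]);
    }
    return grouped;
  }
}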
- long containerID = block.getContainerID(); - if (containerBlocks.containsKey(containerID)) { - containerBlocks.get(containerID).add(block.getLocalID()); - } else { - List item = new ArrayList<>(); - item.add(block.getLocalID()); - containerBlocks.put(containerID, item); - } - } - - try { - deletedBlockLog.addTransactions(containerBlocks); - } catch (IOException e) { - throw new IOException( - "Skip writing the deleted blocks info to" - + " the delLog because addTransaction fails. Batch skipped: " - + StringUtils.join(",", blockIDs), e); - } - // TODO: Container report handling of the deleted blocks: - // Remove tombstone and update open container usage. - // We will revisit this when the closed container replication is done. - } - - @Override - public DeletedBlockLog getDeletedBlockLog() { - return this.deletedBlockLog; - } - - /** - * Close the resources for BlockManager. - * - * @throws IOException - */ - @Override - public void close() throws IOException { - if (deletedBlockLog != null) { - deletedBlockLog.close(); - } - blockDeletingService.shutdown(); - if (mxBean != null) { - MBeans.unregister(mxBean); - mxBean = null; - } - } - - @Override - public int getOpenContainersNo() { - return 0; - // TODO : FIX ME : The open container being a single number does not make - // sense. - // We have to get open containers by Replication Type and Replication - // factor. Hence returning 0 for now. - // containers.get(HddsProtos.LifeCycleState.OPEN).size(); - } - - @Override - public SCMBlockDeletingService getSCMBlockDeletingService() { - return this.blockDeletingService; - } - - @Override - public void onMessage(Boolean inChillMode, EventPublisher publisher) { - this.chillModePrecheck.setInChillMode(inChillMode); - } - - /** - * Returns status of scm chill mode determined by CHILL_MODE_STATUS event. - * */ - public boolean isScmInChillMode() { - return this.chillModePrecheck.isInChillMode(); - } - - /** - * Get class logger. - * */ - public static Logger getLogger() { - return LOG; - } - - /** - * This class uses system current time milliseconds to generate unique id. - */ - public static final class UniqueId { - /* - * When we represent time in milliseconds using 'long' data type, - * the LSB bits are used. Currently we are only using 44 bits (LSB), - * 20 bits (MSB) are not used. - * We will exhaust this 44 bits only when we are in year 2525, - * until then we can safely use this 20 bits (MSB) for offset to generate - * unique id within millisecond. - * - * Year : Mon Dec 31 18:49:04 IST 2525 - * TimeInMillis: 17545641544247 - * Binary Representation: - * MSB (20 bits): 0000 0000 0000 0000 0000 - * LSB (44 bits): 1111 1111 0101 0010 1001 1011 1011 0100 1010 0011 0111 - * - * We have 20 bits to run counter, we should exclude the first bit (MSB) - * as we don't want to deal with negative values. - * To be on safer side we will use 'short' data type which is of length - * 16 bits and will give us 65,536 values for offset. - * - */ - - private static volatile short offset = 0; - - /** - * Private constructor so that no one can instantiate this class. - */ - private UniqueId() {} - - /** - * Calculate and returns next unique id based on System#currentTimeMillis. - * - * @return unique long value - */ - public static synchronized long next() { - long utcTime = Time.getUtcTime(); - if ((utcTime & 0xFFFF000000000000L) == 0) { - return utcTime << Short.SIZE | (offset++ & 0x0000FFFF); - } - throw new RuntimeException("Got invalid UTC time," + - " cannot generate unique Id. 
UTC Time: " + utcTime); - } - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockmanagerMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockmanagerMXBean.java deleted file mode 100644 index 23c6983083e34..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockmanagerMXBean.java +++ /dev/null @@ -1,30 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.block; - - -/** - * JMX interface for the block manager. - */ -public interface BlockmanagerMXBean { - - /** - * Number of open containers manager by the block manager. - */ - int getOpenContainersNo(); -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java deleted file mode 100644 index 8702a42d26af4..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java +++ /dev/null @@ -1,148 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.block; - -import com.google.common.collect.ArrayListMultimap; -import org.apache.hadoop.hdds.scm.container.Mapping; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; - -import java.io.IOException; -import java.util.Collections; -import java.util.List; -import java.util.Set; -import java.util.UUID; -import java.util.stream.Collectors; - -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; - -/** - * A wrapper class to hold info about datanode and all deleted block - * transactions that will be sent to this datanode. 
- */ -public class DatanodeDeletedBlockTransactions { - private int nodeNum; - // The throttle size for each datanode. - private int maximumAllowedTXNum; - // Current counter of inserted TX. - private int currentTXNum; - private Mapping mappingService; - // A list of TXs mapped to a certain datanode ID. - private final ArrayListMultimap - transactions; - - DatanodeDeletedBlockTransactions(Mapping mappingService, - int maximumAllowedTXNum, int nodeNum) { - this.transactions = ArrayListMultimap.create(); - this.mappingService = mappingService; - this.maximumAllowedTXNum = maximumAllowedTXNum; - this.nodeNum = nodeNum; - } - - public boolean addTransaction(DeletedBlocksTransaction tx, - Set dnsWithTransactionCommitted) { - Pipeline pipeline = null; - try { - ContainerWithPipeline containerWithPipeline = - mappingService.getContainerWithPipeline(tx.getContainerID()); - if (containerWithPipeline.getContainerInfo().isContainerOpen() - || containerWithPipeline.getPipeline().isEmpty()) { - return false; - } - pipeline = containerWithPipeline.getPipeline(); - } catch (IOException e) { - SCMBlockDeletingService.LOG.warn("Got container info error.", e); - return false; - } - - boolean success = false; - for (DatanodeDetails dd : pipeline.getMachines()) { - UUID dnID = dd.getUuid(); - if (dnsWithTransactionCommitted == null || - !dnsWithTransactionCommitted.contains(dnID)) { - // Transaction need not be sent to dns which have already committed it - success = addTransactionToDN(dnID, tx); - } - } - return success; - } - - private boolean addTransactionToDN(UUID dnID, DeletedBlocksTransaction tx) { - if (transactions.containsKey(dnID)) { - List txs = transactions.get(dnID); - if (txs != null && txs.size() < maximumAllowedTXNum) { - boolean hasContained = false; - for (DeletedBlocksTransaction t : txs) { - if (t.getContainerID() == tx.getContainerID()) { - hasContained = true; - break; - } - } - - if (!hasContained) { - txs.add(tx); - currentTXNum++; - return true; - } - } - } else { - currentTXNum++; - transactions.put(dnID, tx); - return true; - } - SCMBlockDeletingService.LOG - .debug("Transaction added: {} <- TX({})", dnID, tx.getTxID()); - return false; - } - - Set getDatanodeIDs() { - return transactions.keySet(); - } - - boolean isEmpty() { - return transactions.isEmpty(); - } - - boolean hasTransactions(UUID dnId) { - return transactions.containsKey(dnId) && - !transactions.get(dnId).isEmpty(); - } - - List getDatanodeTransactions(UUID dnId) { - return transactions.get(dnId); - } - - List getTransactionIDList(UUID dnId) { - if (hasTransactions(dnId)) { - return transactions.get(dnId).stream() - .map(DeletedBlocksTransaction::getTxID).map(String::valueOf) - .collect(Collectors.toList()); - } else { - return Collections.emptyList(); - } - } - - boolean isFull() { - return currentTXNum >= maximumAllowedTXNum * nodeNum; - } - - int getTXNum() { - return currentTXNum; - } -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java deleted file mode 100644 index db6c1c5dda24b..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java +++ /dev/null @@ -1,118 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
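To make the cap arithmetic in DatanodeDeletedBlockTransactions concrete: each datanode accepts at most maximumAllowedTXNum transactions, and the batch as a whole is declared full at maximumAllowedTXNum * nodeNum. A hedged, JDK-only sketch of that throttle (names illustrative):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;

// Illustrative per-node throttle: each node takes at most perNodeLimit
// entries, and the batch is "full" once every node could be at its limit.
class ThrottledBatch<T> {
  private final Map<UUID, List<T>> perNode = new HashMap<>();
  private final int perNodeLimit;
  private final int nodeCount;
  private int total;

  ThrottledBatch(int perNodeLimit, int nodeCount) {
    this.perNodeLimit = perNodeLimit;
    this.nodeCount = nodeCount;
  }

  boolean add(UUID node, T tx) {
    List<T> txs = perNode.computeIfAbsent(node, k -> new ArrayList<>());
    if (txs.size() >= perNodeLimit) {
      return false;              // this node already at its cap, skip it
    }
    txs.add(tx);
    total++;
    return true;
  }

  boolean isFull() {             // the scan loop stops once this turns true
    return total >= perNodeLimit * nodeCount;
  }
}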
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.block; - -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto - .DeleteBlockTransactionResult; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; - -import java.io.Closeable; -import java.io.IOException; -import java.util.List; -import java.util.Map; -import java.util.UUID; - -/** - * The DeletedBlockLog is a persisted log in SCM to keep track of - * container blocks which are under deletion. It maintains info - * about under-deletion container blocks that are notified by the OM, - * and the state of how they are processed. - */ -public interface DeletedBlockLog extends Closeable { - - /** - * Scans the entire log once and returns TXs to DatanodeDeletedBlockTransactions. - * Once DatanodeDeletedBlockTransactions is full, the scan - * stops. - * @param transactions a list of TXs will be set into. - * @return Mapping from containerId to latest transactionId for the container. - * @throws IOException - */ - Map getTransactions(DatanodeDeletedBlockTransactions transactions) - throws IOException; - - /** - * Return all failed transactions in the log. A transaction is considered - * to be failed if it has been sent more than the MAX_RETRY limit and its - * count has been reset to -1. - * - * @return a list of failed deleted block transactions. - * @throws IOException - */ - List getFailedTransactions() - throws IOException; - - /** - * Increments count for the given list of transactions by 1. - * The log maintains a valid range of counts for each transaction, - * [0, MAX_RETRY]. If the count exceeds this range, it is reset to -1 - * to indicate the transaction is no longer valid. - * - * @param txIDs - transaction ID. - */ - void incrementCount(List txIDs) - throws IOException; - - /** - * Committing a transaction means deleting all footprints of the transaction - * from the log. This method doesn't guarantee that all transactions can be - * successfully deleted; it tolerates failures and makes a best effort. - * @param transactionResults - delete block transaction results. - * @param dnID - ID of datanode which acknowledges the delete block command. - */ - void commitTransactions(List transactionResults, - UUID dnID); - - /** - * Creates a block deletion transaction and adds it into the log. - * - * @param containerID - container ID. - * @param blocks - blocks that belong to the same container. - * - * @throws IOException - */ - void addTransaction(long containerID, List blocks) - throws IOException; - - /** - * Creates block deletion transactions for a set of containers, - * adds them into the log and persists them atomically. An object key - * might be stored in multiple containers and multiple blocks; - * this API ensures that these updates are done in an atomic manner, - * so if any of them fails, the entire operation fails without - * any updates to the log. Note, this doesn't mean only one transaction - * is created; it creates multiple transactions (depending on the - * number of containers) together (on success) or none (on failure). - * - * @param containerBlocksMap a map of containerBlocks.
- * @throws IOException - */ - void addTransactions(Map> containerBlocksMap) - throws IOException; - - /** - * Returns the total number of valid transactions. A transaction is - * considered to be valid as long as its count is in range [0, MAX_RETRY]. - * - * @return number of a valid transactions. - * @throws IOException - */ - int getNumOfValidTransactions() throws IOException; -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java deleted file mode 100644 index 68435d1c6a219..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java +++ /dev/null @@ -1,428 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.block; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.Lists; -import com.google.common.primitives.Longs; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto - .DeleteBlockTransactionResult; -import org.apache.hadoop.hdds.scm.command - .CommandStatusReportHandler.DeleteBlockStatus; -import org.apache.hadoop.hdds.scm.container.Mapping; -import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.hdds.server.events.EventHandler; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.utils.BatchOperation; -import org.apache.hadoop.utils.MetadataStore; -import org.apache.hadoop.utils.MetadataStoreBuilder; -import org.eclipse.jetty.util.ConcurrentHashSet; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.util.Arrays; -import java.util.Collection; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; -import java.util.stream.Collectors; - -import static java.lang.Math.min; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_BLOCK_DELETION_MAX_RETRY; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_BLOCK_DELETION_MAX_RETRY_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_DB_CACHE_SIZE_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_DB_CACHE_SIZE_MB; -import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath; -import static org.apache.hadoop.ozone.OzoneConsts.DELETED_BLOCK_DB; - -/** - * An implementation class of {@link DeletedBlockLog}, which uses - * a K/V db to maintain block deletion transactions between scm and datanode. - * This is a very basic implementation: it simply scans the log, - * memorizes the position scanned last time, and uses this to - * determine where the next scan starts. It has no notion of the weight - * of each transaction, so as long as a transaction is still valid it gets - * the same chance to be retrieved, which depends only on the natural - * order of the transaction ID.
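A hedged sketch of the scan policy that javadoc describes: visit transactions in natural txID order, skip entries whose retry count marks them failed or exhausted, and stop as soon as the throttled batch refuses more. The TreeMap store and Txn shape are assumptions for illustration, not the MetadataStore API:

import java.util.Map;
import java.util.TreeMap;
import java.util.function.Predicate;

class LogScanSketch {
  static class Txn {
    final long txId;
    final int count;              // -1 means permanently failed
    Txn(long txId, int count) { this.txId = txId; this.count = count; }
  }

  // Walk txns in natural txID order; hand each valid one to the batch and
  // stop as soon as the batch refuses more (i.e. the throttle is full).
  static void scan(TreeMap<Long, Txn> log, int maxRetry,
                   Predicate<Txn> offerToBatch) {
    for (Map.Entry<Long, Txn> e : log.entrySet()) {
      Txn txn = e.getValue();
      if (txn.count < 0 || txn.count > maxRetry) {
        continue;                 // failed or exhausted, needs manual purge
      }
      if (!offerToBatch.test(txn)) {
        break;                    // batch full, stop scanning
      }
    }
  }
}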
- */ -public class DeletedBlockLogImpl - implements DeletedBlockLog, EventHandler { - - public static final Logger LOG = - LoggerFactory.getLogger(DeletedBlockLogImpl.class); - - private static final byte[] LATEST_TXID = - DFSUtil.string2Bytes("#LATEST_TXID#"); - - private final int maxRetry; - private final MetadataStore deletedStore; - private final Mapping containerManager; - private final Lock lock; - // The latest id of deleted blocks in the db. - private long lastTxID; - // Maps txId to set of DNs which are successful in committing the transaction - private Map> transactionToDNsCommitMap; - - public DeletedBlockLogImpl(Configuration conf, Mapping containerManager) - throws IOException { - maxRetry = conf.getInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, - OZONE_SCM_BLOCK_DELETION_MAX_RETRY_DEFAULT); - - File metaDir = getOzoneMetaDirPath(conf); - String scmMetaDataDir = metaDir.getPath(); - File deletedLogDbPath = new File(scmMetaDataDir, DELETED_BLOCK_DB); - int cacheSize = conf.getInt(OZONE_SCM_DB_CACHE_SIZE_MB, - OZONE_SCM_DB_CACHE_SIZE_DEFAULT); - // Load store of all transactions. - deletedStore = MetadataStoreBuilder.newBuilder() - .setCreateIfMissing(true) - .setConf(conf) - .setDbFile(deletedLogDbPath) - .setCacheSize(cacheSize * OzoneConsts.MB) - .build(); - this.containerManager = containerManager; - - this.lock = new ReentrantLock(); - // start from the head of deleted store. - lastTxID = findLatestTxIDInStore(); - - // transactionToDNsCommitMap is updated only when - // transaction is added to the log and when it is removed. - - // maps transaction to dns which have committed it. - transactionToDNsCommitMap = new ConcurrentHashMap<>(); - } - - @VisibleForTesting - public MetadataStore getDeletedStore() { - return deletedStore; - } - - /** - * There is no need to lock before reading because - * it's only used in construct method. - * - * @return latest txid. - * @throws IOException - */ - private long findLatestTxIDInStore() throws IOException { - long txid = 0; - byte[] value = deletedStore.get(LATEST_TXID); - if (value != null) { - txid = Longs.fromByteArray(value); - } - return txid; - } - - @Override - public List getFailedTransactions() - throws IOException { - lock.lock(); - try { - final List failedTXs = Lists.newArrayList(); - deletedStore.iterate(null, (key, value) -> { - if (!Arrays.equals(LATEST_TXID, key)) { - DeletedBlocksTransaction delTX = - DeletedBlocksTransaction.parseFrom(value); - if (delTX.getCount() == -1) { - failedTXs.add(delTX); - } - } - return true; - }); - return failedTXs; - } finally { - lock.unlock(); - } - } - - /** - * {@inheritDoc} - * - * @param txIDs - transaction ID. - * @throws IOException - */ - @Override - public void incrementCount(List txIDs) throws IOException { - BatchOperation batch = new BatchOperation(); - lock.lock(); - try { - for(Long txID : txIDs) { - try { - byte[] deleteBlockBytes = - deletedStore.get(Longs.toByteArray(txID)); - if (deleteBlockBytes == null) { - LOG.warn("Delete txID {} not found", txID); - continue; - } - DeletedBlocksTransaction block = DeletedBlocksTransaction - .parseFrom(deleteBlockBytes); - DeletedBlocksTransaction.Builder builder = block.toBuilder(); - int currentCount = block.getCount(); - if (currentCount > -1) { - builder.setCount(++currentCount); - } - // if the retry time exceeds the maxRetry value - // then set the retry value to -1, stop retrying, admins can - // analyze those blocks and purge them manually by SCMCli. 
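The retry accounting implemented around this point -- a count lives in [0, MAX_RETRY], and anything beyond is parked at -1 for manual SCMCli cleanup, per the comment above -- reduces to a small pure function. A sketch with illustrative names:

// Illustrative retry accounting: one more failure past the limit pins the
// count at -1, the "analyze and purge manually" state referred to above.
final class RetryCount {
  static int increment(int current, int maxRetry) {
    if (current < 0) {
      return -1;                 // already failed, stays failed
    }
    int next = current + 1;
    return next > maxRetry ? -1 : next;
  }
}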
- if (currentCount > maxRetry) { - builder.setCount(-1); - } - deletedStore.put(Longs.toByteArray(txID), - builder.build().toByteArray()); - } catch (IOException ex) { - LOG.warn("Cannot increase count for txID " + txID, ex); - } - } - deletedStore.writeBatch(batch); - } finally { - lock.unlock(); - } - } - - private DeletedBlocksTransaction constructNewTransaction(long txID, - long containerID, List blocks) { - return DeletedBlocksTransaction.newBuilder() - .setTxID(txID) - .setContainerID(containerID) - .addAllLocalID(blocks) - .setCount(0) - .build(); - } - - /** - * {@inheritDoc} - * - * @param transactionResults - transaction IDs. - * @param dnID - Id of Datanode which has acknowledged a delete block command. - * @throws IOException - */ - @Override - public void commitTransactions( - List transactionResults, UUID dnID) { - lock.lock(); - try { - Set dnsWithCommittedTxn; - for (DeleteBlockTransactionResult transactionResult : - transactionResults) { - if (isTransactionFailed(transactionResult)) { - continue; - } - try { - long txID = transactionResult.getTxID(); - // set of dns which have successfully committed transaction txId. - dnsWithCommittedTxn = transactionToDNsCommitMap.get(txID); - Long containerId = transactionResult.getContainerID(); - if (dnsWithCommittedTxn == null) { - LOG.warn("Transaction txId={} commit by dnId={} for containerID={} " - + "failed. Corresponding entry not found.", txID, dnID, - containerId); - return; - } - - dnsWithCommittedTxn.add(dnID); - Pipeline pipeline = - containerManager.getContainerWithPipeline(containerId) - .getPipeline(); - Collection containerDnsDetails = - pipeline.getDatanodes().values(); - // The delete entry can be safely removed from the log if all the - // corresponding nodes commit the txn. It is required to check that - // the nodes returned in the pipeline match the replication factor. - if (min(containerDnsDetails.size(), dnsWithCommittedTxn.size()) - >= pipeline.getFactor().getNumber()) { - List containerDns = containerDnsDetails.stream() - .map(DatanodeDetails::getUuid) - .collect(Collectors.toList()); - if (dnsWithCommittedTxn.containsAll(containerDns)) { - transactionToDNsCommitMap.remove(txID); - LOG.debug("Purging txId={} from block deletion log", txID); - deletedStore.delete(Longs.toByteArray(txID)); - } - } - LOG.debug("Datanode txId={} containerId={} committed by dnId={}", - txID, containerId, dnID); - } catch (IOException e) { - LOG.warn("Could not commit delete block transaction: " + - transactionResult.getTxID(), e); - } - } - } finally { - lock.unlock(); - } - } - - private boolean isTransactionFailed(DeleteBlockTransactionResult result) { - if (LOG.isDebugEnabled()) { - LOG.debug( - "Got block deletion ACK from datanode, TXIDs={}, " + "success={}", - result.getTxID(), result.getSuccess()); - } - if (!result.getSuccess()) { - LOG.warn("Got failed ACK for TXID={}, prepare to resend the " - + "TX in next interval", result.getTxID()); - return true; - } - return false; - } - - /** - * {@inheritDoc} - * - * @param containerID - container ID. - * @param blocks - blocks that belong to the same container. 
- * @throws IOException - */ - @Override - public void addTransaction(long containerID, List blocks) - throws IOException { - BatchOperation batch = new BatchOperation(); - lock.lock(); - try { - DeletedBlocksTransaction tx = constructNewTransaction(lastTxID + 1, - containerID, blocks); - byte[] key = Longs.toByteArray(lastTxID + 1); - - batch.put(key, tx.toByteArray()); - batch.put(LATEST_TXID, Longs.toByteArray(lastTxID + 1)); - - deletedStore.writeBatch(batch); - lastTxID += 1; - } finally { - lock.unlock(); - } - } - - @Override - public int getNumOfValidTransactions() throws IOException { - lock.lock(); - try { - final AtomicInteger num = new AtomicInteger(0); - deletedStore.iterate(null, (key, value) -> { - // Exclude latest txid record - if (!Arrays.equals(LATEST_TXID, key)) { - DeletedBlocksTransaction delTX = - DeletedBlocksTransaction.parseFrom(value); - if (delTX.getCount() > -1) { - num.incrementAndGet(); - } - } - return true; - }); - return num.get(); - } finally { - lock.unlock(); - } - } - - /** - * {@inheritDoc} - * - * @param containerBlocksMap a map of containerBlocks. - * @throws IOException - */ - @Override - public void addTransactions( - Map> containerBlocksMap) - throws IOException { - BatchOperation batch = new BatchOperation(); - lock.lock(); - try { - long currentLatestID = lastTxID; - for (Map.Entry> entry : - containerBlocksMap.entrySet()) { - currentLatestID += 1; - byte[] key = Longs.toByteArray(currentLatestID); - DeletedBlocksTransaction tx = constructNewTransaction(currentLatestID, - entry.getKey(), entry.getValue()); - batch.put(key, tx.toByteArray()); - } - lastTxID = currentLatestID; - batch.put(LATEST_TXID, Longs.toByteArray(lastTxID)); - deletedStore.writeBatch(batch); - } finally { - lock.unlock(); - } - } - - @Override - public void close() throws IOException { - if (deletedStore != null) { - deletedStore.close(); - } - } - - @Override - public Map getTransactions( - DatanodeDeletedBlockTransactions transactions) throws IOException { - lock.lock(); - try { - Map deleteTransactionMap = new HashMap<>(); - deletedStore.iterate(null, (key, value) -> { - if (!Arrays.equals(LATEST_TXID, key)) { - DeletedBlocksTransaction block = DeletedBlocksTransaction - .parseFrom(value); - - if (block.getCount() > -1 && block.getCount() <= maxRetry) { - if (transactions.addTransaction(block, - transactionToDNsCommitMap.get(block.getTxID()))) { - deleteTransactionMap.put(block.getContainerID(), block.getTxID()); - transactionToDNsCommitMap - .putIfAbsent(block.getTxID(), new ConcurrentHashSet<>()); - } - } - return !transactions.isFull(); - } - return true; - }); - return deleteTransactionMap; - } finally { - lock.unlock(); - } - } - - @Override - public void onMessage(DeleteBlockStatus deleteBlockStatus, - EventPublisher publisher) { - ContainerBlocksDeletionACKProto ackProto = - deleteBlockStatus.getCmdStatus().getBlockDeletionAck(); - commitTransactions(ackProto.getResultsList(), - UUID.fromString(ackProto.getDnId())); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/PendingDeleteHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/PendingDeleteHandler.java deleted file mode 100644 index 736daac54c290..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/PendingDeleteHandler.java +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.block; - -import org.apache.hadoop.hdds.server.events.EventHandler; -import org.apache.hadoop.hdds.server.events.EventPublisher; - -public class PendingDeleteHandler implements - EventHandler { - - private SCMBlockDeletingService scmBlockDeletingService; - - public PendingDeleteHandler( - SCMBlockDeletingService scmBlockDeletingService) { - this.scmBlockDeletingService = scmBlockDeletingService; - } - - @Override - public void onMessage(PendingDeleteStatusList pendingDeleteStatusList, - EventPublisher publisher) { - scmBlockDeletingService.handlePendingDeletes(pendingDeleteStatusList); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/PendingDeleteStatusList.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/PendingDeleteStatusList.java deleted file mode 100644 index 904762db5962e..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/PendingDeleteStatusList.java +++ /dev/null @@ -1,79 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.block; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; - -import java.util.LinkedList; -import java.util.List; - -public class PendingDeleteStatusList { - - private List pendingDeleteStatuses; - private DatanodeDetails datanodeDetails; - - public PendingDeleteStatusList(DatanodeDetails datanodeDetails) { - this.datanodeDetails = datanodeDetails; - pendingDeleteStatuses = new LinkedList<>(); - } - - public void addPendingDeleteStatus(long dnDeleteTransactionId, - long scmDeleteTransactionId, long containerId) { - pendingDeleteStatuses.add( - new PendingDeleteStatus(dnDeleteTransactionId, scmDeleteTransactionId, - containerId)); - } - - public static class PendingDeleteStatus { - private long dnDeleteTransactionId; - private long scmDeleteTransactionId; - private long containerId; - - public PendingDeleteStatus(long dnDeleteTransactionId, - long scmDeleteTransactionId, long containerId) { - this.dnDeleteTransactionId = dnDeleteTransactionId; - this.scmDeleteTransactionId = scmDeleteTransactionId; - this.containerId = containerId; - } - - public long getDnDeleteTransactionId() { - return dnDeleteTransactionId; - } - - public long getScmDeleteTransactionId() { - return scmDeleteTransactionId; - } - - public long getContainerId() { - return containerId; - } - - } - - public List getPendingDeleteStatuses() { - return pendingDeleteStatuses; - } - - public int getNumPendingDeletes() { - return pendingDeleteStatuses.size(); - } - - public DatanodeDetails getDatanodeDetails() { - return datanodeDetails; - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java deleted file mode 100644 index b85d77f084175..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java +++ /dev/null @@ -1,196 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ -package org.apache.hadoop.hdds.scm.block; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.scm.container.Mapping; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; -import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand; -import org.apache.hadoop.util.Time; -import org.apache.hadoop.utils.BackgroundService; -import org.apache.hadoop.utils.BackgroundTask; -import org.apache.hadoop.utils.BackgroundTaskQueue; -import org.apache.hadoop.utils.BackgroundTaskResult.EmptyTaskResult; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.TimeUnit; - -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT; - -/** - * A background service running in SCM to delete blocks. This service scans - * the block deletion log at a certain interval and caches block deletion - * commands in {@link org.apache.hadoop.hdds.scm.node.CommandQueue}; the SCM - * HB thread asynchronously polls the cached commands and sends them to the - * datanodes for physical processing. - */ -public class SCMBlockDeletingService extends BackgroundService { - - public static final Logger LOG = - LoggerFactory.getLogger(SCMBlockDeletingService.class); - - // ThreadPoolSize=2, 1 for scheduler and the other for the scanner. - private final static int BLOCK_DELETING_SERVICE_CORE_POOL_SIZE = 2; - private final DeletedBlockLog deletedBlockLog; - private final Mapping mappingService; - private final NodeManager nodeManager; - private final EventPublisher eventPublisher; - - // Block delete limit size is dynamically calculated based on the container - // delete limit size (ozone.block.deleting.container.limit.per.interval) - // that is configured for the datanode. To ensure the DN does not wait for - // delete commands, we use this value multiplied by a factor of 2 as the - // final limit TX size for each node. - // Currently we implement a throttle algorithm that throttles delete blocks - // for each datanode. Each node is limited by the calculated size. First, - // current node info is fetched from the nodemanager, then the entire delLog - // is scanned from beginning to end. If one node reaches the maximum value, - // its records will be skipped; if not, scanning continues until it reaches - // the maximum value. Once all nodes are full, the scan stops.
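The sizing described in the comment above reduces to two small calculations; a hedged sketch with assumed parameter names:

// Illustrative restatement of the throttle sizing described above.
final class DeleteThrottle {
  // Twice the per-interval container limit, so a datanode never idles
  // waiting for the next round of delete commands.
  static int perNodeLimit(int containerLimitPerInterval) {
    if (containerLimitPerInterval <= 0) {
      throw new IllegalArgumentException("container limit must be positive");
    }
    return containerLimitPerInterval * 2;
  }

  // The scan across the whole deletion log stops once this many TXs
  // have been collected for the interval.
  static int globalLimit(int perNodeLimit, int healthyNodeCount) {
    return perNodeLimit * healthyNodeCount;
  }
}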
- private int blockDeleteLimitSize; - - public SCMBlockDeletingService(DeletedBlockLog deletedBlockLog, - Mapping mapper, NodeManager nodeManager, EventPublisher eventPublisher, - long interval, long serviceTimeout, Configuration conf) { - super("SCMBlockDeletingService", interval, TimeUnit.MILLISECONDS, - BLOCK_DELETING_SERVICE_CORE_POOL_SIZE, serviceTimeout); - this.deletedBlockLog = deletedBlockLog; - this.mappingService = mapper; - this.nodeManager = nodeManager; - this.eventPublisher = eventPublisher; - - int containerLimit = conf.getInt( - OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, - OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT); - Preconditions.checkArgument(containerLimit > 0, - "Container limit size should be " + "positive."); - // Use container limit value multiply by a factor 2 to ensure DN - // not wait for orders. - this.blockDeleteLimitSize = containerLimit * 2; - } - - @Override - public BackgroundTaskQueue getTasks() { - BackgroundTaskQueue queue = new BackgroundTaskQueue(); - queue.add(new DeletedBlockTransactionScanner()); - return queue; - } - - public void handlePendingDeletes(PendingDeleteStatusList deletionStatusList) { - DatanodeDetails dnDetails = deletionStatusList.getDatanodeDetails(); - for (PendingDeleteStatusList.PendingDeleteStatus deletionStatus : - deletionStatusList.getPendingDeleteStatuses()) { - LOG.info( - "Block deletion txnID mismatch in datanode {} for containerID {}." - + " Datanode delete txnID: {}, SCM txnID: {}", - dnDetails.getUuid(), deletionStatus.getContainerId(), - deletionStatus.getDnDeleteTransactionId(), - deletionStatus.getScmDeleteTransactionId()); - } - } - - private class DeletedBlockTransactionScanner - implements BackgroundTask { - - @Override - public int getPriority() { - return 1; - } - - @Override - public EmptyTaskResult call() throws Exception { - int dnTxCount = 0; - long startTime = Time.monotonicNow(); - // Scan SCM DB in HB interval and collect a throttled list of - // to delete blocks. - LOG.debug("Running DeletedBlockTransactionScanner"); - DatanodeDeletedBlockTransactions transactions = null; - List datanodes = nodeManager.getNodes(NodeState.HEALTHY); - Map transactionMap = null; - if (datanodes != null) { - transactions = new DatanodeDeletedBlockTransactions(mappingService, - blockDeleteLimitSize, datanodes.size()); - try { - transactionMap = deletedBlockLog.getTransactions(transactions); - } catch (IOException e) { - // We may tolerant a number of failures for sometime - // but if it continues to fail, at some point we need to raise - // an exception and probably fail the SCM ? At present, it simply - // continues to retry the scanning. - LOG.error("Failed to get block deletion transactions from delTX log", - e); - } - LOG.debug("Scanned deleted blocks log and got {} delTX to process.", - transactions.getTXNum()); - } - - if (transactions != null && !transactions.isEmpty()) { - for (UUID dnId : transactions.getDatanodeIDs()) { - List dnTXs = transactions - .getDatanodeTransactions(dnId); - if (dnTXs != null && !dnTXs.isEmpty()) { - dnTxCount += dnTXs.size(); - // TODO commandQueue needs a cap. - // We should stop caching new commands if num of un-processed - // command is bigger than a limit, e.g 50. In case datanode goes - // offline for sometime, the cached commands be flooded. 
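The TODO above proposes capping unprocessed commands per datanode (e.g. at 50). A sketch of such a guard under that assumption; the class, the cap value, and the method names are all hypothetical:

    import java.util.Map;
    import java.util.UUID;
    import java.util.concurrent.ConcurrentHashMap;

    // Hypothetical backpressure guard: stop caching commands for a
    // datanode once its unprocessed backlog reaches the cap.
    final class CommandBacklogGuard {
      private static final int MAX_PENDING = 50; // illustrative cap
      private final Map<UUID, Integer> pending = new ConcurrentHashMap<>();

      boolean tryEnqueue(UUID dnId) {
        int backlog = pending.merge(dnId, 1, Integer::sum);
        if (backlog > MAX_PENDING) {
          pending.merge(dnId, -1, Integer::sum); // roll back; caller skips
          return false;
        }
        return true;
      }

      void onCommandProcessed(UUID dnId) {
        pending.merge(dnId, -1, Integer::sum);
      }
    }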
- eventPublisher.fireEvent(SCMEvents.RETRIABLE_DATANODE_COMMAND, - new CommandForDatanode<>(dnId, new DeleteBlocksCommand(dnTXs))); - LOG.debug( - "Added delete block command for datanode {} in the queue," - + " number of delete block transactions: {}, TxID list: {}", - dnId, dnTXs.size(), String.join(",", - transactions.getTransactionIDList(dnId))); - } - } - mappingService.updateDeleteTransactionId(transactionMap); - } - - if (dnTxCount > 0) { - LOG.info( - "Totally added {} delete blocks command for" - + " {} datanodes, task elapsed time: {}ms", - dnTxCount, transactions.getDatanodeIDs().size(), - Time.monotonicNow() - startTime); - } - - return EmptyTaskResult.newResult(); - } - } - - @VisibleForTesting - public void setBlockDeleteTXNum(int numTXs) { - blockDeleteLimitSize = numTXs; - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/package-info.java deleted file mode 100644 index e1bfdff5063db..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.block; -/** - * This package contains routines to manage the block location and - * mapping inside SCM - */ diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/CommandStatusReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/CommandStatusReportHandler.java deleted file mode 100644 index c0de3820bf24c..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/CommandStatusReportHandler.java +++ /dev/null @@ -1,131 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.command; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.CommandStatus; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher - .CommandStatusReportFromDatanode; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.hdds.server.events.EventHandler; -import org.apache.hadoop.hdds.server.events.IdentifiableEventPayload; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.List; - -/** - * Handles CommandStatusReports from datanode. - */ -public class CommandStatusReportHandler implements - EventHandler { - - private static final Logger LOGGER = LoggerFactory - .getLogger(CommandStatusReportHandler.class); - - @Override - public void onMessage(CommandStatusReportFromDatanode report, - EventPublisher publisher) { - Preconditions.checkNotNull(report); - List cmdStatusList = report.getReport().getCmdStatusList(); - Preconditions.checkNotNull(cmdStatusList); - LOGGER.trace("Processing command status report for dn: {}", report - .getDatanodeDetails()); - - // Route command status to its watchers. - cmdStatusList.forEach(cmdStatus -> { - LOGGER.trace("Emitting command status for id:{} type: {}", cmdStatus - .getCmdId(), cmdStatus.getType()); - switch (cmdStatus.getType()) { - case replicateContainerCommand: - publisher.fireEvent(SCMEvents.REPLICATION_STATUS, new - ReplicationStatus(cmdStatus)); - break; - case closeContainerCommand: - publisher.fireEvent(SCMEvents.CLOSE_CONTAINER_STATUS, new - CloseContainerStatus(cmdStatus)); - break; - case deleteBlocksCommand: - if (cmdStatus.getStatus() == CommandStatus.Status.EXECUTED) { - publisher.fireEvent(SCMEvents.DELETE_BLOCK_STATUS, - new DeleteBlockStatus(cmdStatus)); - } - break; - default: - LOGGER.debug("CommandStatus of type:{} not handled in " + - "CommandStatusReportHandler.", cmdStatus.getType()); - break; - } - }); - } - - /** - * Wrapper event for CommandStatus. - */ - public static class CommandStatusEvent implements IdentifiableEventPayload { - private CommandStatus cmdStatus; - - CommandStatusEvent(CommandStatus cmdStatus) { - this.cmdStatus = cmdStatus; - } - - public CommandStatus getCmdStatus() { - return cmdStatus; - } - - @Override - public String toString() { - return "CommandStatusEvent:" + cmdStatus.toString(); - } - - @Override - public long getId() { - return cmdStatus.getCmdId(); - } - } - - /** - * Wrapper event for Replicate Command. - */ - public static class ReplicationStatus extends CommandStatusEvent { - public ReplicationStatus(CommandStatus cmdStatus) { - super(cmdStatus); - } - } - - /** - * Wrapper event for CloseContainer Command. - */ - public static class CloseContainerStatus extends CommandStatusEvent { - public CloseContainerStatus(CommandStatus cmdStatus) { - super(cmdStatus); - } - } - - /** - * Wrapper event for DeleteBlock Command. 
- */ - public static class DeleteBlockStatus extends CommandStatusEvent { - public DeleteBlockStatus(CommandStatus cmdStatus) { - super(cmdStatus); - } - } - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/package-info.java deleted file mode 100644 index ba17fb9eeaaad..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/package-info.java +++ /dev/null @@ -1,26 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - *
- * This package contains HDDS protocol related classes. - */ - -/** - * This package contains HDDS protocol related classes. - */ -package org.apache.hadoop.hdds.scm.command; -/* - * Classes related to commands issued from SCM to DataNode. - * */ \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java deleted file mode 100644 index 7baecc4b1f85b..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java +++ /dev/null @@ -1,160 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container; - -import java.io.IOException; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.hdds.server.events.EventHandler; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.hdds.server.events.IdentifiableEventPayload; -import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; -import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static org.apache.hadoop.hdds.scm.events.SCMEvents.DATANODE_COMMAND; -import static org.apache.hadoop.hdds.scm.events.SCMEvents.CLOSE_CONTAINER_RETRYABLE_REQ; - -/** - * In case of a node failure, volume failure, volume out of spapce, node - * out of space etc, CLOSE_CONTAINER will be triggered. - * CloseContainerEventHandler is the handler for CLOSE_CONTAINER. - * When a close container event is fired, a close command for the container - * should be sent to all the datanodes in the pipeline and containerStateManager - * needs to update the container state to Closing. - */ -public class CloseContainerEventHandler implements EventHandler { - - public static final Logger LOG = - LoggerFactory.getLogger(CloseContainerEventHandler.class); - - - private final Mapping containerManager; - - public CloseContainerEventHandler(Mapping containerManager) { - this.containerManager = containerManager; - } - - @Override - public void onMessage(ContainerID containerID, EventPublisher publisher) { - - LOG.info("Close container Event triggered for container : {}", - containerID.getId()); - ContainerWithPipeline containerWithPipeline; - ContainerInfo info; - try { - containerWithPipeline = - containerManager.getContainerWithPipeline(containerID.getId()); - info = containerWithPipeline.getContainerInfo(); - if (info == null) { - LOG.error("Failed to update the container state. Container with id : {}" - + " does not exist", containerID.getId()); - return; - } - } catch (IOException e) { - LOG.error("Failed to update the container state. Container with id : {} " - + "does not exist", containerID.getId(), e); - return; - } - - HddsProtos.LifeCycleState state = info.getState(); - try { - switch (state) { - case ALLOCATED: - // We cannot close a container in ALLOCATED state, moving the - // container to CREATING state, this should eventually - // timeout and the container will be moved to DELETING state. - LOG.debug("Closing container {} in {} state", containerID, state); - containerManager.updateContainerState(containerID.getId(), - HddsProtos.LifeCycleEvent.CREATE); - break; - case CREATING: - // We cannot close a container in CREATING state, it will eventually - // timeout and moved to DELETING state. 
- LOG.debug("Closing container {} in {} state", containerID, state); - break; - case OPEN: - containerManager.updateContainerState(containerID.getId(), - HddsProtos.LifeCycleEvent.FINALIZE); - fireCloseContainerEvents(containerWithPipeline, info, publisher); - break; - case CLOSING: - fireCloseContainerEvents(containerWithPipeline, info, publisher); - break; - case CLOSED: - case DELETING: - case DELETED: - LOG.info( - "container with id : {} is in {} state and need not be closed.", - containerID.getId(), info.getState()); - break; - default: - throw new IOException( - "Invalid container state for container " + containerID); - } - } catch (IOException ex) { - LOG.error("Failed to update the container state for" + "container : {}" - + containerID, ex); - } - } - - private void fireCloseContainerEvents( - ContainerWithPipeline containerWithPipeline, ContainerInfo info, - EventPublisher publisher) { - ContainerID containerID = info.containerID(); - // fire events. - CloseContainerCommand closeContainerCommand = - new CloseContainerCommand(containerID.getId(), - info.getReplicationType(), info.getPipelineID()); - - Pipeline pipeline = containerWithPipeline.getPipeline(); - pipeline.getMachines().stream().map( - datanode -> new CommandForDatanode<>(datanode.getUuid(), - closeContainerCommand)).forEach((command) -> { - publisher.fireEvent(DATANODE_COMMAND, command); - }); - publisher.fireEvent(CLOSE_CONTAINER_RETRYABLE_REQ, - new CloseContainerRetryableReq(containerID)); - LOG.trace("Issuing {} on Pipeline {} for container", closeContainerCommand, - pipeline, containerID); - } - - /** - * Class to create retryable event. Prevents redundant requests for same - * container Id. - */ - public static class CloseContainerRetryableReq implements - IdentifiableEventPayload { - - private ContainerID containerID; - public CloseContainerRetryableReq(ContainerID containerID) { - this.containerID = containerID; - } - - public ContainerID getContainerID() { - return containerID; - } - - @Override - public long getId() { - return containerID.getId(); - } - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerWatcher.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerWatcher.java deleted file mode 100644 index 8e277b9f36953..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerWatcher.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container; - -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.CommandStatus.Status; -import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler - .CloseContainerStatus; - -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.server.events.Event; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.hdds.server.events.EventWatcher; -import org.apache.hadoop.hdds.scm.container.CloseContainerEventHandler - .CloseContainerRetryableReq; -import org.apache.hadoop.ozone.lease.LeaseManager; -import org.apache.hadoop.ozone.lease.LeaseNotFoundException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; - -/** - * This watcher will watch for CLOSE_CONTAINER_STATUS events fired from - * CommandStatusReport. If required it will re-trigger CloseContainer command - * for DataNodes to CloseContainerEventHandler. - */ -public class CloseContainerWatcher extends - EventWatcher { - - public static final Logger LOG = - LoggerFactory.getLogger(CloseContainerWatcher.class); - private final Mapping containerManager; - - public CloseContainerWatcher(Event startEvent, - Event completionEvent, - LeaseManager leaseManager, Mapping containerManager) { - super(startEvent, completionEvent, leaseManager); - this.containerManager = containerManager; - } - - @Override - protected void onTimeout(EventPublisher publisher, - CloseContainerRetryableReq payload) { - // Let CloseContainerEventHandler handle this message. - this.resendEventToHandler(payload.getId(), publisher); - } - - @Override - protected void onFinished(EventPublisher publisher, - CloseContainerRetryableReq payload) { - LOG.trace("CloseContainerCommand for containerId: {} executed ", payload - .getContainerID().getId()); - } - - @Override - protected synchronized void handleCompletion(CloseContainerStatus status, - EventPublisher publisher) throws LeaseNotFoundException { - // If status is PENDING then return without doing anything. - if(status.getCmdStatus().getStatus().equals(Status.PENDING)){ - return; - } - - CloseContainerRetryableReq closeCont = getTrackedEventbyId(status.getId()); - super.handleCompletion(status, publisher); - // If status is FAILED then send a msg to Handler to resend the command. 
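The completion handling above resends only when three things line up; condensed into one predicate as a sketch (the helper name is hypothetical, the rule is the one implemented just below in handleCompletion and resendEventToHandler):

    // Sketch of the retry rule: resend the close command only for a
    // FAILED status whose container is still tracked by the watcher and
    // still open in SCM.
    static boolean shouldResend(boolean failed, boolean stillTracked,
        boolean containerOpen) {
      return failed && stillTracked && containerOpen;
    }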
- if (status.getCmdStatus().getStatus().equals(Status.FAILED) && closeCont - != null) { - this.resendEventToHandler(closeCont.getId(), publisher); - } - } - - private void resendEventToHandler(long containerID, EventPublisher - publisher) { - try { - // Check if container is still open - if (containerManager.getContainer(containerID).isContainerOpen()) { - publisher.fireEvent(SCMEvents.CLOSE_CONTAINER, - ContainerID.valueof(containerID)); - } - } catch (IOException e) { - LOG.warn("Error in CloseContainerWatcher while processing event " + - "for containerId {} ExceptionMsg: ", containerID, e.getMessage()); - } - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerActionsHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerActionsHandler.java deleted file mode 100644 index ce399eb89b8b7..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerActionsHandler.java +++ /dev/null @@ -1,60 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.container; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerAction; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher - .ContainerActionsFromDatanode; -import org.apache.hadoop.hdds.server.events.EventHandler; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Handles container reports from datanode. - */ -public class ContainerActionsHandler implements - EventHandler { - - private static final Logger LOG = LoggerFactory.getLogger( - ContainerActionsHandler.class); - - @Override - public void onMessage( - ContainerActionsFromDatanode containerReportFromDatanode, - EventPublisher publisher) { - DatanodeDetails dd = containerReportFromDatanode.getDatanodeDetails(); - for (ContainerAction action : containerReportFromDatanode.getReport() - .getContainerActionsList()) { - ContainerID containerId = ContainerID.valueof(action.getContainerID()); - switch (action.getAction()) { - case CLOSE: - LOG.debug("Closing container {} in datanode {} because the" + - " container is {}.", containerId, dd, action.getReason()); - publisher.fireEvent(SCMEvents.CLOSE_CONTAINER, containerId); - break; - default: - LOG.warn("Invalid action {} with reason {}, from datanode {}. 
", - action.getAction(), action.getReason(), dd); } - } - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java deleted file mode 100644 index 71e17e9982db5..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java +++ /dev/null @@ -1,699 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.primitives.Longs; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.SCMContainerInfo; -import org.apache.hadoop.hdds.scm.block.PendingDeleteStatusList; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.lease.Lease; -import org.apache.hadoop.ozone.lease.LeaseException; -import org.apache.hadoop.ozone.lease.LeaseManager; -import org.apache.hadoop.utils.BatchOperation; -import org.apache.hadoop.utils.MetadataStore; -import org.apache.hadoop.utils.MetadataStoreBuilder; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.nio.charset.Charset; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_CONTAINER_SIZE_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_CONTAINER_SIZE; -import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes - .FAILED_TO_CHANGE_CONTAINER_STATE; -import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath; -import static org.apache.hadoop.ozone.OzoneConsts.SCM_CONTAINER_DB; - -/** - * Mapping class contains the mapping from a name to a pipeline mapping. This - * is used by SCM when - * allocating new locations and when looking up a key. 
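Every lookup in this class follows one pattern, repeated across the methods below: encode the container ID as an 8-byte key, fetch the protobuf bytes from the MetadataStore, and parse. A sketch of that read path, using the same imports as this file; the static helper itself is a stand-in, but the calls appear verbatim in getContainer below:

    // Sketch of the read path used by getContainer and friends:
    // 8-byte big-endian key -> protobuf bytes -> SCMContainerInfo.
    static HddsProtos.SCMContainerInfo readContainer(
        MetadataStore store, long containerID) throws IOException {
      byte[] containerBytes = store.get(Longs.toByteArray(containerID));
      if (containerBytes == null) {
        throw new IOException(
            "Specified key does not exist. key : " + containerID);
      }
      return HddsProtos.SCMContainerInfo.PARSER.parseFrom(containerBytes);
    }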
- */ -public class ContainerMapping implements Mapping { - private static final Logger LOG = LoggerFactory.getLogger(ContainerMapping - .class); - - private final NodeManager nodeManager; - private final long cacheSize; - private final Lock lock; - private final Charset encoding = Charset.forName("UTF-8"); - private final MetadataStore containerStore; - private final PipelineSelector pipelineSelector; - private final ContainerStateManager containerStateManager; - private final LeaseManager containerLeaseManager; - private final EventPublisher eventPublisher; - private final long size; - - /** - * Constructs a mapping class that creates mapping between container names - * and pipelines. - * - * @param nodeManager - NodeManager so that we can get the nodes that are - * healthy to place new - * containers. - * @param cacheSizeMB - Amount of memory reserved for the LSM tree to cache - * its nodes. This is - * passed to LevelDB and this memory is allocated in Native code space. - * CacheSize is specified - * in MB. - * @throws IOException on Failure. - */ - @SuppressWarnings("unchecked") - public ContainerMapping( - final Configuration conf, final NodeManager nodeManager, final int - cacheSizeMB, EventPublisher eventPublisher) throws IOException { - this.nodeManager = nodeManager; - this.cacheSize = cacheSizeMB; - - File metaDir = getOzoneMetaDirPath(conf); - - // Write the container name to pipeline mapping. - File containerDBPath = new File(metaDir, SCM_CONTAINER_DB); - containerStore = - MetadataStoreBuilder.newBuilder() - .setConf(conf) - .setDbFile(containerDBPath) - .setCacheSize(this.cacheSize * OzoneConsts.MB) - .build(); - - this.lock = new ReentrantLock(); - - size = (long)conf.getStorageSize(OZONE_SCM_CONTAINER_SIZE, - OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES); - - this.pipelineSelector = new PipelineSelector(nodeManager, - conf, eventPublisher, cacheSizeMB); - - this.containerStateManager = - new ContainerStateManager(conf, this, pipelineSelector); - LOG.trace("Container State Manager created."); - - this.eventPublisher = eventPublisher; - - long containerCreationLeaseTimeout = conf.getTimeDuration( - ScmConfigKeys.OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT, - ScmConfigKeys.OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT_DEFAULT, - TimeUnit.MILLISECONDS); - containerLeaseManager = new LeaseManager<>("ContainerCreation", - containerCreationLeaseTimeout); - containerLeaseManager.start(); - } - - /** - * {@inheritDoc} - */ - @Override - public ContainerInfo getContainer(final long containerID) throws - IOException { - ContainerInfo containerInfo; - lock.lock(); - try { - byte[] containerBytes = containerStore.get( - Longs.toByteArray(containerID)); - if (containerBytes == null) { - throw new SCMException( - "Specified key does not exist. key : " + containerID, - SCMException.ResultCodes.FAILED_TO_FIND_CONTAINER); - } - - HddsProtos.SCMContainerInfo temp = HddsProtos.SCMContainerInfo.PARSER - .parseFrom(containerBytes); - containerInfo = ContainerInfo.fromProtobuf(temp); - return containerInfo; - } finally { - lock.unlock(); - } - } - - /** - * Returns the ContainerInfo and pipeline from the containerID. If container - * has no available replicas in datanodes it returns pipeline with no - * datanodes and empty leaderID . Pipeline#isEmpty can be used to check for - * an empty pipeline. - * - * @param containerID - ID of container. - * @return - ContainerWithPipeline such as creation state and the pipeline. 
- * @throws IOException - */ - @Override - public ContainerWithPipeline getContainerWithPipeline(long containerID) - throws IOException { - ContainerInfo contInfo; - lock.lock(); - try { - byte[] containerBytes = containerStore.get( - Longs.toByteArray(containerID)); - if (containerBytes == null) { - throw new SCMException( - "Specified key does not exist. key : " + containerID, - SCMException.ResultCodes.FAILED_TO_FIND_CONTAINER); - } - HddsProtos.SCMContainerInfo temp = HddsProtos.SCMContainerInfo.PARSER - .parseFrom(containerBytes); - contInfo = ContainerInfo.fromProtobuf(temp); - - Pipeline pipeline; - String leaderId = ""; - if (contInfo.isContainerOpen()) { - // If pipeline with given pipeline Id already exist return it - pipeline = pipelineSelector.getPipeline(contInfo.getPipelineID()); - } else { - // For close containers create pipeline from datanodes with replicas - Set dnWithReplicas = containerStateManager - .getContainerReplicas(contInfo.containerID()); - if (!dnWithReplicas.isEmpty()) { - leaderId = dnWithReplicas.iterator().next().getUuidString(); - } - pipeline = new Pipeline(leaderId, contInfo.getState(), - ReplicationType.STAND_ALONE, contInfo.getReplicationFactor(), - PipelineID.randomId()); - dnWithReplicas.forEach(pipeline::addMember); - } - return new ContainerWithPipeline(contInfo, pipeline); - } finally { - lock.unlock(); - } - } - - /** - * {@inheritDoc} - */ - @Override - public List listContainer(long startContainerID, - int count) throws IOException { - List containerList = new ArrayList<>(); - lock.lock(); - try { - if (containerStore.isEmpty()) { - throw new IOException("No container exists in current db"); - } - byte[] startKey = startContainerID <= 0 ? null : - Longs.toByteArray(startContainerID); - List> range = - containerStore.getSequentialRangeKVs(startKey, count, null); - - // Transform the values into the pipelines. - // TODO: filter by container state - for (Map.Entry entry : range) { - ContainerInfo containerInfo = - ContainerInfo.fromProtobuf( - HddsProtos.SCMContainerInfo.PARSER.parseFrom( - entry.getValue())); - Preconditions.checkNotNull(containerInfo); - containerList.add(containerInfo); - } - } finally { - lock.unlock(); - } - return containerList; - } - - /** - * Allocates a new container. - * - * @param replicationFactor - replication factor of the container. - * @param owner - The string name of the Service that owns this container. - * @return - Pipeline that makes up this container. - * @throws IOException - Exception - */ - @Override - public ContainerWithPipeline allocateContainer( - ReplicationType type, - ReplicationFactor replicationFactor, - String owner) - throws IOException { - - ContainerInfo containerInfo; - ContainerWithPipeline containerWithPipeline; - - lock.lock(); - try { - containerWithPipeline = containerStateManager.allocateContainer( - pipelineSelector, type, replicationFactor, owner); - containerInfo = containerWithPipeline.getContainerInfo(); - - byte[] containerIDBytes = Longs.toByteArray( - containerInfo.getContainerID()); - containerStore.put(containerIDBytes, containerInfo.getProtobuf() - .toByteArray()); - } finally { - lock.unlock(); - } - return containerWithPipeline; - } - - /** - * Deletes a container from SCM. - * - * @param containerID - Container ID - * @throws IOException if container doesn't exist or container store failed - * to delete the - * specified key. 
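For a closed container, getContainerWithPipeline above synthesizes a stand-alone pipeline from whichever datanodes still hold replicas, electing the first replica as the nominal leader. The same logic isolated as a sketch (types as imported in this file; the helper itself is hypothetical):

    // Sketch of the closed-container branch of getContainerWithPipeline:
    // build a synthetic STAND_ALONE pipeline from the replica set.
    static Pipeline pipelineFromReplicas(Set<DatanodeDetails> replicas,
        ContainerInfo info) {
      String leaderId = replicas.isEmpty()
          ? "" : replicas.iterator().next().getUuidString();
      Pipeline pipeline = new Pipeline(leaderId, info.getState(),
          ReplicationType.STAND_ALONE, info.getReplicationFactor(),
          PipelineID.randomId());
      replicas.forEach(pipeline::addMember);
      return pipeline;
    }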
- */ - @Override - public void deleteContainer(long containerID) throws IOException { - lock.lock(); - try { - byte[] dbKey = Longs.toByteArray(containerID); - byte[] containerBytes = containerStore.get(dbKey); - if (containerBytes == null) { - throw new SCMException( - "Failed to delete container " + containerID + ", reason : " + - "container doesn't exist.", - SCMException.ResultCodes.FAILED_TO_FIND_CONTAINER); - } - containerStore.delete(dbKey); - } finally { - lock.unlock(); - } - } - - /** - * {@inheritDoc} Used by client to update container state on SCM. - */ - @Override - public HddsProtos.LifeCycleState updateContainerState( - long containerID, HddsProtos.LifeCycleEvent event) throws - IOException { - ContainerInfo containerInfo; - lock.lock(); - try { - byte[] dbKey = Longs.toByteArray(containerID); - byte[] containerBytes = containerStore.get(dbKey); - if (containerBytes == null) { - throw new SCMException( - "Failed to update container state" - + containerID - + ", reason : container doesn't exist.", - SCMException.ResultCodes.FAILED_TO_FIND_CONTAINER); - } - containerInfo = - ContainerInfo.fromProtobuf(HddsProtos.SCMContainerInfo.PARSER - .parseFrom(containerBytes)); - - Preconditions.checkNotNull(containerInfo); - switch (event) { - case CREATE: - // Acquire lease on container - Lease containerLease = - containerLeaseManager.acquire(containerInfo); - // Register callback to be executed in case of timeout - containerLease.registerCallBack(() -> { - updateContainerState(containerID, - HddsProtos.LifeCycleEvent.TIMEOUT); - return null; - }); - break; - case CREATED: - // Release the lease on container - containerLeaseManager.release(containerInfo); - break; - case FINALIZE: - // TODO: we don't need a lease manager here for closing as the - // container report will include the container state after HDFS-13008 - // If a client failed to update the container close state, DN container - // report from 3 DNs will be used to close the container eventually. - break; - case CLOSE: - break; - case UPDATE: - break; - case DELETE: - break; - case TIMEOUT: - break; - case CLEANUP: - break; - default: - throw new SCMException("Unsupported container LifeCycleEvent.", - FAILED_TO_CHANGE_CONTAINER_STATE); - } - // If the below updateContainerState call fails, we should revert the - // changes made in switch case. - // Like releasing the lease in case of BEGIN_CREATE. - ContainerInfo updatedContainer = containerStateManager - .updateContainerState(containerInfo, event); - if (!updatedContainer.isContainerOpen()) { - pipelineSelector.removeContainerFromPipeline( - containerInfo.getPipelineID(), containerID); - } - containerStore.put(dbKey, updatedContainer.getProtobuf().toByteArray()); - return updatedContainer.getState(); - } catch (LeaseException e) { - throw new IOException("Lease Exception.", e); - } finally { - lock.unlock(); - } - } - - /** - * Update deleteTransactionId according to deleteTransactionMap. - * - * @param deleteTransactionMap Maps the containerId to latest delete - * transaction id for the container. 
- * @throws IOException - */ - public void updateDeleteTransactionId(Map deleteTransactionMap) - throws IOException { - if (deleteTransactionMap == null) { - return; - } - - lock.lock(); - try { - BatchOperation batch = new BatchOperation(); - for (Map.Entry entry : deleteTransactionMap.entrySet()) { - long containerID = entry.getKey(); - byte[] dbKey = Longs.toByteArray(containerID); - byte[] containerBytes = containerStore.get(dbKey); - if (containerBytes == null) { - throw new SCMException( - "Failed to increment number of deleted blocks for container " - + containerID + ", reason : " + "container doesn't exist.", - SCMException.ResultCodes.FAILED_TO_FIND_CONTAINER); - } - ContainerInfo containerInfo = ContainerInfo.fromProtobuf( - HddsProtos.SCMContainerInfo.parseFrom(containerBytes)); - containerInfo.updateDeleteTransactionId(entry.getValue()); - batch.put(dbKey, containerInfo.getProtobuf().toByteArray()); - } - containerStore.writeBatch(batch); - containerStateManager - .updateDeleteTransactionId(deleteTransactionMap); - } finally { - lock.unlock(); - } - } - - /** - * Returns the container State Manager. - * - * @return ContainerStateManager - */ - @Override - public ContainerStateManager getStateManager() { - return containerStateManager; - } - - /** - * Return a container matching the attributes specified. - * - * @param sizeRequired - Space needed in the Container. - * @param owner - Owner of the container - A specific nameservice. - * @param type - Replication Type {StandAlone, Ratis} - * @param factor - Replication Factor {ONE, THREE} - * @param state - State of the Container-- {Open, Allocated etc.} - * @return ContainerInfo, null if there is no match found. - */ - public ContainerWithPipeline getMatchingContainerWithPipeline( - final long sizeRequired, String owner, ReplicationType type, - ReplicationFactor factor, LifeCycleState state) throws IOException { - ContainerInfo containerInfo = getStateManager() - .getMatchingContainer(sizeRequired, owner, type, factor, state); - if (containerInfo == null) { - return null; - } - Pipeline pipeline = pipelineSelector - .getPipeline(containerInfo.getPipelineID()); - return new ContainerWithPipeline(containerInfo, pipeline); - } - - /** - * Process container report from Datanode. - *
- * Processing follows a very simple logic for the time being:
- *
- * 1. Datanodes report the current state -- denoted by the datanodeState.
- *
- * 2. We have the older SCM state from the database -- denoted by
- * the knownState.
- *
- * 3. We copy the usage etc. from currentState to newState and log that - * newState to the DB. This allows us SCM to bootup again and read the - * state of the world from the DB, and then reconcile the state from - * container reports, when they arrive. - * - * @param reports Container report - */ - @Override - public void processContainerReports(DatanodeDetails datanodeDetails, - ContainerReportsProto reports, boolean isRegisterCall) - throws IOException { - List - containerInfos = reports.getReportsList(); - PendingDeleteStatusList pendingDeleteStatusList = - new PendingDeleteStatusList(datanodeDetails); - for (StorageContainerDatanodeProtocolProtos.ContainerInfo contInfo : - containerInfos) { - // Update replica info during registration process. - if (isRegisterCall) { - try { - getStateManager().addContainerReplica(ContainerID. - valueof(contInfo.getContainerID()), datanodeDetails); - } catch (Exception ex) { - // Continue to next one after logging the error. - LOG.error("Error while adding replica for containerId {}.", - contInfo.getContainerID(), ex); - } - } - byte[] dbKey = Longs.toByteArray(contInfo.getContainerID()); - lock.lock(); - try { - byte[] containerBytes = containerStore.get(dbKey); - if (containerBytes != null) { - HddsProtos.SCMContainerInfo knownState = - HddsProtos.SCMContainerInfo.PARSER.parseFrom(containerBytes); - - if (knownState.getState() == LifeCycleState.CLOSING - && contInfo.getState() == LifeCycleState.CLOSED) { - - updateContainerState(contInfo.getContainerID(), - LifeCycleEvent.CLOSE); - - //reread the container - knownState = - HddsProtos.SCMContainerInfo.PARSER - .parseFrom(containerStore.get(dbKey)); - } - - HddsProtos.SCMContainerInfo newState = - reconcileState(contInfo, knownState, datanodeDetails); - - if (knownState.getDeleteTransactionId() > contInfo - .getDeleteTransactionId()) { - pendingDeleteStatusList - .addPendingDeleteStatus(contInfo.getDeleteTransactionId(), - knownState.getDeleteTransactionId(), - knownState.getContainerID()); - } - - // FIX ME: This can be optimized, we write twice to memory, where a - // single write would work well. - // - // We need to write this to DB again since the closed only write - // the updated State. - containerStore.put(dbKey, newState.toByteArray()); - - } else { - // Container not found in our container db. - LOG.error("Error while processing container report from datanode :" + - " {}, for container: {}, reason: container doesn't exist in" + - "container database.", datanodeDetails, - contInfo.getContainerID()); - } - } finally { - lock.unlock(); - } - } - if (pendingDeleteStatusList.getNumPendingDeletes() > 0) { - eventPublisher.fireEvent(SCMEvents.PENDING_DELETE_STATUS, - pendingDeleteStatusList); - } - - } - - /** - * Reconciles the state from Datanode with the state in SCM. - * - * @param datanodeState - State from the Datanode. - * @param knownState - State inside SCM. - * @param dnDetails - * @return new SCM State for this container. 
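One rule in reconcileState below is easy to miss: if a datanode reports more used bytes than SCM believes were allocated, the allocated figure is raised to match, as a fallback for SCM crashing before persisting allocations. As a one-line sketch (the helper name is hypothetical):

    // From reconcileState: never let allocated bytes trail used bytes.
    static long reconcileAllocatedBytes(long knownAllocated,
        long reportedUsed) {
      return Math.max(knownAllocated, reportedUsed);
    }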
- */ - private HddsProtos.SCMContainerInfo reconcileState( - StorageContainerDatanodeProtocolProtos.ContainerInfo datanodeState, - SCMContainerInfo knownState, DatanodeDetails dnDetails) { - HddsProtos.SCMContainerInfo.Builder builder = - HddsProtos.SCMContainerInfo.newBuilder(); - builder.setContainerID(knownState.getContainerID()) - .setPipelineID(knownState.getPipelineID()) - .setReplicationType(knownState.getReplicationType()) - .setReplicationFactor(knownState.getReplicationFactor()); - - // TODO: If current state doesn't have this DN in list of DataNodes with - // replica then add it in list of replicas. - - // If used size is greater than allocated size, we will be updating - // allocated size with used size. This update is done as a fallback - // mechanism in case SCM crashes without properly updating allocated - // size. Correct allocated value will be updated by - // ContainerStateManager during SCM shutdown. - long usedSize = datanodeState.getUsed(); - long allocated = knownState.getAllocatedBytes() > usedSize ? - knownState.getAllocatedBytes() : usedSize; - builder.setAllocatedBytes(allocated) - .setUsedBytes(usedSize) - .setNumberOfKeys(datanodeState.getKeyCount()) - .setState(knownState.getState()) - .setStateEnterTime(knownState.getStateEnterTime()) - .setContainerID(knownState.getContainerID()) - .setDeleteTransactionId(knownState.getDeleteTransactionId()); - if (knownState.getOwner() != null) { - builder.setOwner(knownState.getOwner()); - } - return builder.build(); - } - - - /** - * In Container is in closed state, if it is in closed, Deleting or Deleted - * State. - * - * @param info - ContainerInfo. - * @return true if is in open state, false otherwise - */ - private boolean shouldClose(ContainerInfo info) { - return info.getState() == HddsProtos.LifeCycleState.OPEN; - } - - private boolean isClosed(ContainerInfo info) { - return info.getState() == HddsProtos.LifeCycleState.CLOSED; - } - - /** - * Closes this stream and releases any system resources associated with it. - * If the stream is - * already closed then invoking this method has no effect. - *
- *
As noted in {@link AutoCloseable#close()}, cases where the close may - * fail require careful - * attention. It is strongly advised to relinquish the underlying resources - * and to internally - * mark the {@code Closeable} as closed, prior to throwing the - * {@code IOException}. - * - * @throws IOException if an I/O error occurs - */ - @Override - public void close() throws IOException { - if (containerLeaseManager != null) { - containerLeaseManager.shutdown(); - } - if (containerStateManager != null) { - flushContainerInfo(); - containerStateManager.close(); - } - if (containerStore != null) { - containerStore.close(); - } - - if (pipelineSelector != null) { - pipelineSelector.shutdown(); - } - } - - /** - * Since allocatedBytes of a container is only in memory, stored in - * containerStateManager, when closing ContainerMapping, we need to update - * this in the container store. - * - * @throws IOException on failure. - */ - @VisibleForTesting - public void flushContainerInfo() throws IOException { - List containers = containerStateManager.getAllContainers(); - List failedContainers = new ArrayList<>(); - for (ContainerInfo info : containers) { - // even if some container updated failed, others can still proceed - try { - byte[] dbKey = Longs.toByteArray(info.getContainerID()); - byte[] containerBytes = containerStore.get(dbKey); - // TODO : looks like when a container is deleted, the container is - // removed from containerStore but not containerStateManager, so it can - // return info of a deleted container. may revisit this in the future, - // for now, just skip a not-found container - if (containerBytes != null) { - containerStore.put(dbKey, info.getProtobuf().toByteArray()); - } else { - LOG.debug("Container state manager has container {} but not found " + - "in container store, a deleted container?", - info.getContainerID()); - } - } catch (IOException ioe) { - failedContainers.add(info.getContainerID()); - } - } - if (!failedContainers.isEmpty()) { - throw new IOException("Error in flushing container info from container " + - "state manager: " + failedContainers); - } - } - - @VisibleForTesting - public MetadataStore getContainerStore() { - return containerStore; - } - - public PipelineSelector getPipelineSelector() { - return pipelineSelector; - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java deleted file mode 100644 index 71935f0aa251d..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java +++ /dev/null @@ -1,150 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hdds.scm.container; - -import java.io.IOException; -import java.util.Set; -import java.util.stream.Collectors; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.replication.ReplicationActivityStatus; -import org.apache.hadoop.hdds.scm.container.replication.ReplicationRequest; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.node.states.ReportResult; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ContainerReportFromDatanode; -import org.apache.hadoop.hdds.server.events.EventHandler; -import org.apache.hadoop.hdds.server.events.EventPublisher; - -import com.google.common.base.Preconditions; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Handles container reports from datanode. - */ -public class ContainerReportHandler implements - EventHandler { - - private static final Logger LOG = - LoggerFactory.getLogger(ContainerReportHandler.class); - - private final NodeManager nodeManager; - - private final Mapping containerMapping; - - private ContainerStateManager containerStateManager; - - private ReplicationActivityStatus replicationStatus; - - public ContainerReportHandler(Mapping containerMapping, - NodeManager nodeManager, - ReplicationActivityStatus replicationActivityStatus) { - Preconditions.checkNotNull(containerMapping); - Preconditions.checkNotNull(nodeManager); - Preconditions.checkNotNull(replicationActivityStatus); - this.containerStateManager = containerMapping.getStateManager(); - this.nodeManager = nodeManager; - this.containerMapping = containerMapping; - this.replicationStatus = replicationActivityStatus; - } - - @Override - public void onMessage(ContainerReportFromDatanode containerReportFromDatanode, - EventPublisher publisher) { - - DatanodeDetails datanodeOrigin = - containerReportFromDatanode.getDatanodeDetails(); - - ContainerReportsProto containerReport = - containerReportFromDatanode.getReport(); - try { - - //update state in container db and trigger close container events - containerMapping - .processContainerReports(datanodeOrigin, containerReport, false); - - Set containerIds = containerReport.getReportsList().stream() - .map(StorageContainerDatanodeProtocolProtos - .ContainerInfo::getContainerID) - .map(ContainerID::new) - .collect(Collectors.toSet()); - - ReportResult reportResult = nodeManager - .processContainerReport(datanodeOrigin.getUuid(), containerIds); - - //we have the report, so we can update the states for the next iteration. - nodeManager - .setContainersForDatanode(datanodeOrigin.getUuid(), containerIds); - - for (ContainerID containerID : reportResult.getMissingEntries()) { - containerStateManager - .removeContainerReplica(containerID, datanodeOrigin); - checkReplicationState(containerID, publisher); - } - - for (ContainerID containerID : reportResult.getNewEntries()) { - containerStateManager.addContainerReplica(containerID, datanodeOrigin); - checkReplicationState(containerID, publisher); - } - - } catch (IOException e) { - //TODO: stop all the replication? 
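The report handling above boils down to a set reconciliation: containers in the previously known set but absent from the report are missing replicas, and containers in the report but not previously known are new replicas. A generic sketch of the two differences (the helper names are hypothetical; ReportResult computes the same split):

    import java.util.HashSet;
    import java.util.Set;

    // Sketch of the missing/new split performed by processContainerReport.
    static <T> Set<T> missingEntries(Set<T> known, Set<T> reported) {
      Set<T> missing = new HashSet<>(known);
      missing.removeAll(reported);
      return missing;
    }

    static <T> Set<T> newEntries(Set<T> known, Set<T> reported) {
      Set<T> added = new HashSet<>(reported);
      added.removeAll(known);
      return added;
    }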
- LOG.error("Error on processing container report from datanode {}", - datanodeOrigin, e); - } - - } - - private void checkReplicationState(ContainerID containerID, - EventPublisher publisher) - throws SCMException { - ContainerInfo container = containerStateManager.getContainer(containerID); - - if (container == null) { - //warning unknown container - LOG.warn( - "Container is missing from containerStateManager. Can't request " - + "replication. {}", - containerID); - return; - } - if (container.isContainerOpen()) { - return; - } - - ReplicationRequest replicationState = - containerStateManager.checkReplicationState(containerID); - if (replicationState != null) { - if (replicationStatus.isReplicationEnabled()) { - publisher.fireEvent(SCMEvents.REPLICATE_CONTAINER, - replicationState); - } else { - LOG.warn( - "Over/under replicated container but the replication is not " - + "(yet) enabled: " - + replicationState.toString()); - } - } - - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java deleted file mode 100644 index 930c098f0f574..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java +++ /dev/null @@ -1,570 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.container; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.hdds.scm.container.replication.ReplicationRequest; -import org.apache.hadoop.hdds.scm.container.states.ContainerState; -import org.apache.hadoop.hdds.scm.container.states.ContainerStateMap; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.ozone.common.statemachine - .InvalidStateTransitionException; -import org.apache.hadoop.ozone.common.statemachine.StateMachine; -import org.apache.hadoop.util.Time; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.Closeable; -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.NavigableSet; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.atomic.AtomicLong; - -import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes - .FAILED_TO_CHANGE_CONTAINER_STATE; - -/** - * A container state manager keeps track of container states and returns - * containers that match various queries. - *

- * This state machine is driven by a combination of server and client actions. - *

- * This is how container creation happens: 1. When a container is created, the
- * Server (or SCM) marks that container as being in the ALLOCATED state. In
- * this state, SCM has chosen a pipeline for the container to live on. However,
- * the container is not created yet. This container along with the pipeline is
- * returned to the client.
- *

- * 2. When the client sees the container state as ALLOCATED, it understands
- * that the container needs to be created on the specified pipeline. The client
- * lets the SCM know that it saw this flag and is initiating the creation of
- * the container on the data nodes.
- *

- * This is done by calling notifyObjectCreation(ContainerName,
- * BEGIN_CREATE). When SCM gets this call, it puts the container state
- * into CREATING. All this state means is that SCM told the client to create a
- * container and the client saw that request.
- *

- * 3. The client then makes calls to the datanodes directly, asking them to
- * create the container. This is done with the help of the pipeline that
- * supports this container.
- *

- * 4. Once the creation of the container is complete, the client will make
- * another call to the SCM, this time specifying the containerName and
- * COMPLETE_CREATE as the event.
- *

- * 5. With the COMPLETE_CREATE event, the container moves to the Open state.
- * This is the state in which clients can write to a container.
- *

- * 6. If the client does not respond with the COMPLETE_CREATE event within a
- * certain time, the state machine times out and triggers a delete operation on
- * the container.
- *

- * Please see the function initializeStateMachine below to see how this looks in - * code. - *
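[Editor's note: a minimal sketch, not part of the patch, of how the create path in
steps 1 through 6 drives the state machine used by this class. It assumes the
StateMachine, LifeCycleState and LifeCycleEvent types imported above; the full
transition table is in initializeStateMachine below.]

  static LifeCycleState sketchCreatePath() throws Exception {
    Set<LifeCycleState> finalStates = new HashSet<>();
    finalStates.add(LifeCycleState.OPEN);
    StateMachine<LifeCycleState, LifeCycleEvent> sm =
        new StateMachine<>(LifeCycleState.ALLOCATED, finalStates);
    sm.addTransition(LifeCycleState.ALLOCATED, LifeCycleState.CREATING,
        LifeCycleEvent.CREATE);     // step 2: client acknowledges ALLOCATED
    sm.addTransition(LifeCycleState.CREATING, LifeCycleState.OPEN,
        LifeCycleEvent.CREATED);    // steps 4-5: COMPLETE_CREATE received
    sm.addTransition(LifeCycleState.CREATING, LifeCycleState.DELETING,
        LifeCycleEvent.TIMEOUT);    // step 6: client never confirmed creation
    // getNextState throws InvalidStateTransitionException for illegal moves.
    return sm.getNextState(LifeCycleState.CREATING, LifeCycleEvent.CREATED);
  }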

- * Reusing existing containers:
- *

- * The create container call is not made every time; the system tries to reuse
- * open containers as much as possible. In those cases, it looks through the
- * list of open containers and returns containers that match the requested
- * signature.
- *

- * Please note: logically there are three separate state machines in the case
- * of containers.
- *

- * The Create State Machine -- Commented extensively above. - *

- * Open/Close State Machine - Once the container is in the Open state, it will
- * eventually be closed once sufficient data has been written to it.
- *

- * TimeOut Delete Container State Machine - if container creation times out,
- * the Container State Manager decides to delete the container.
- */
-public class ContainerStateManager implements Closeable {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ContainerStateManager.class);
-
-  private final StateMachine<LifeCycleState, LifeCycleEvent> stateMachine;
-
-  private final long containerSize;
-  private final ConcurrentHashMap<ContainerState, ContainerID> lastUsedMap;
-  private final ContainerStateMap containers;
-  private final AtomicLong containerCount;
-
-  /**
-   * Constructs a Container State Manager that tracks all containers owned by
-   * SCM for the purpose of allocation of blocks.
-   *

- * TODO: Add Container Tags so we know which containers are owned by SCM.
- */
-  @SuppressWarnings("unchecked")
-  public ContainerStateManager(Configuration configuration,
-      Mapping containerMapping, PipelineSelector pipelineSelector) {
-
-    // Initialize the container state machine.
-    Set<LifeCycleState> finalStates = new HashSet();
-
-    // These are the steady states of a container.
-    finalStates.add(LifeCycleState.OPEN);
-    finalStates.add(LifeCycleState.CLOSED);
-    finalStates.add(LifeCycleState.DELETED);
-
-    this.stateMachine = new StateMachine<>(LifeCycleState.ALLOCATED,
-        finalStates);
-    initializeStateMachine();
-
-    this.containerSize = (long) configuration.getStorageSize(
-        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE,
-        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT,
-        StorageUnit.BYTES);
-
-    lastUsedMap = new ConcurrentHashMap<>();
-    containerCount = new AtomicLong(0);
-    containers = new ContainerStateMap();
-    loadExistingContainers(containerMapping, pipelineSelector);
-  }
-
-  private void loadExistingContainers(Mapping containerMapping,
-      PipelineSelector pipelineSelector) {
-
-    List<ContainerInfo> containerList;
-    try {
-      containerList = containerMapping.listContainer(0, Integer.MAX_VALUE);
-
-      // If there are no containers to load, return early.
-      if (containerList == null || containerList.size() == 0) {
-        LOG.info("No containers to load for this cluster.");
-        return;
-      }
-    } catch (IOException e) {
-      if (!e.getMessage().equals("No container exists in current db")) {
-        LOG.error("Could not list the containers", e);
-      }
-      return;
-    }
-
-    try {
-      long maxID = 0;
-      for (ContainerInfo container : containerList) {
-        containers.addContainer(container);
-        pipelineSelector.addContainerToPipeline(
-            container.getPipelineID(), container.getContainerID());
-
-        if (maxID < container.getContainerID()) {
-          maxID = container.getContainerID();
-        }
-
-        containerCount.set(maxID);
-      }
-    } catch (SCMException ex) {
-      LOG.error("Unable to add container information. ", ex);
-      // FIXME: what is the proper shutdown procedure for SCM?
-      // System.exit(1) // Should we exit here?
-    }
-  }
-
-  /**
-   * Returns the info of all the containers kept by the in-memory mapping.
-   *
-   * @return the list of all container info.
-   */
-  public List<ContainerInfo> getAllContainers() {
-    List<ContainerInfo> list = new ArrayList<>();
-
-    // No locking needed since the return value is an immutable map.
- containers.getContainerMap().forEach((key, value) -> list.add(value)); - return list; - } - - /* - * - * Event and State Transition Mapping: - * - * State: ALLOCATED ---------------> CREATING - * Event: CREATE - * - * State: CREATING ---------------> OPEN - * Event: CREATED - * - * State: OPEN ---------------> CLOSING - * Event: FINALIZE - * - * State: CLOSING ---------------> CLOSED - * Event: CLOSE - * - * State: CLOSED ----------------> DELETING - * Event: DELETE - * - * State: DELETING ----------------> DELETED - * Event: CLEANUP - * - * State: CREATING ---------------> DELETING - * Event: TIMEOUT - * - * - * Container State Flow: - * - * [ALLOCATED]---->[CREATING]------>[OPEN]-------->[CLOSING]------->[CLOSED] - * (CREATE) | (CREATED) (FINALIZE) (CLOSE) | - * | | - * | | - * |(TIMEOUT) (DELETE)| - * | | - * +-------------> [DELETING] <-------------------+ - * | - * | - * (CLEANUP)| - * | - * [DELETED] - */ - private void initializeStateMachine() { - stateMachine.addTransition(LifeCycleState.ALLOCATED, - LifeCycleState.CREATING, - LifeCycleEvent.CREATE); - - stateMachine.addTransition(LifeCycleState.CREATING, - LifeCycleState.OPEN, - LifeCycleEvent.CREATED); - - stateMachine.addTransition(LifeCycleState.OPEN, - LifeCycleState.CLOSING, - LifeCycleEvent.FINALIZE); - - stateMachine.addTransition(LifeCycleState.CLOSING, - LifeCycleState.CLOSED, - LifeCycleEvent.CLOSE); - - stateMachine.addTransition(LifeCycleState.CLOSED, - LifeCycleState.DELETING, - LifeCycleEvent.DELETE); - - stateMachine.addTransition(LifeCycleState.CREATING, - LifeCycleState.DELETING, - LifeCycleEvent.TIMEOUT); - - stateMachine.addTransition(LifeCycleState.DELETING, - LifeCycleState.DELETED, - LifeCycleEvent.CLEANUP); - } - - /** - * allocates a new container based on the type, replication etc. - * - * @param selector -- Pipeline selector class. - * @param type -- Replication type. - * @param replicationFactor - Replication replicationFactor. - * @return ContainerWithPipeline - * @throws IOException on Failure. - */ - public ContainerWithPipeline allocateContainer(PipelineSelector selector, - HddsProtos.ReplicationType type, - HddsProtos.ReplicationFactor replicationFactor, String owner) - throws IOException { - - Pipeline pipeline = selector.getReplicationPipeline(type, - replicationFactor); - - Preconditions.checkNotNull(pipeline, "Pipeline type=%s/" - + "replication=%s couldn't be found for the new container. " - + "Do you have enough nodes?", type, replicationFactor); - - long containerID = containerCount.incrementAndGet(); - ContainerInfo containerInfo = new ContainerInfo.Builder() - .setState(HddsProtos.LifeCycleState.ALLOCATED) - .setPipelineID(pipeline.getId()) - // This is bytes allocated for blocks inside container, not the - // container size - .setAllocatedBytes(0) - .setUsedBytes(0) - .setNumberOfKeys(0) - .setStateEnterTime(Time.monotonicNow()) - .setOwner(owner) - .setContainerID(containerID) - .setDeleteTransactionId(0) - .setReplicationFactor(replicationFactor) - .setReplicationType(pipeline.getType()) - .build(); - selector.addContainerToPipeline(pipeline.getId(), containerID); - Preconditions.checkNotNull(containerInfo); - containers.addContainer(containerInfo); - LOG.trace("New container allocated: {}", containerInfo); - return new ContainerWithPipeline(containerInfo, pipeline); - } - - /** - * Update the Container State to the next state. - * - * @param info - ContainerInfo - * @param event - LifeCycle Event - * @return Updated ContainerInfo. - * @throws SCMException on Failure. 
- */ - public ContainerInfo updateContainerState(ContainerInfo - info, HddsProtos.LifeCycleEvent event) throws SCMException { - LifeCycleState newState; - try { - newState = this.stateMachine.getNextState(info.getState(), event); - } catch (InvalidStateTransitionException ex) { - String error = String.format("Failed to update container state %s, " + - "reason: invalid state transition from state: %s upon " + - "event: %s.", - info.getContainerID(), info.getState(), event); - LOG.error(error); - throw new SCMException(error, FAILED_TO_CHANGE_CONTAINER_STATE); - } - - // This is a post condition after executing getNextState. - Preconditions.checkNotNull(newState); - containers.updateState(info, info.getState(), newState); - return containers.getContainerInfo(info); - } - - /** - * Update the container State. - * @param info - Container Info - * @return ContainerInfo - * @throws SCMException - on Error. - */ - public ContainerInfo updateContainerInfo(ContainerInfo info) - throws SCMException { - containers.updateContainerInfo(info); - return containers.getContainerInfo(info); - } - - /** - * Update deleteTransactionId for a container. - * - * @param deleteTransactionMap maps containerId to its new - * deleteTransactionID - */ - public void updateDeleteTransactionId(Map deleteTransactionMap) { - for (Map.Entry entry : deleteTransactionMap.entrySet()) { - containers.getContainerMap().get(ContainerID.valueof(entry.getKey())) - .updateDeleteTransactionId(entry.getValue()); - } - } - - /** - * Return a container matching the attributes specified. - * - * @param size - Space needed in the Container. - * @param owner - Owner of the container - A specific nameservice. - * @param type - Replication Type {StandAlone, Ratis} - * @param factor - Replication Factor {ONE, THREE} - * @param state - State of the Container-- {Open, Allocated etc.} - * @return ContainerInfo, null if there is no match found. - */ - public ContainerInfo getMatchingContainer(final long size, - String owner, ReplicationType type, ReplicationFactor factor, - LifeCycleState state) { - - // Find containers that match the query spec, if no match return null. - NavigableSet matchingSet = - containers.getMatchingContainerIDs(state, owner, factor, type); - if (matchingSet == null || matchingSet.size() == 0) { - return null; - } - - // Get the last used container and find container above the last used - // container ID. - ContainerState key = new ContainerState(owner, type, factor); - ContainerID lastID = lastUsedMap.get(key); - if (lastID == null) { - lastID = matchingSet.first(); - } - - // There is a small issue here. The first time, we will skip the first - // container. But in most cases it will not matter. - NavigableSet resultSet = matchingSet.tailSet(lastID, false); - if (resultSet.size() == 0) { - resultSet = matchingSet; - } - - ContainerInfo selectedContainer = - findContainerWithSpace(size, resultSet, owner); - if (selectedContainer == null) { - - // If we did not find any space in the tailSet, we need to look for - // space in the headset, we need to pass true to deal with the - // situation that we have a lone container that has space. That is we - // ignored the last used container under the assumption we can find - // other containers with space, but if have a single container that is - // not true. Hence we need to include the last used container as the - // last element in the sorted set. 
- - resultSet = matchingSet.headSet(lastID, true); - selectedContainer = findContainerWithSpace(size, resultSet, owner); - } - // Update the allocated Bytes on this container. - if (selectedContainer != null) { - selectedContainer.updateAllocatedBytes(size); - } - return selectedContainer; - - } - - private ContainerInfo findContainerWithSpace(long size, - NavigableSet searchSet, String owner) { - // Get the container with space to meet our request. - for (ContainerID id : searchSet) { - ContainerInfo containerInfo = containers.getContainerInfo(id); - if (containerInfo.getAllocatedBytes() + size <= this.containerSize) { - containerInfo.updateLastUsedTime(); - - ContainerState key = new ContainerState(owner, - containerInfo.getReplicationType(), - containerInfo.getReplicationFactor()); - lastUsedMap.put(key, containerInfo.containerID()); - return containerInfo; - } - } - return null; - } - - /** - * Returns a set of ContainerIDs that match the Container. - * - * @param owner Owner of the Containers. - * @param type - Replication Type of the containers - * @param factor - Replication factor of the containers. - * @param state - Current State, like Open, Close etc. - * @return Set of containers that match the specific query parameters. - */ - public NavigableSet getMatchingContainerIDs( - String owner, ReplicationType type, ReplicationFactor factor, - LifeCycleState state) { - return containers.getMatchingContainerIDs(state, owner, - factor, type); - } - - /** - * Returns the containerInfo with pipeline for the given container id. - * @param selector -- Pipeline selector class. - * @param containerID id of the container - * @return ContainerInfo containerInfo - * @throws IOException - */ - public ContainerWithPipeline getContainer(PipelineSelector selector, - ContainerID containerID) { - ContainerInfo info = containers.getContainerInfo(containerID.getId()); - Pipeline pipeline = selector.getPipeline(info.getPipelineID()); - return new ContainerWithPipeline(info, pipeline); - } - - /** - * Returns the containerInfo for the given container id. - * @param containerID id of the container - * @return ContainerInfo containerInfo - * @throws IOException - */ - public ContainerInfo getContainer(ContainerID containerID) { - return containers.getContainerInfo(containerID); - } - - @Override - public void close() throws IOException { - } - - /** - * Returns the latest list of DataNodes where replica for given containerId - * exist. Throws an SCMException if no entry is found for given containerId. - * - * @param containerID - * @return Set - */ - public Set getContainerReplicas(ContainerID containerID) - throws SCMException { - return containers.getContainerReplicas(containerID); - } - - /** - * Add a container Replica for given DataNode. - * - * @param containerID - * @param dn - */ - public void addContainerReplica(ContainerID containerID, DatanodeDetails dn) { - containers.addContainerReplica(containerID, dn); - } - - /** - * Remove a container Replica for given DataNode. - * - * @param containerID - * @param dn - * @return True of dataNode is removed successfully else false. - */ - public boolean removeContainerReplica(ContainerID containerID, - DatanodeDetails dn) throws SCMException { - return containers.removeContainerReplica(containerID, dn); - } - - /** - * Compare the existing replication number with the expected one. 
- */ - public ReplicationRequest checkReplicationState(ContainerID containerID) - throws SCMException { - int existingReplicas = getContainerReplicas(containerID).size(); - int expectedReplicas = getContainer(containerID) - .getReplicationFactor().getNumber(); - if (existingReplicas != expectedReplicas) { - return new ReplicationRequest(containerID.getId(), existingReplicas, - expectedReplicas); - } - return null; - } - - /** - * Checks if the container is open. - */ - public boolean isOpen(ContainerID containerID) { - Preconditions.checkNotNull(containerID); - ContainerInfo container = Preconditions - .checkNotNull(getContainer(containerID), - "Container can't be found " + containerID); - return container.isContainerOpen(); - } - - @VisibleForTesting - public ContainerStateMap getContainerStateMap() { - return containers; - } - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java deleted file mode 100644 index 5ed80cb47f3fe..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java +++ /dev/null @@ -1,141 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector; - -import java.io.Closeable; -import java.io.IOException; -import java.util.List; -import java.util.Map; - -/** - * Mapping class contains the mapping from a name to a pipeline mapping. This is - * used by SCM when allocating new locations and when looking up a key. - */ -public interface Mapping extends Closeable { - /** - * Returns the ContainerInfo from the container ID. - * - * @param containerID - ID of container. - * @return - ContainerInfo such as creation state and the pipeline. - * @throws IOException - */ - ContainerInfo getContainer(long containerID) throws IOException; - - /** - * Returns the ContainerInfo from the container ID. - * - * @param containerID - ID of container. - * @return - ContainerWithPipeline such as creation state and the pipeline. - * @throws IOException - */ - ContainerWithPipeline getContainerWithPipeline(long containerID) - throws IOException; - - /** - * Returns containers under certain conditions. - * Search container IDs from start ID(exclusive), - * The max size of the searching range cannot exceed the - * value of count. - * - * @param startContainerID start containerID, >=0, - * start searching at the head if 0. - * @param count count must be >= 0 - * Usually the count will be replace with a very big - * value instead of being unlimited in case the db is very big. - * - * @return a list of container. - * @throws IOException - */ - List listContainer(long startContainerID, int count) - throws IOException; - - /** - * Allocates a new container for a given keyName and replication factor. - * - * @param replicationFactor - replication factor of the container. - * @param owner - * @return - ContainerWithPipeline. - * @throws IOException - */ - ContainerWithPipeline allocateContainer(HddsProtos.ReplicationType type, - HddsProtos.ReplicationFactor replicationFactor, String owner) - throws IOException; - - /** - * Deletes a container from SCM. - * - * @param containerID - Container ID - * @throws IOException - */ - void deleteContainer(long containerID) throws IOException; - - /** - * Update container state. - * @param containerID - Container ID - * @param event - container life cycle event - * @return - new container state - * @throws IOException - */ - HddsProtos.LifeCycleState updateContainerState(long containerID, - HddsProtos.LifeCycleEvent event) throws IOException; - - /** - * Returns the container State Manager. 
- * @return ContainerStateManager
- */
-  ContainerStateManager getStateManager();
-
-  /**
-   * Process container report from Datanode.
-   *
-   * @param reports Container report
-   */
-  void processContainerReports(DatanodeDetails datanodeDetails,
-      ContainerReportsProto reports, boolean isRegisterCall)
-      throws IOException;
-
-  /**
-   * Update deleteTransactionId according to deleteTransactionMap.
-   *
-   * @param deleteTransactionMap Maps the containerId to latest delete
-   *                             transaction id for the container.
-   * @throws IOException
-   */
-  void updateDeleteTransactionId(Map<Long, Long> deleteTransactionMap)
-      throws IOException;
-
-  /**
-   * Returns a ContainerWithPipeline that matches the given criteria.
-   * @return ContainerWithPipeline
-   */
-  ContainerWithPipeline getMatchingContainerWithPipeline(long size,
-      String owner, ReplicationType type, ReplicationFactor factor,
-      LifeCycleState state) throws IOException;
-
-  PipelineSelector getPipelineSelector();
-}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/package-info.java
deleted file mode 100644
index ee02bbd88f2b9..0000000000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- *
- */
-
-/**
- * This package has classes that close a container, that is, move a container
- * from the open state to the closed state.
- */
-package org.apache.hadoop.hdds.scm.container.closer;
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/package-info.java
deleted file mode 100644
index 3f8d05681bdae..0000000000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */ - -package org.apache.hadoop.hdds.scm.container; -/** - * This package contains routines to manage the container location and - * mapping inside SCM - */ \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicy.java deleted file mode 100644 index 3336c8e80e74e..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicy.java +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.container.placement.algorithms; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; - -import java.io.IOException; -import java.util.List; - -/** - * A ContainerPlacementPolicy support choosing datanodes to build replication - * pipeline with specified constraints. - */ -public interface ContainerPlacementPolicy { - - /** - * Given the replication factor and size required, return set of datanodes - * that satisfy the nodes and size requirement. - * - * @param excludedNodes - list of nodes to be excluded. - * @param nodesRequired - number of datanodes required. - * @param sizeRequired - size required for the container or block. - * @return list of datanodes chosen. - * @throws IOException - */ - List chooseDatanodes(List excludedNodes, - int nodesRequired, long sizeRequired) - throws IOException; -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMCommonPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMCommonPolicy.java deleted file mode 100644 index 60861b770c8a1..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMCommonPolicy.java +++ /dev/null @@ -1,201 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.container.placement.algorithms; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.LinkedList; -import java.util.List; -import java.util.Random; -import java.util.stream.Collectors; - -/** - * SCM CommonPolicy implements a set of invariants which are common - * for all container placement policies, acts as the repository of helper - * functions which are common to placement policies. - */ -public abstract class SCMCommonPolicy implements ContainerPlacementPolicy { - @VisibleForTesting - static final Logger LOG = - LoggerFactory.getLogger(SCMCommonPolicy.class); - private final NodeManager nodeManager; - private final Random rand; - private final Configuration conf; - - /** - * Constructs SCM Common Policy Class. - * - * @param nodeManager NodeManager - * @param conf Configuration class. - */ - public SCMCommonPolicy(NodeManager nodeManager, Configuration conf) { - this.nodeManager = nodeManager; - this.rand = new Random(); - this.conf = conf; - } - - /** - * Return node manager. - * - * @return node manager - */ - public NodeManager getNodeManager() { - return nodeManager; - } - - /** - * Returns the Random Object. - * - * @return rand - */ - public Random getRand() { - return rand; - } - - /** - * Get Config. - * - * @return Configuration - */ - public Configuration getConf() { - return conf; - } - - /** - * Given the replication factor and size required, return set of datanodes - * that satisfy the nodes and size requirement. - *

- * Here are some invariants of container placement. - *

- * 1. We place containers only on healthy nodes. - * 2. We place containers on nodes with enough space for that container. - * 3. if a set of containers are requested, we either meet the required - * number of nodes or we fail that request. - * - * - * @param excludedNodes - datanodes with existing replicas - * @param nodesRequired - number of datanodes required. - * @param sizeRequired - size required for the container or block. - * @return list of datanodes chosen. - * @throws SCMException SCM exception. - */ - - public List chooseDatanodes( - List excludedNodes, - int nodesRequired, final long sizeRequired) throws SCMException { - List healthyNodes = - nodeManager.getNodes(HddsProtos.NodeState.HEALTHY); - healthyNodes.removeAll(excludedNodes); - String msg; - if (healthyNodes.size() == 0) { - msg = "No healthy node found to allocate container."; - LOG.error(msg); - throw new SCMException(msg, SCMException.ResultCodes - .FAILED_TO_FIND_HEALTHY_NODES); - } - - if (healthyNodes.size() < nodesRequired) { - msg = String.format("Not enough healthy nodes to allocate container. %d " - + " datanodes required. Found %d", - nodesRequired, healthyNodes.size()); - LOG.error(msg); - throw new SCMException(msg, - SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE); - } - List healthyList = healthyNodes.stream().filter(d -> - hasEnoughSpace(d, sizeRequired)).collect(Collectors.toList()); - - if (healthyList.size() < nodesRequired) { - msg = String.format("Unable to find enough nodes that meet the space " + - "requirement of %d bytes in healthy node set." + - " Nodes required: %d Found: %d", - sizeRequired, nodesRequired, healthyList.size()); - LOG.error(msg); - throw new SCMException(msg, - SCMException.ResultCodes.FAILED_TO_FIND_NODES_WITH_SPACE); - } - - return healthyList; - } - - /** - * Returns true if this node has enough space to meet our requirement. - * - * @param datanodeDetails DatanodeDetails - * @return true if we have enough space. - */ - private boolean hasEnoughSpace(DatanodeDetails datanodeDetails, - long sizeRequired) { - SCMNodeMetric nodeMetric = nodeManager.getNodeStat(datanodeDetails); - return (nodeMetric != null) && (nodeMetric.get() != null) - && nodeMetric.get().getRemaining().hasResources(sizeRequired); - } - - /** - * This function invokes the derived classes chooseNode Function to build a - * list of nodes. Then it verifies that invoked policy was able to return - * expected number of nodes. - * - * @param nodesRequired - Nodes Required - * @param healthyNodes - List of Nodes in the result set. - * @return List of Datanodes that can be used for placement. - * @throws SCMException - */ - public List getResultSet( - int nodesRequired, List healthyNodes) - throws SCMException { - List results = new LinkedList<>(); - for (int x = 0; x < nodesRequired; x++) { - // invoke the choose function defined in the derived classes. - DatanodeDetails nodeId = chooseNode(healthyNodes); - if (nodeId != null) { - results.add(nodeId); - } - } - - if (results.size() < nodesRequired) { - LOG.error("Unable to find the required number of healthy nodes that " + - "meet the criteria. Required nodes: {}, Found nodes: {}", - nodesRequired, results.size()); - throw new SCMException("Unable to find required number of nodes.", - SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE); - } - return results; - } - - /** - * Choose a datanode according to the policy, this function is implemented - * by the actual policy class. For example, PlacementCapacity or - * PlacementRandom. 
- * - * @param healthyNodes - Set of healthy nodes we can choose from. - * @return DatanodeDetails - */ - public abstract DatanodeDetails chooseNode( - List healthyNodes); - - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java deleted file mode 100644 index 8df8f6e034d1b..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java +++ /dev/null @@ -1,137 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.container.placement.algorithms; - -import java.util.List; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.node.NodeManager; - -import com.google.common.annotations.VisibleForTesting; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Container placement policy that randomly choose datanodes with remaining - * space to satisfy the size constraints. - *

- * The algorithm is as follows: pick two random nodes from a given pool of
- * nodes and then pick the node with the lower utilization. This leads to a
- * higher probability of nodes with lower utilization being picked.
- *

- * For those wondering why we choose two nodes randomly and keep the node
- * with lower utilization: links to the original papers are in
- * HDFS-11564.
- *

- * A brief summary -- We rank the s nodes on a scale from lowest utilized to
- * highest utilized; there are (s * (s - 1)) / 2 ways to build distinct pairs
- * of nodes. There are s - k pairs in which the rank k node is the less
- * utilized member of the pair, so the probability of picking the rank k node
- * is (2 * (s - k)) / (s * (s - 1)).
- *
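[Editor's note: a quick numeric check of the formula above. With s = 10 nodes,
the least utilized node (k = 1) is picked with probability
2 * (10 - 1) / (10 * 9) = 0.2, while the rank 9 node is picked with probability
2 * (10 - 9) / (10 * 9), roughly 0.022; the most utilized node (k = 10) is never
the lower member of a pair. Summing 2 * (s - k) / (s * (s - 1)) over k = 1..s
gives exactly 1, as a probability distribution should.]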

- * In English: there is a much higher probability of picking less utilized
- * nodes than nodes with higher utilization, since we pick two nodes and
- * then keep the one with lower utilization.
- *

- * This avoids the issue, seen with a purely capacity-based allocation
- * scheme, of users adding new nodes to the cluster and HDFS sending all
- * traffic to those nodes. Unless a new node is one of the two randomly
- * picked nodes, it will not be in the running to get the container.
- *

- * This leads to an I/O pattern where the lower utilized nodes are favoured - * more than higher utilized nodes, but part of the I/O will still go to the - * older higher utilized nodes. - *
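[Editor's note: a minimal, self-contained sketch of the two-random-choices
selection described above. The utilization array is hypothetical; chooseNode in
this class applies the same rule to SCMNodeMetric stats.]

  static int chooseLessUtilized(double[] utilization, java.util.Random rand) {
    int first = rand.nextInt(utilization.length);
    int second = rand.nextInt(utilization.length);
    if (first == second) {
      return first;            // both draws hit the same node: just take it
    }
    // Keep the node with the lower utilization of the two draws.
    return utilization[first] <= utilization[second] ? first : second;
  }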

- * With this algorithm in place, our hope is that balancer tool needs to do - * little or no work and the cluster will achieve a balanced distribution - * over time. - */ -public final class SCMContainerPlacementCapacity extends SCMCommonPolicy { - @VisibleForTesting - static final Logger LOG = - LoggerFactory.getLogger(SCMContainerPlacementCapacity.class); - - /** - * Constructs a Container Placement with considering only capacity. - * That is this policy tries to place containers based on node weight. - * - * @param nodeManager Node Manager - * @param conf Configuration - */ - public SCMContainerPlacementCapacity(final NodeManager nodeManager, - final Configuration conf) { - super(nodeManager, conf); - } - - /** - * Called by SCM to choose datanodes. - * - * - * @param excludedNodes - list of the datanodes to exclude. - * @param nodesRequired - number of datanodes required. - * @param sizeRequired - size required for the container or block. - * @return List of datanodes. - * @throws SCMException SCMException - */ - @Override - public List chooseDatanodes( - List excludedNodes, final int nodesRequired, - final long sizeRequired) throws SCMException { - List healthyNodes = - super.chooseDatanodes(excludedNodes, nodesRequired, sizeRequired); - if (healthyNodes.size() == nodesRequired) { - return healthyNodes; - } - return getResultSet(nodesRequired, healthyNodes); - } - - /** - * Find a node from the healthy list and return it after removing it from the - * list that we are operating on. - * - * @param healthyNodes - List of healthy nodes that meet the size - * requirement. - * @return DatanodeDetails that is chosen. - */ - @Override - public DatanodeDetails chooseNode(List healthyNodes) { - int firstNodeNdx = getRand().nextInt(healthyNodes.size()); - int secondNodeNdx = getRand().nextInt(healthyNodes.size()); - - DatanodeDetails datanodeDetails; - // There is a possibility that both numbers will be same. - // if that is so, we just return the node. - if (firstNodeNdx == secondNodeNdx) { - datanodeDetails = healthyNodes.get(firstNodeNdx); - } else { - DatanodeDetails firstNodeDetails = healthyNodes.get(firstNodeNdx); - DatanodeDetails secondNodeDetails = healthyNodes.get(secondNodeNdx); - SCMNodeMetric firstNodeMetric = - getNodeManager().getNodeStat(firstNodeDetails); - SCMNodeMetric secondNodeMetric = - getNodeManager().getNodeStat(secondNodeDetails); - datanodeDetails = firstNodeMetric.isGreater(secondNodeMetric.get()) - ? firstNodeDetails : secondNodeDetails; - } - healthyNodes.remove(datanodeDetails); - return datanodeDetails; - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java deleted file mode 100644 index 76702d555ef47..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java +++ /dev/null @@ -1,92 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.container.placement.algorithms; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.List; - -/** - * Container placement policy that randomly chooses healthy datanodes. - * This is very similar to current HDFS placement. That is we - * just randomly place containers without any considerations of utilization. - *

- * That means we rely on the balancer to achieve even distribution of data.
- * Balancer will need to support containers as a feature before this class
- * can be practically used.
- */
-public final class SCMContainerPlacementRandom extends SCMCommonPolicy
-    implements ContainerPlacementPolicy {
-  @VisibleForTesting
-  static final Logger LOG =
-      LoggerFactory.getLogger(SCMContainerPlacementRandom.class);
-
-  /**
-   * Construct a random Block Placement policy.
-   *
-   * @param nodeManager nodeManager
-   * @param conf Config
-   */
-  public SCMContainerPlacementRandom(final NodeManager nodeManager,
-      final Configuration conf) {
-    super(nodeManager, conf);
-  }
-
-  /**
-   * Called by the SCM to choose datanodes.
-   *
-   *
-   * @param excludedNodes - list of the datanodes to exclude.
-   * @param nodesRequired - number of datanodes required.
-   * @param sizeRequired - size required for the container or block.
-   * @return List of Datanodes.
-   * @throws SCMException SCMException
-   */
-  @Override
-  public List<DatanodeDetails> chooseDatanodes(
-      List<DatanodeDetails> excludedNodes, final int nodesRequired,
-      final long sizeRequired) throws SCMException {
-    List<DatanodeDetails> healthyNodes =
-        super.chooseDatanodes(excludedNodes, nodesRequired, sizeRequired);
-
-    if (healthyNodes.size() == nodesRequired) {
-      return healthyNodes;
-    }
-    return getResultSet(nodesRequired, healthyNodes);
-  }
-
-  /**
-   * Just choose a node randomly and remove it from the set of nodes we can
-   * choose from.
-   *
-   * @param healthyNodes - all healthy datanodes.
-   * @return one randomly chosen datanode from the list of healthy nodes
-   */
-  public DatanodeDetails chooseNode(final List<DatanodeDetails> healthyNodes) {
-    DatanodeDetails selectedNode =
-        healthyNodes.get(getRand().nextInt(healthyNodes.size()));
-    healthyNodes.remove(selectedNode);
-    return selectedNode;
-  }
-}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/package-info.java
deleted file mode 100644
index 1cb810dd0e5e7..0000000000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/package-info.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container.placement.algorithms; -// Various placement algorithms. \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/ContainerStat.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/ContainerStat.java deleted file mode 100644 index b8e89987638d2..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/ContainerStat.java +++ /dev/null @@ -1,165 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.container.placement.metrics; - -import com.fasterxml.jackson.annotation.JsonProperty; -import com.google.common.base.Preconditions; -import org.apache.hadoop.ozone.web.utils.JsonUtils; - -import java.io.IOException; - -/** - * This class represents the SCM container stat. - */ -public class ContainerStat { - /** - * The maximum container size. - */ - @JsonProperty("Size") - private LongMetric size; - - /** - * The number of bytes used by the container. - */ - @JsonProperty("Used") - private LongMetric used; - - /** - * The number of keys in the container. - */ - @JsonProperty("KeyCount") - private LongMetric keyCount; - - /** - * The number of bytes read from the container. - */ - @JsonProperty("ReadBytes") - private LongMetric readBytes; - - /** - * The number of bytes write into the container. - */ - @JsonProperty("WriteBytes") - private LongMetric writeBytes; - - /** - * The number of times the container is read. - */ - @JsonProperty("ReadCount") - private LongMetric readCount; - - /** - * The number of times the container is written into. 
- */ - @JsonProperty("WriteCount") - private LongMetric writeCount; - - public ContainerStat() { - this(0L, 0L, 0L, 0L, 0L, 0L, 0L); - } - - public ContainerStat(long size, long used, long keyCount, long readBytes, - long writeBytes, long readCount, long writeCount) { - Preconditions.checkArgument(size >= 0, - "Container size cannot be " + "negative."); - Preconditions.checkArgument(used >= 0, - "Used space cannot be " + "negative."); - Preconditions.checkArgument(keyCount >= 0, - "Key count cannot be " + "negative"); - Preconditions.checkArgument(readBytes >= 0, - "Read bytes read cannot be " + "negative."); - Preconditions.checkArgument(readBytes >= 0, - "Write bytes cannot be " + "negative."); - Preconditions.checkArgument(readCount >= 0, - "Read count cannot be " + "negative."); - Preconditions.checkArgument(writeCount >= 0, - "Write count cannot be " + "negative"); - - this.size = new LongMetric(size); - this.used = new LongMetric(used); - this.keyCount = new LongMetric(keyCount); - this.readBytes = new LongMetric(readBytes); - this.writeBytes = new LongMetric(writeBytes); - this.readCount = new LongMetric(readCount); - this.writeCount = new LongMetric(writeCount); - } - - public LongMetric getSize() { - return size; - } - - public LongMetric getUsed() { - return used; - } - - public LongMetric getKeyCount() { - return keyCount; - } - - public LongMetric getReadBytes() { - return readBytes; - } - - public LongMetric getWriteBytes() { - return writeBytes; - } - - public LongMetric getReadCount() { - return readCount; - } - - public LongMetric getWriteCount() { - return writeCount; - } - - public void add(ContainerStat stat) { - if (stat == null) { - return; - } - - this.size.add(stat.getSize().get()); - this.used.add(stat.getUsed().get()); - this.keyCount.add(stat.getKeyCount().get()); - this.readBytes.add(stat.getReadBytes().get()); - this.writeBytes.add(stat.getWriteBytes().get()); - this.readCount.add(stat.getReadCount().get()); - this.writeCount.add(stat.getWriteCount().get()); - } - - public void subtract(ContainerStat stat) { - if (stat == null) { - return; - } - - this.size.subtract(stat.getSize().get()); - this.used.subtract(stat.getUsed().get()); - this.keyCount.subtract(stat.getKeyCount().get()); - this.readBytes.subtract(stat.getReadBytes().get()); - this.writeBytes.subtract(stat.getWriteBytes().get()); - this.readCount.subtract(stat.getReadCount().get()); - this.writeCount.subtract(stat.getWriteCount().get()); - } - - public String toJsonString() { - try { - return JsonUtils.toJsonString(this); - } catch (IOException ignored) { - return null; - } - } -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/DatanodeMetric.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/DatanodeMetric.java deleted file mode 100644 index a6e732c750314..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/DatanodeMetric.java +++ /dev/null @@ -1,91 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.container.placement.metrics; - -import org.apache.hadoop.hdds.scm.exceptions.SCMException; - -/** - * DatanodeMetric acts as the basis for all the metric that is used in - * comparing 2 datanodes. - */ -public interface DatanodeMetric extends Comparable { - - /** - * Some syntactic sugar over Comparable interface. This makes code easier to - * read. - * - * @param o - Other Object - * @return - True if *this* object is greater than argument. - */ - boolean isGreater(T o); - - /** - * Inverse of isGreater. - * - * @param o - other object. - * @return True if *this* object is Lesser than argument. - */ - boolean isLess(T o); - - /** - * Returns true if the object has same values. Because of issues with - * equals, and loss of type information this interface supports isEqual. - * - * @param o object to compare. - * @return True, if the values match. - */ - boolean isEqual(T o); - - /** - * A resourceCheck, defined by resourceNeeded. - * For example, S could be bytes required - * and DatanodeMetric can reply by saying it can be met or not. - * - * @param resourceNeeded - ResourceNeeded in its own metric. - * @return boolean, True if this resource requirement can be met. - */ - boolean hasResources(S resourceNeeded) throws SCMException; - - /** - * Returns the metric. - * - * @return T, the object that represents this metric. - */ - T get(); - - /** - * Sets the value of this metric. - * - * @param value - value of the metric. - */ - void set(T value); - - /** - * Adds a value of to the base. - * @param value - value - */ - void add(T value); - - /** - * subtract a value. - * @param value value - */ - void subtract(T value); - - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/LongMetric.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/LongMetric.java deleted file mode 100644 index 050d26bd23c94..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/LongMetric.java +++ /dev/null @@ -1,162 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container.placement.metrics; - -import com.fasterxml.jackson.annotation.JsonAutoDetect; -import com.fasterxml.jackson.annotation.JsonAutoDetect.Visibility; - -/** - * An helper class for all metrics based on Longs. - */ -@JsonAutoDetect(fieldVisibility = Visibility.ANY) -public class LongMetric implements DatanodeMetric { - private Long value; - - /** - * Constructs a long Metric. - * - * @param value Value for this metric. - */ - public LongMetric(Long value) { - this.value = value; - } - - /** - * Some syntactic sugar over Comparable interface. This makes code easier to - * read. - * - * @param o - Other Object - * @return - True if *this* object is greater than argument. - */ - @Override - public boolean isGreater(Long o) { - return compareTo(o) > 0; - } - - /** - * Inverse of isGreater. - * - * @param o - other object. - * @return True if *this* object is Lesser than argument. - */ - @Override - public boolean isLess(Long o) { - return compareTo(o) < 0; - } - - /** - * Returns true if the object has same values. Because of issues with - * equals, and loss of type information this interface supports isEqual. - * - * @param o object to compare. - * @return True, if the values match. - */ - @Override - public boolean isEqual(Long o) { - return compareTo(o) == 0; - } - - /** - * A resourceCheck, defined by resourceNeeded. - * For example, S could be bytes required - * and DatanodeMetric can reply by saying it can be met or not. - * - * @param resourceNeeded - ResourceNeeded in its own metric. - * @return boolean, True if this resource requirement can be met. - */ - @Override - public boolean hasResources(Long resourceNeeded) { - return isGreater(resourceNeeded); - } - - /** - * Returns the metric. - * - * @return T, the object that represents this metric. - */ - @Override - public Long get() { - return this.value; - } - - /** - * Sets the value of this metric. - * - * @param setValue - value of the metric. - */ - @Override - public void set(Long setValue) { - this.value = setValue; - - } - - /** - * Adds a value of to the base. - * - * @param addValue - value - */ - @Override - public void add(Long addValue) { - this.value += addValue; - } - - /** - * subtract a value. - * - * @param subValue value - */ - @Override - public void subtract(Long subValue) { - this.value -= subValue; - } - - /** - * Compares this object with the specified object for order. Returns a - * negative integer, zero, or a positive integer as this object is less - * than, equal to, or greater than the specified object. - * - * @param o the object to be compared. - * @return a negative integer, zero, or a positive integer as this object is - * less than, equal to, or greater than the specified object. - * @throws NullPointerException if the specified object is null - * @throws ClassCastException if the specified object's type prevents it - * from being compared to this object. 
- */ - @Override - public int compareTo(Long o) { - return Long.compare(this.value, o); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - - LongMetric that = (LongMetric) o; - - return value != null ? value.equals(that.value) : that.value == null; - } - - @Override - public int hashCode() { - return value != null ? value.hashCode() : 0; - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/NodeStat.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/NodeStat.java deleted file mode 100644 index d6857d395cfbc..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/NodeStat.java +++ /dev/null @@ -1,67 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.container.placement.metrics; - -import com.google.common.annotations.VisibleForTesting; - -/** - * Interface that defines Node Stats. - */ -interface NodeStat { - /** - * Get capacity of the node. - * @return capacity of the node. - */ - LongMetric getCapacity(); - - /** - * Get the used space of the node. - * @return the used space of the node. - */ - LongMetric getScmUsed(); - - /** - * Get the remaining space of the node. - * @return the remaining space of the node. - */ - LongMetric getRemaining(); - - /** - * Set the total/used/remaining space. - * @param capacity - total space. - * @param used - used space. - * @param remain - remaining space. - */ - @VisibleForTesting - void set(long capacity, long used, long remain); - - /** - * Adding of the stat. - * @param stat - stat to be added. - * @return updated node stat. - */ - NodeStat add(NodeStat stat); - - /** - * Subtract of the stat. - * @param stat - stat to be subtracted. - * @return updated nodestat. - */ - NodeStat subtract(NodeStat stat); -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMMetrics.java deleted file mode 100644 index e4dd9aa37efcd..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMMetrics.java +++ /dev/null @@ -1,154 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
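
The DatanodeMetric/LongMetric pair deleted above is essentially Comparable plus naming sugar (isGreater/isLess/isEqual) and in-place mutators. A self-contained sketch of that behaviour; the class name below is illustrative, not part of the tree:

    // LongMetricSketch: standalone restatement of the removed LongMetric
    // semantics (illustrative only).
    public class LongMetricSketch {
      private long value;

      LongMetricSketch(long value) { this.value = value; }

      boolean isGreater(long o) { return Long.compare(value, o) > 0; }
      boolean isLess(long o)    { return Long.compare(value, o) < 0; }
      boolean isEqual(long o)   { return Long.compare(value, o) == 0; }
      void add(long v)          { value += v; }
      void subtract(long v)     { value -= v; }

      public static void main(String[] args) {
        LongMetricSketch used = new LongMetricSketch(100L);
        used.add(20L);                           // 120
        used.subtract(50L);                      // 70
        System.out.println(used.isGreater(60L)); // true
        System.out.println(used.isEqual(70L));   // true
      }
    }
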
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.container.placement.metrics; - -import org.apache.hadoop.metrics2.MetricsSystem; -import org.apache.hadoop.metrics2.annotation.Metric; -import org.apache.hadoop.metrics2.annotation.Metrics; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.metrics2.lib.MutableCounterLong; -import org.apache.hadoop.metrics2.lib.MutableGaugeLong; - -/** - * This class is for maintaining StorageContainerManager statistics. - */ -@Metrics(about="Storage Container Manager Metrics", context="dfs") -public class SCMMetrics { - public static final String SOURCE_NAME = - SCMMetrics.class.getSimpleName(); - - /** - * Container stat metrics, the meaning of following metrics - * can be found in {@link ContainerStat}. - */ - @Metric private MutableGaugeLong lastContainerReportSize; - @Metric private MutableGaugeLong lastContainerReportUsed; - @Metric private MutableGaugeLong lastContainerReportKeyCount; - @Metric private MutableGaugeLong lastContainerReportReadBytes; - @Metric private MutableGaugeLong lastContainerReportWriteBytes; - @Metric private MutableGaugeLong lastContainerReportReadCount; - @Metric private MutableGaugeLong lastContainerReportWriteCount; - - @Metric private MutableCounterLong containerReportSize; - @Metric private MutableCounterLong containerReportUsed; - @Metric private MutableCounterLong containerReportKeyCount; - @Metric private MutableCounterLong containerReportReadBytes; - @Metric private MutableCounterLong containerReportWriteBytes; - @Metric private MutableCounterLong containerReportReadCount; - @Metric private MutableCounterLong containerReportWriteCount; - - public SCMMetrics() { - } - - public static SCMMetrics create() { - MetricsSystem ms = DefaultMetricsSystem.instance(); - return ms.register(SOURCE_NAME, "Storage Container Manager Metrics", - new SCMMetrics()); - } - - public void setLastContainerReportSize(long size) { - this.lastContainerReportSize.set(size); - } - - public void setLastContainerReportUsed(long used) { - this.lastContainerReportUsed.set(used); - } - - public void setLastContainerReportKeyCount(long keyCount) { - this.lastContainerReportKeyCount.set(keyCount); - } - - public void setLastContainerReportReadBytes(long readBytes) { - this.lastContainerReportReadBytes.set(readBytes); - } - - public void setLastContainerReportWriteBytes(long writeBytes) { - this.lastContainerReportWriteBytes.set(writeBytes); - } - - public void setLastContainerReportReadCount(long readCount) { - this.lastContainerReportReadCount.set(readCount); - } - - public void setLastContainerReportWriteCount(long writeCount) { - this.lastContainerReportWriteCount.set(writeCount); - } - - public void incrContainerReportSize(long size) { - this.containerReportSize.incr(size); - } - - public void incrContainerReportUsed(long used) { - this.containerReportUsed.incr(used); - } - - public void incrContainerReportKeyCount(long keyCount) { - 
this.containerReportKeyCount.incr(keyCount); - } - - public void incrContainerReportReadBytes(long readBytes) { - this.containerReportReadBytes.incr(readBytes); - } - - public void incrContainerReportWriteBytes(long writeBytes) { - this.containerReportWriteBytes.incr(writeBytes); - } - - public void incrContainerReportReadCount(long readCount) { - this.containerReportReadCount.incr(readCount); - } - - public void incrContainerReportWriteCount(long writeCount) { - this.containerReportWriteCount.incr(writeCount); - } - - public void setLastContainerStat(ContainerStat newStat) { - this.lastContainerReportSize.set(newStat.getSize().get()); - this.lastContainerReportUsed.set(newStat.getUsed().get()); - this.lastContainerReportKeyCount.set(newStat.getKeyCount().get()); - this.lastContainerReportReadBytes.set(newStat.getReadBytes().get()); - this.lastContainerReportWriteBytes.set(newStat.getWriteBytes().get()); - this.lastContainerReportReadCount.set(newStat.getReadCount().get()); - this.lastContainerReportWriteCount.set(newStat.getWriteCount().get()); - } - - public void incrContainerStat(ContainerStat deltaStat) { - this.containerReportSize.incr(deltaStat.getSize().get()); - this.containerReportUsed.incr(deltaStat.getUsed().get()); - this.containerReportKeyCount.incr(deltaStat.getKeyCount().get()); - this.containerReportReadBytes.incr(deltaStat.getReadBytes().get()); - this.containerReportWriteBytes.incr(deltaStat.getWriteBytes().get()); - this.containerReportReadCount.incr(deltaStat.getReadCount().get()); - this.containerReportWriteCount.incr(deltaStat.getWriteCount().get()); - } - - public void decrContainerStat(ContainerStat deltaStat) { - this.containerReportSize.incr(-1 * deltaStat.getSize().get()); - this.containerReportUsed.incr(-1 * deltaStat.getUsed().get()); - this.containerReportKeyCount.incr(-1 * deltaStat.getKeyCount().get()); - this.containerReportReadBytes.incr(-1 * deltaStat.getReadBytes().get()); - this.containerReportWriteBytes.incr(-1 * deltaStat.getWriteBytes().get()); - this.containerReportReadCount.incr(-1 * deltaStat.getReadCount().get()); - this.containerReportWriteCount.incr(-1 * deltaStat.getWriteCount().get()); - } - - public void unRegister() { - MetricsSystem ms = DefaultMetricsSystem.instance(); - ms.unregisterSource(SOURCE_NAME); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java deleted file mode 100644 index efd5fd60ab101..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java +++ /dev/null @@ -1,224 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
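
One detail of the SCMMetrics class deleted above is easy to miss: MutableCounterLong exposes only incr(), so decrContainerStat() decrements by passing a negated delta to incr(). A minimal self-contained illustration of the same trick, with AtomicLong standing in for the counter:

    import java.util.concurrent.atomic.AtomicLong;

    // CounterDecrementSketch: the incr(-1 * delta) pattern used by the
    // removed decrContainerStat(), since the counter has no decrement.
    public class CounterDecrementSketch {
      public static void main(String[] args) {
        AtomicLong counter = new AtomicLong(); // stand-in for MutableCounterLong
        long delta = 42L;
        counter.addAndGet(delta);              // incrContainerStat path
        counter.addAndGet(-1 * delta);         // decrContainerStat path
        System.out.println(counter.get());     // 0
      }
    }
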
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.container.placement.metrics; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; - -/** - * SCM Node Metric that is used in the placement classes. - */ -public class SCMNodeMetric implements DatanodeMetric<SCMNodeStat, Long> { - private SCMNodeStat stat; - - /** - * Constructs an SCMNode Metric. - * - * @param stat - SCMNodeStat. - */ - public SCMNodeMetric(SCMNodeStat stat) { - this.stat = stat; - } - - /** - * Set the capacity, used and remaining space on a datanode. - * - * @param capacity in bytes - * @param used in bytes - * @param remaining in bytes - */ - @VisibleForTesting - public SCMNodeMetric(long capacity, long used, long remaining) { - this.stat = new SCMNodeStat(); - this.stat.set(capacity, used, remaining); - } - - /** - * - * @param o - Other Object - * @return - True if *this* object is greater than the argument. - */ - @Override - public boolean isGreater(SCMNodeStat o) { - Preconditions.checkNotNull(this.stat, "Argument cannot be null"); - Preconditions.checkNotNull(o, "Argument cannot be null"); - - // if zero, replace with 1 for the division to work. - long thisDenominator = (this.stat.getCapacity().get() == 0) - ? 1 : this.stat.getCapacity().get(); - long otherDenominator = (o.getCapacity().get() == 0) - ? 1 : o.getCapacity().get(); - - float thisNodeWeight = - stat.getScmUsed().get() / (float) thisDenominator; - - float oNodeWeight = - o.getScmUsed().get() / (float) otherDenominator; - - if (Math.abs(thisNodeWeight - oNodeWeight) > 0.000001) { - return thisNodeWeight > oNodeWeight; - } - // if these nodes have similar weight then return the node with more - // free space as the greater node. - return stat.getRemaining().isGreater(o.getRemaining().get()); - } - - /** - * Inverse of isGreater. - * - * @param o - other object. - * @return True if *this* object is lesser than the argument. - */ - @Override - public boolean isLess(SCMNodeStat o) { - Preconditions.checkNotNull(o, "Argument cannot be null"); - - // if zero, replace with 1 for the division to work. - long thisDenominator = (this.stat.getCapacity().get() == 0) - ? 1 : this.stat.getCapacity().get(); - long otherDenominator = (o.getCapacity().get() == 0) - ? 1 : o.getCapacity().get(); - - float thisNodeWeight = - stat.getScmUsed().get() / (float) thisDenominator; - - float oNodeWeight = - o.getScmUsed().get() / (float) otherDenominator; - - if (Math.abs(thisNodeWeight - oNodeWeight) > 0.000001) { - return thisNodeWeight < oNodeWeight; - } - - // if these nodes have similar weight then return the node with less - // free space as the lesser node. - return stat.getRemaining().isLess(o.getRemaining().get()); - } - - /** - * Returns true if the object has same values. Because of issues with - * equals, and loss of type information this interface supports isEqual. - * - * @param o object to compare. - * @return True, if the values match. - * TODO : Consider if it makes sense to add remaining to this equation.
- */ - @Override - public boolean isEqual(SCMNodeStat o) { - float thisNodeWeight = stat.getScmUsed().get() / (float) - stat.getCapacity().get(); - float oNodeWeight = o.getScmUsed().get() / (float) o.getCapacity().get(); - return Math.abs(thisNodeWeight - oNodeWeight) < 0.000001; - } - - /** - * A resourceCheck, defined by resourceNeeded. - * For example, S could be bytes required - * and DatanodeMetric can reply by saying it can be met or not. - * - * @param resourceNeeded - ResourceNeeded in its own metric. - * @return boolean, True if this resource requirement can be met. - */ - @Override - public boolean hasResources(Long resourceNeeded) { - return false; - } - - /** - * Returns the metric. - * - * @return T, the object that represents this metric. - */ - @Override - public SCMNodeStat get() { - return stat; - } - - /** - * Sets the value of this metric. - * - * @param value - value of the metric. - */ - @Override - public void set(SCMNodeStat value) { - stat.set(value.getCapacity().get(), value.getScmUsed().get(), - value.getRemaining().get()); - } - - /** - * Adds a value of to the base. - * - * @param value - value - */ - @Override - public void add(SCMNodeStat value) { - stat.add(value); - } - - /** - * subtract a value. - * - * @param value value - */ - @Override - public void subtract(SCMNodeStat value) { - stat.subtract(value); - } - - /** - * Compares this object with the specified object for order. Returns a - * negative integer, zero, or a positive integer as this object is less - * than, equal to, or greater than the specified object. - * - * @param o the object to be compared. - * @return a negative integer, zero, or a positive integer as this object is - * less than, equal to, or greater than the specified object. - * @throws NullPointerException if the specified object is null - * @throws ClassCastException if the specified object's type prevents it - * from being compared to this object. - */ - @Override - public int compareTo(SCMNodeStat o) { - if (isEqual(o)) { - return 0; - } - if (isGreater(o)) { - return 1; - } else { - return -1; - } - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - - SCMNodeMetric that = (SCMNodeMetric) o; - - return stat != null ? stat.equals(that.stat) : that.stat == null; - } - - @Override - public int hashCode() { - return stat != null ? stat.hashCode() : 0; - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java deleted file mode 100644 index 3c871d3ef5043..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java +++ /dev/null @@ -1,139 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
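
The ordering that the SCMNodeMetric above imposes compares utilization ratios (scmUsed / capacity) with a 1e-6 epsilon, substitutes 1 for a zero capacity to keep the division defined, and breaks ties on remaining space. A standalone restatement of that logic; class and method names are illustrative:

    // NodeWeightSketch: restates the removed SCMNodeMetric.isGreater() order.
    public class NodeWeightSketch {
      static boolean isGreater(long used1, long cap1, long rem1,
          long used2, long cap2, long rem2) {
        float w1 = used1 / (float) (cap1 == 0 ? 1 : cap1);
        float w2 = used2 / (float) (cap2 == 0 ? 1 : cap2);
        if (Math.abs(w1 - w2) > 0.000001) {
          return w1 > w2; // the fuller node is "greater"
        }
        // Similar utilization: more free space ranks greater.
        return rem1 > rem2;
      }

      public static void main(String[] args) {
        // 50% used vs 40% used: the fuller node wins.
        System.out.println(isGreater(50, 100, 50, 40, 100, 60)); // true
        // Tie at 40% utilization: 60 bytes remaining beats 6.
        System.out.println(isGreater(40, 100, 60, 4, 10, 6));    // true
      }
    }
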
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.container.placement.metrics; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; - -/** - * This class represents the SCM node stat. - */ -public class SCMNodeStat implements NodeStat { - private LongMetric capacity; - private LongMetric scmUsed; - private LongMetric remaining; - - public SCMNodeStat() { - this(0L, 0L, 0L); - } - - public SCMNodeStat(SCMNodeStat other) { - this(other.capacity.get(), other.scmUsed.get(), other.remaining.get()); - } - - public SCMNodeStat(long capacity, long used, long remaining) { - Preconditions.checkArgument(capacity >= 0, "Capacity cannot be " + - "negative."); - Preconditions.checkArgument(used >= 0, "used space cannot be " + - "negative."); - Preconditions.checkArgument(remaining >= 0, "remaining cannot be " + - "negative"); - this.capacity = new LongMetric(capacity); - this.scmUsed = new LongMetric(used); - this.remaining = new LongMetric(remaining); - } - - /** - * @return the total configured capacity of the node. - */ - public LongMetric getCapacity() { - return capacity; - } - - /** - * @return the total SCM used space on the node. - */ - public LongMetric getScmUsed() { - return scmUsed; - } - - /** - * @return the total remaining space available on the node. - */ - public LongMetric getRemaining() { - return remaining; - } - - /** - * Set the capacity, used and remaining space on a datanode. - * - * @param newCapacity in bytes - * @param newUsed in bytes - * @param newRemaining in bytes - */ - @VisibleForTesting - public void set(long newCapacity, long newUsed, long newRemaining) { - Preconditions.checkNotNull(newCapacity, "Capacity cannot be null"); - Preconditions.checkNotNull(newUsed, "used cannot be null"); - Preconditions.checkNotNull(newRemaining, "remaining cannot be null"); - - Preconditions.checkArgument(newCapacity >= 0, "Capacity cannot be " + - "negative."); - Preconditions.checkArgument(newUsed >= 0, "used space cannot be " + - "negative."); - Preconditions.checkArgument(newRemaining >= 0, "remaining cannot be " + - "negative"); - - this.capacity = new LongMetric(newCapacity); - this.scmUsed = new LongMetric(newUsed); - this.remaining = new LongMetric(newRemaining); - } - - /** - * Adds a new nodestat to existing values of the node. - * - * @param stat Nodestat. - * @return SCMNodeStat - */ - public SCMNodeStat add(NodeStat stat) { - this.capacity.set(this.getCapacity().get() + stat.getCapacity().get()); - this.scmUsed.set(this.getScmUsed().get() + stat.getScmUsed().get()); - this.remaining.set(this.getRemaining().get() + stat.getRemaining().get()); - return this; - } - - /** - * Subtracts the stat values from the existing NodeStat. - * - * @param stat SCMNodeStat. 
- * @return Modified SCMNodeStat - */ - public SCMNodeStat subtract(NodeStat stat) { - this.capacity.set(this.getCapacity().get() - stat.getCapacity().get()); - this.scmUsed.set(this.getScmUsed().get() - stat.getScmUsed().get()); - this.remaining.set(this.getRemaining().get() - stat.getRemaining().get()); - return this; - } - - @Override - public boolean equals(Object to) { - if (to instanceof SCMNodeStat) { - SCMNodeStat tempStat = (SCMNodeStat) to; - return capacity.isEqual(tempStat.getCapacity().get()) && - scmUsed.isEqual(tempStat.getScmUsed().get()) && - remaining.isEqual(tempStat.getRemaining().get()); - } - return false; - } - - @Override - public int hashCode() { - return Long.hashCode(capacity.get() ^ scmUsed.get() ^ remaining.get()); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/package-info.java deleted file mode 100644 index 4a81d6921682a..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/package-info.java +++ /dev/null @@ -1,20 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
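
SCMNodeStat's add()/subtract() mutate the receiver and return this, which is how SCM rolls per-datanode reports into cluster totals. A hedged usage sketch against the pre-patch API; it only compiles on a tree that still contains the class this hunk deletes:

    import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;

    public class NodeStatAggregationSketch {
      public static void main(String[] args) {
        SCMNodeStat total = new SCMNodeStat();           // starts at 0 / 0 / 0
        total.add(new SCMNodeStat(100L, 40L, 60L));      // capacity, used, remaining
        total.add(new SCMNodeStat(200L, 50L, 150L));     // running total: 300 / 90 / 210
        total.subtract(new SCMNodeStat(100L, 40L, 60L)); // back to 200 / 50 / 150
        System.out.println(total.getCapacity().get());   // 200
      }
    }
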
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container.placement.metrics; - -// Various metrics supported by Datanode and used by SCM in the placement -// strategy. \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/package-info.java deleted file mode 100644 index dc54d9bd91288..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/package-info.java +++ /dev/null @@ -1,19 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.container.placement; -// Classes related to container placement. \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationActivityStatus.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationActivityStatus.java deleted file mode 100644 index 993a98602441e..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationActivityStatus.java +++ /dev/null @@ -1,117 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.container.replication; - -import javax.management.ObjectName; -import java.io.Closeable; -import java.io.IOException; -import java.util.concurrent.atomic.AtomicBoolean; -import org.apache.hadoop.hdds.server.events.EventHandler; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.metrics2.util.MBeans; - -import com.google.common.annotations.VisibleForTesting; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Event listener to track the current state of replication. - */ -public class ReplicationActivityStatus implements - ReplicationActivityStatusMXBean, Closeable { - - private static final Logger LOG = - LoggerFactory.getLogger(ReplicationActivityStatus.class); - - private AtomicBoolean replicationEnabled = new AtomicBoolean(); - private AtomicBoolean replicationStatusSetExternally = new AtomicBoolean(); - private ObjectName jmxObjectName; - private ReplicationStatusListener replicationStatusListener; - private ChillModeStatusListener chillModeStatusListener; - - public ReplicationActivityStatus(){ - replicationStatusListener = new ReplicationStatusListener(); - chillModeStatusListener = new ChillModeStatusListener(); - } - - public boolean isReplicationEnabled() { - return replicationEnabled.get(); - } - - @VisibleForTesting - public void setReplicationEnabled(boolean enabled) { - replicationEnabled.set(enabled); - } - - @VisibleForTesting - public void enableReplication() { - replicationEnabled.set(true); - } - - - public void start() { - try { - this.jmxObjectName = - MBeans.register( - "StorageContainerManager", "ReplicationActivityStatus", this); - } catch (Exception ex) { - LOG.error("JMX bean for ReplicationActivityStatus can't be registered", - ex); - } - } - - @Override - public void close() throws IOException { - if (this.jmxObjectName != null) { - MBeans.unregister(jmxObjectName); - } - } - - /** - * Replication status listener. - */ - class ReplicationStatusListener implements EventHandler<Boolean> { - @Override - public void onMessage(Boolean status, EventPublisher publisher) { - replicationStatusSetExternally.set(true); - replicationEnabled.set(status); - } - } - - /** - * Replication status is influenced by Chill mode status as well.
- */ - class ChillModeStatusListener implements EventHandler<Boolean> { - - @Override - public void onMessage(Boolean inChillMode, EventPublisher publisher) { - if (!replicationStatusSetExternally.get()) { - replicationEnabled.set(!inChillMode); - } - } - } - - public ReplicationStatusListener getReplicationStatusListener() { - return replicationStatusListener; - } - - public ChillModeStatusListener getChillModeStatusListener() { - return chillModeStatusListener; - } - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationActivityStatusMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationActivityStatusMXBean.java deleted file mode 100644 index 164bd247efbe8..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationActivityStatusMXBean.java +++ /dev/null @@ -1,28 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
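
The two listeners above encode a single rule: chill mode drives replication on and off only until someone sets the status explicitly, after which chill-mode transitions are ignored. A self-contained restatement of that gating; names are illustrative:

    import java.util.concurrent.atomic.AtomicBoolean;

    public class ReplicationGateSketch {
      private final AtomicBoolean enabled = new AtomicBoolean();
      private final AtomicBoolean setExternally = new AtomicBoolean();

      void onExternalStatus(boolean status) { // ReplicationStatusListener role
        setExternally.set(true);
        enabled.set(status);
      }

      void onChillMode(boolean inChillMode) { // ChillModeStatusListener role
        if (!setExternally.get()) {
          enabled.set(!inChillMode);
        }
      }

      public static void main(String[] args) {
        ReplicationGateSketch gate = new ReplicationGateSketch();
        gate.onChillMode(true);                 // chill mode: replication off
        System.out.println(gate.enabled.get()); // false
        gate.onExternalStatus(true);            // explicit override
        gate.onChillMode(true);                 // now ignored
        System.out.println(gate.enabled.get()); // true
      }
    }
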
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.container.replication; - -/** - * JMX interface to monitor replication status. - */ -public interface ReplicationActivityStatusMXBean { - - boolean isReplicationEnabled(); - - void setReplicationEnabled(boolean enabled); -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationCommandWatcher.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationCommandWatcher.java deleted file mode 100644 index 03a81a7db867d..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationCommandWatcher.java +++ /dev/null @@ -1,56 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container.replication; - -import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager - .ReplicationCompleted; -import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager - .ReplicationRequestToRepeat; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.server.events.Event; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.hdds.server.events.EventWatcher; -import org.apache.hadoop.ozone.lease.LeaseManager; - -/** - * Command watcher to track the replication commands. - */ -public class ReplicationCommandWatcher - extends - EventWatcher<ReplicationRequestToRepeat, ReplicationCompleted> { - - public ReplicationCommandWatcher(Event<ReplicationRequestToRepeat> startEvent, - Event<ReplicationCompleted> completionEvent, - LeaseManager<Long> leaseManager) { - super(startEvent, completionEvent, leaseManager); - } - - @Override - protected void onTimeout(EventPublisher publisher, - ReplicationRequestToRepeat payload) { - //put back to the original queue - publisher.fireEvent(SCMEvents.REPLICATE_CONTAINER, - payload.getRequest()); - } - - @Override - protected void onFinished(EventPublisher publisher, - ReplicationRequestToRepeat payload) { - - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java deleted file mode 100644 index ddecdbcfa5c74..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java +++ /dev/null @@ -1,250 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
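
The watcher above implements retry-by-requeue: when a tracked replication command's lease times out, the original request is fired straight back onto the REPLICATE_CONTAINER queue, and a completed replication is a no-op. A minimal restatement of that policy, with a plain Deque standing in for the SCM event machinery:

    import java.util.ArrayDeque;
    import java.util.Deque;

    public class RequeueOnTimeoutSketch {
      public static void main(String[] args) {
        Deque<String> replicateQueue = new ArrayDeque<>();
        replicateQueue.add("container-42");
        String inFlight = replicateQueue.poll(); // command sent to a datanode
        boolean timedOut = true;                 // lease expired, no completion
        if (timedOut) {
          replicateQueue.add(inFlight);          // onTimeout(): back on the queue
        }
        System.out.println(replicateQueue);      // [container-42]
      }
    }
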
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container.replication; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Objects; -import java.util.Set; -import java.util.concurrent.ThreadFactory; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.ContainerStateManager; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.placement.algorithms - .ContainerPlacementPolicy; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.hdds.server.events.IdentifiableEventPayload; -import org.apache.hadoop.ozone.lease.LeaseManager; -import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; -import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.ThreadFactoryBuilder; -import static org.apache.hadoop.hdds.scm.events.SCMEvents - .TRACK_REPLICATE_COMMAND; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Replication Manager manages the replication of the closed container. 
- */ -public class ReplicationManager implements Runnable { - - private static final Logger LOG = - LoggerFactory.getLogger(ReplicationManager.class); - - private ReplicationQueue replicationQueue; - - private ContainerPlacementPolicy containerPlacement; - - private EventPublisher eventPublisher; - - private ReplicationCommandWatcher replicationCommandWatcher; - - private boolean running = true; - - private ContainerStateManager containerStateManager; - - public ReplicationManager(ContainerPlacementPolicy containerPlacement, - ContainerStateManager containerStateManager, EventQueue eventQueue, - LeaseManager<Long> commandWatcherLeaseManager) { - - this.containerPlacement = containerPlacement; - this.containerStateManager = containerStateManager; - this.eventPublisher = eventQueue; - - this.replicationCommandWatcher = - new ReplicationCommandWatcher(TRACK_REPLICATE_COMMAND, - SCMEvents.REPLICATION_COMPLETE, commandWatcherLeaseManager); - - this.replicationQueue = new ReplicationQueue(); - - eventQueue.addHandler(SCMEvents.REPLICATE_CONTAINER, - (replicationRequest, publisher) -> replicationQueue - .add(replicationRequest)); - - this.replicationCommandWatcher.start(eventQueue); - - } - - public void start() { - - ThreadFactory threadFactory = new ThreadFactoryBuilder().setDaemon(true) - .setNameFormat("Replication Manager").build(); - - threadFactory.newThread(this).start(); - } - - public void run() { - - while (running) { - ReplicationRequest request = null; - try { - //TODO: add throttling here - request = replicationQueue.take(); - - ContainerID containerID = new ContainerID(request.getContainerId()); - ContainerInfo containerInfo = - containerStateManager.getContainer(containerID); - - Preconditions.checkNotNull(containerInfo, - "No information about the container " + request.getContainerId()); - - Preconditions - .checkState(containerInfo.getState() == LifeCycleState.CLOSED, - "Container should be in closed state"); - - //check the current replication - List<DatanodeDetails> datanodesWithReplicas = - new ArrayList<>(getCurrentReplicas(request)); - - if (datanodesWithReplicas.size() == 0) { - LOG.warn( - "Container {} should be replicated but can't find any existing " - + "replicas", - containerID); - return; - } - - ReplicationRequest finalRequest = request; - - int inFlightReplications = replicationCommandWatcher.getTimeoutEvents( - e -> e.request.getContainerId() == finalRequest.getContainerId()) - .size(); - - int deficit = - request.getExpecReplicationCount() - datanodesWithReplicas.size() - - inFlightReplications; - - if (deficit > 0) { - - List<DatanodeDetails> selectedDatanodes = containerPlacement - .chooseDatanodes(datanodesWithReplicas, deficit, - containerInfo.getUsedBytes()); - - //send the command - for (DatanodeDetails datanode : selectedDatanodes) { - - ReplicateContainerCommand replicateCommand = - new ReplicateContainerCommand(containerID.getId(), - datanodesWithReplicas); - - eventPublisher.fireEvent(SCMEvents.DATANODE_COMMAND, - new CommandForDatanode<>( - datanode.getUuid(), replicateCommand)); - - ReplicationRequestToRepeat timeoutEvent = - new ReplicationRequestToRepeat(replicateCommand.getId(), - request); - - eventPublisher.fireEvent(TRACK_REPLICATE_COMMAND, timeoutEvent); - - } - - } else if (deficit < 0) { - //TODO: too many replicas. Not handled yet.
- } - } catch (Exception e) { - LOG.error("Can't replicate container {}", request, e); - } - } - - } - - @VisibleForTesting - protected Set<DatanodeDetails> getCurrentReplicas(ReplicationRequest request) - throws IOException { - return containerStateManager - .getContainerReplicas(new ContainerID(request.getContainerId())); - } - - @VisibleForTesting - public ReplicationQueue getReplicationQueue() { - return replicationQueue; - } - - public void stop() { - running = false; - } - - /** - * Event for the ReplicationCommandWatcher to repeat the embedded request - * in case of timeout. - */ - public static class ReplicationRequestToRepeat - implements IdentifiableEventPayload { - - private final long commandId; - - private final ReplicationRequest request; - - public ReplicationRequestToRepeat(long commandId, - ReplicationRequest request) { - this.commandId = commandId; - this.request = request; - } - - public ReplicationRequest getRequest() { - return request; - } - - @Override - public long getId() { - return commandId; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - ReplicationRequestToRepeat that = (ReplicationRequestToRepeat) o; - return Objects.equals(request, that.request); - } - - @Override - public int hashCode() { - - return Objects.hash(request); - } - } - - public static class ReplicationCompleted implements IdentifiableEventPayload { - - private final long uuid; - - public ReplicationCompleted(long uuid) { - this.uuid = uuid; - } - - @Override - public long getId() { - return uuid; - } - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationQueue.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationQueue.java deleted file mode 100644 index 4ca67be4e1181..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationQueue.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.container.replication; - -import java.util.List; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.PriorityBlockingQueue; - -/** - * Priority queue to handle under-replicated and over-replicated containers - * in Ozone. ReplicationManager will consume these messages and decide - * accordingly.
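
The core arithmetic of the run() loop above: a container needs expected minus existing minus in-flight new replicas, and only a positive deficit triggers placement. Restated standalone with example numbers:

    public class ReplicaDeficitSketch {
      // Mirrors: request.getExpecReplicationCount()
      //   - datanodesWithReplicas.size() - inFlightReplications
      static int deficit(int expected, int existing, int inFlight) {
        return expected - existing - inFlight;
      }

      public static void main(String[] args) {
        System.out.println(deficit(3, 1, 1)); //  1: schedule one more copy
        System.out.println(deficit(3, 3, 0)); //  0: fully replicated
        System.out.println(deficit(3, 4, 0)); // -1: over-replicated (the TODO above)
      }
    }
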
- */ -public class ReplicationQueue { - - private final BlockingQueue<ReplicationRequest> queue; - - public ReplicationQueue() { - queue = new PriorityBlockingQueue<>(); - } - - public boolean add(ReplicationRequest repObj) { - if (this.queue.contains(repObj)) { - // Remove the earlier message and insert this one - this.queue.remove(repObj); - } - return this.queue.add(repObj); - } - - public boolean remove(ReplicationRequest repObj) { - return queue.remove(repObj); - } - - /** - * Retrieves, but does not remove, the head of this queue, - * or returns {@code null} if this queue is empty. - * - * @return the head of this queue, or {@code null} if this queue is empty - */ - public ReplicationRequest peek() { - return queue.peek(); - } - - /** - * Retrieves and removes the head of this queue (blocking queue). - */ - public ReplicationRequest take() throws InterruptedException { - return queue.take(); - } - - public boolean removeAll(List<ReplicationRequest> repObjs) { - return queue.removeAll(repObjs); - } - - public int size() { - return queue.size(); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationRequest.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationRequest.java deleted file mode 100644 index d40cd9cd17dfa..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationRequest.java +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.container.replication; - -import java.io.Serializable; - -import org.apache.commons.lang3.builder.EqualsBuilder; -import org.apache.commons.lang3.builder.HashCodeBuilder; - -/** - * Wrapper class for hdds replication queue. Implements its natural - * ordering for priority queue. - */ -public class ReplicationRequest implements Comparable<ReplicationRequest>, - Serializable { - private final long containerId; - private final int replicationCount; - private final int expecReplicationCount; - private final long timestamp; - - public ReplicationRequest(long containerId, int replicationCount, - long timestamp, int expecReplicationCount) { - this.containerId = containerId; - this.replicationCount = replicationCount; - this.timestamp = timestamp; - this.expecReplicationCount = expecReplicationCount; - } - - public ReplicationRequest(long containerId, int replicationCount, - int expecReplicationCount) { - this(containerId, replicationCount, System.currentTimeMillis(), - expecReplicationCount); - } - - /** - * Compares this object with the specified object for order. Returns a - * negative integer, zero, or a positive integer as this object is less - * than, equal to, or greater than the specified object.
- * @param o the object to be compared. - * @return a negative integer, zero, or a positive integer as this object - * is less than, equal to, or greater than the specified object. - * @throws NullPointerException if the specified object is null - * @throws ClassCastException if the specified object's type prevents it - * from being compared to this object. - */ - @Override - public int compareTo(ReplicationRequest o) { - if (o == null) { - return 1; - } - if (this == o) { - return 0; - } - int retVal = Integer - .compare(getReplicationCount() - getExpecReplicationCount(), - o.getReplicationCount() - o.getExpecReplicationCount()); - if (retVal != 0) { - return retVal; - } - return Long.compare(getTimestamp(), o.getTimestamp()); - } - - @Override - public int hashCode() { - return new HashCodeBuilder(91, 1011) - .append(getContainerId()) - .toHashCode(); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - ReplicationRequest that = (ReplicationRequest) o; - return new EqualsBuilder().append(getContainerId(), that.getContainerId()) - .isEquals(); - } - - public long getContainerId() { - return containerId; - } - - public int getReplicationCount() { - return replicationCount; - } - - public long getTimestamp() { - return timestamp; - } - - public int getExpecReplicationCount() { - return expecReplicationCount; - } - - @Override - public String toString() { - return "ReplicationRequest{" + - "containerId=" + containerId + - ", replicationCount=" + replicationCount + - ", expecReplicationCount=" + expecReplicationCount + - ", timestamp=" + timestamp + - '}'; - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java deleted file mode 100644 index 934b01e6231df..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.container.replication; - -/** - * HDDS (Closed) Container replication related classes.
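
Two behaviours of the queue and request types above are easy to miss: add() drops an equal pending request before inserting (equality is container ID only), so a fresher request replaces a stale one; and the natural order sorts by replicationCount minus expecReplicationCount, so the most under-replicated container comes out first, with older timestamps winning ties. A standalone sketch of that ordering; the Req class is illustrative:

    import java.util.concurrent.PriorityBlockingQueue;

    public class ReplicationOrderSketch {
      static class Req implements Comparable<Req> {
        final long containerId; final int have; final int want; final long ts;
        Req(long containerId, int have, int want, long ts) {
          this.containerId = containerId; this.have = have;
          this.want = want; this.ts = ts;
        }
        @Override
        public int compareTo(Req o) {
          // Under-replication deficit first, then older requests.
          int r = Integer.compare(have - want, o.have - o.want);
          return r != 0 ? r : Long.compare(ts, o.ts);
        }
      }

      public static void main(String[] args) throws InterruptedException {
        PriorityBlockingQueue<Req> q = new PriorityBlockingQueue<>();
        q.add(new Req(1L, 2, 3, 100L)); // missing one replica
        q.add(new Req(2L, 1, 3, 200L)); // missing two replicas
        System.out.println(q.take().containerId); // 2: most deficient first
        System.out.println(q.take().containerId); // 1
      }
    }
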
- */ \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java deleted file mode 100644 index 288fa2deb14e9..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java +++ /dev/null @@ -1,245 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - * - */ -package org.apache.hadoop.hdds.scm.container.states; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; -import java.util.NavigableSet; -import java.util.TreeSet; - -import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes - .FAILED_TO_CHANGE_CONTAINER_STATE; - -/** - * Each Attribute that we manage for a container is maintained as a map. - *
- * Currently we manage the following attributes for a container. - *
- * 1. StateMap - LifeCycleState -> Set of ContainerIDs - * 2. TypeMap - ReplicationType -> Set of ContainerIDs - * 3. OwnerMap - OwnerNames -> Set of ContainerIDs - * 4. FactorMap - ReplicationFactor -> Set of ContainerIDs - *
- * This means that for a cluster size of 750 PB -- we will have around 150 - * Million containers, if we assume 5GB average container size. - *
- * That implies that these maps will take around 2/3 GB of RAM which will be - * pinned down in the SCM. This is deemed acceptable since we can tune the - * container size --say we make it 10GB average size, then we can deal with a - * cluster size of 1.5 exa bytes with the same metadata in SCMs memory. - *
- * Please note: **This class is not thread safe**. This used to be thread safe, - * while benchmarking we found that ContainerStateMap would be taking 5 - * locks for a single container insert. If we remove locks in this class, - * then we are able to perform about 540K operations per second, with the - * locks in this class it goes down to 246K operations per second. Hence we - * are going to rely on ContainerStateMap locks to maintain consistency of - * data in these classes too, since ContainerAttribute is only used by - * ContainerStateMap class. - */ -public class ContainerAttribute<T> { - private static final Logger LOG = - LoggerFactory.getLogger(ContainerAttribute.class); - - private final Map<T, NavigableSet<ContainerID>> attributeMap; - private static final NavigableSet<ContainerID> EMPTY_SET = Collections - .unmodifiableNavigableSet(new TreeSet<>()); - - /** - * Creates a Container Attribute map from an existing Map. - * - * @param attributeMap - AttributeMap - */ - public ContainerAttribute(Map<T, NavigableSet<ContainerID>> attributeMap) { - this.attributeMap = attributeMap; - } - - /** - * Create an empty Container Attribute map. - */ - public ContainerAttribute() { - this.attributeMap = new HashMap<>(); - } - - /** - * Insert or update the value in the Attribute map. - * - * @param key - The key to the set where the ContainerID should exist. - * @param value - Actual Container ID. - * @throws SCMException - on Error - */ - public boolean insert(T key, ContainerID value) throws SCMException { - Preconditions.checkNotNull(key); - Preconditions.checkNotNull(value); - - if (attributeMap.containsKey(key)) { - if (attributeMap.get(key).add(value)) { - return true; //we inserted the value as it doesn't exist in the set. - } else { // Failure indicates that this ContainerID exists in the Set - if (!attributeMap.get(key).remove(value)) { - LOG.error("Failure to remove the object from the Map.Key:{}, " + - "ContainerID: {}", key, value); - throw new SCMException("Failure to remove the object from the Map", - FAILED_TO_CHANGE_CONTAINER_STATE); - } - attributeMap.get(key).add(value); - return true; - } - } else { - // This key does not exist, we need to allocate this key in the map. - // TODO: Replace TreeSet with FoldedTreeSet from HDFS Utils. - // Skipping for now, since FoldedTreeSet does not have implementations - // for headSet and TailSet. We need those calls. - this.attributeMap.put(key, new TreeSet<>()); - // This should not fail, we just allocated this object. - attributeMap.get(key).add(value); - return true; - } - } - - /** - * Returns true if we have this bucket in the attribute map. - * - * @param key - Key to lookup - * @return true if we have the key - */ - public boolean hasKey(T key) { - Preconditions.checkNotNull(key); - return this.attributeMap.containsKey(key); - } - - /** - * Returns true if we have the key and the containerID in the bucket. - * - * @param key - Key to the bucket - * @param id - container ID that we want to lookup - * @return true or false - */ - public boolean hasContainerID(T key, ContainerID id) { - Preconditions.checkNotNull(key); - Preconditions.checkNotNull(id); - - return this.attributeMap.containsKey(key) && - this.attributeMap.get(key).contains(id); - } - - /** - * Returns true if we have the key and the containerID in the bucket. - * - * @param key - Key to the bucket - * @param id - container ID that we want to lookup - * @return true or false - */ - public boolean hasContainerID(T key, int id) { - return hasContainerID(key, ContainerID.valueof(id)); - } - - /** - * Clears all entries for this key type.
- * - * @param key - Key that identifies the Set. - */ - public void clearSet(T key) { - Preconditions.checkNotNull(key); - - if (attributeMap.containsKey(key)) { - attributeMap.get(key).clear(); - } else { - LOG.debug("key: {} does not exist in the attributeMap", key); - } - } - - /** - * Removes a container ID from the set pointed by the key. - * - * @param key - key to identify the set. - * @param value - Container ID - */ - public boolean remove(T key, ContainerID value) { - Preconditions.checkNotNull(key); - Preconditions.checkNotNull(value); - - if (attributeMap.containsKey(key)) { - if (!attributeMap.get(key).remove(value)) { - LOG.debug("ContainerID: {} does not exist in the set pointed by " + - "key:{}", value, key); - return false; - } - return true; - } else { - LOG.debug("key: {} does not exist in the attributeMap", key); - return false; - } - } - - /** - * Returns the collection that maps to the given key. - * - * @param key - Key to the bucket. - * @return Underlying Set in immutable form. - */ - public NavigableSet<ContainerID> getCollection(T key) { - Preconditions.checkNotNull(key); - - if (this.attributeMap.containsKey(key)) { - return Collections.unmodifiableNavigableSet(this.attributeMap.get(key)); - } - LOG.debug("No such Key. Key {}", key); - return EMPTY_SET; - } - - /** - * Moves a ContainerID from one bucket to another. - * - * @param currentKey - Current Key - * @param newKey - newKey - * @param value - ContainerID - * @throws SCMException on Error - */ - public void update(T currentKey, T newKey, ContainerID value) - throws SCMException { - Preconditions.checkNotNull(currentKey); - Preconditions.checkNotNull(newKey); - - boolean removed = false; - try { - removed = remove(currentKey, value); - if (!removed) { - throw new SCMException("Unable to find key in the current key bucket", - FAILED_TO_CHANGE_CONTAINER_STATE); - } - insert(newKey, value); - } catch (SCMException ex) { - // if we removed the key, insert it back to original bucket, since the - // next insert failed. - LOG.error("error in update.", ex); - if (removed) { - insert(currentKey, value); - LOG.trace("reinserted the removed key. {}", currentKey); - } - throw ex; - } - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerQueryKey.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerQueryKey.java deleted file mode 100644 index cd491154291dc..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerQueryKey.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License.
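
The update() method above is a two-step move with manual rollback: remove the ID from its current bucket, insert it into the new one, and on failure re-insert it where it came from so the ID never disappears from every bucket. The same pattern over plain maps, self-contained and with illustrative names:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.NavigableSet;
    import java.util.TreeSet;

    public class BucketMoveSketch {
      static <T> void update(Map<T, NavigableSet<Long>> buckets,
          T from, T to, long id) {
        // Step 1: remove from the current bucket; fail loudly if absent.
        if (!buckets.containsKey(from) || !buckets.get(from).remove(id)) {
          throw new IllegalStateException("id not in current bucket");
        }
        try {
          // Step 2: insert into the destination bucket.
          buckets.computeIfAbsent(to, k -> new TreeSet<>()).add(id);
        } catch (RuntimeException ex) {
          buckets.get(from).add(id); // roll back, then rethrow
          throw ex;
        }
      }

      public static void main(String[] args) {
        Map<String, NavigableSet<Long>> stateMap = new HashMap<>();
        stateMap.computeIfAbsent("OPEN", k -> new TreeSet<>()).add(42L);
        update(stateMap, "OPEN", "CLOSED", 42L);
        System.out.println(stateMap.get("CLOSED")); // [42]
      }
    }
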
- * - */ - -package org.apache.hadoop.hdds.scm.container.states; - -import org.apache.commons.lang3.builder.EqualsBuilder; -import org.apache.commons.lang3.builder.HashCodeBuilder; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; - -/** - * Key for the Caching layer for Container Query. - */ -public class ContainerQueryKey { - private final HddsProtos.LifeCycleState state; - private final String owner; - private final HddsProtos.ReplicationFactor factor; - private final HddsProtos.ReplicationType type; - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - - if (o == null || getClass() != o.getClass()) { - return false; - } - - ContainerQueryKey that = (ContainerQueryKey) o; - - return new EqualsBuilder() - .append(getState(), that.getState()) - .append(getOwner(), that.getOwner()) - .append(getFactor(), that.getFactor()) - .append(getType(), that.getType()) - .isEquals(); - } - - @Override - public int hashCode() { - return new HashCodeBuilder(61, 71) - .append(getState()) - .append(getOwner()) - .append(getFactor()) - .append(getType()) - .toHashCode(); - } - - /** - * Constructor for ContainerQueryKey. - * @param state LifeCycleState - * @param owner - Name of the Owner. - * @param factor Replication Factor. - * @param type - Replication Type. - */ - public ContainerQueryKey(HddsProtos.LifeCycleState state, String owner, - HddsProtos.ReplicationFactor factor, HddsProtos.ReplicationType type) { - this.state = state; - this.owner = owner; - this.factor = factor; - this.type = type; - } - - /** - * Returns the state of containers which this key represents. - * @return LifeCycleState - */ - public HddsProtos.LifeCycleState getState() { - return state; - } - - /** - * Returns the owner of containers which this key represents. - * @return Owner - */ - public String getOwner() { - return owner; - } - - /** - * Returns the replication factor of containers which this key represents. - * @return ReplicationFactor - */ - public HddsProtos.ReplicationFactor getFactor() { - return factor; - } - - /** - * Returns the replication type of containers which this key represents. - * @return ReplicationType - */ - public HddsProtos.ReplicationType getType() { - return type; - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerState.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerState.java deleted file mode 100644 index 1dac36ef7713e..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerState.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
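ContainerQueryKey, deleted above, works as a cache key only because equals() and hashCode() cover all four attributes, so two queries with the same (state, owner, factor, type) tuple land on the same cache slot. A small JDK-only sketch of the same idea, using `Objects.hash` as a stand-in for the commons-lang builders above (the `QueryKey` class and its String fields are simplifications for illustration):

```java
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.ConcurrentHashMap;

/** Immutable composite key; equal field tuples hit the same cache entry. */
final class QueryKey {
  private final String state;
  private final String owner;

  QueryKey(String state, String owner) {
    this.state = state;
    this.owner = owner;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (!(o instanceof QueryKey)) {
      return false;
    }
    QueryKey that = (QueryKey) o;
    return state.equals(that.state) && owner.equals(that.owner);
  }

  @Override
  public int hashCode() {
    return Objects.hash(state, owner);
  }

  public static void main(String[] args) {
    Map<QueryKey, String> cache = new ConcurrentHashMap<>();
    cache.put(new QueryKey("OPEN", "om1"), "result");
    // A structurally equal key retrieves the cached value.
    System.out.println(cache.get(new QueryKey("OPEN", "om1"))); // result
  }
}
```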
- * - */ - -package org.apache.hadoop.hdds.scm.container.states; - -import org.apache.commons.lang3.builder.EqualsBuilder; -import org.apache.commons.lang3.builder.HashCodeBuilder; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; - -/** - * Class that acts as the container state. - */ -public class ContainerState { - private final HddsProtos.ReplicationType type; - private final String owner; - private final HddsProtos.ReplicationFactor replicationFactor; - - /** - * Constructs a Container Key. - * - * @param owner - Container Owners - * @param type - Replication Type. - * @param factor - Replication Factors - */ - public ContainerState(String owner, HddsProtos.ReplicationType type, - HddsProtos.ReplicationFactor factor) { - this.type = type; - this.owner = owner; - this.replicationFactor = factor; - } - - - public HddsProtos.ReplicationType getType() { - return type; - } - - public String getOwner() { - return owner; - } - - public HddsProtos.ReplicationFactor getFactor() { - return replicationFactor; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - - if (o == null || getClass() != o.getClass()) { - return false; - } - - ContainerState that = (ContainerState) o; - - return new EqualsBuilder() - .append(type, that.type) - .append(owner, that.owner) - .append(replicationFactor, that.replicationFactor) - .isEquals(); - } - - @Override - public int hashCode() { - return new HashCodeBuilder(137, 757) - .append(type) - .append(owner) - .append(replicationFactor) - .toHashCode(); - } - - @Override - public String toString() { - return "ContainerKey{" + - ", type=" + type + - ", owner=" + owner + - ", replicationFactor=" + replicationFactor + - '}'; - } -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java deleted file mode 100644 index 880a715f6bcb3..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java +++ /dev/null @@ -1,569 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- * - */ - -package org.apache.hadoop.hdds.scm.container.states; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import java.util.HashSet; -import java.util.Set; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; -import java.util.NavigableSet; -import java.util.TreeSet; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; -import java.util.concurrent.ConcurrentHashMap; - -import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes - .CONTAINER_EXISTS; -import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes - .FAILED_TO_CHANGE_CONTAINER_STATE; -import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes - .FAILED_TO_FIND_CONTAINER; - -/** - * Container State Map acts like a unified map for various attributes that are - * used to select containers when we need allocated blocks. - *

- * This class provides the ability to query four classes of attributes. They
- * are:
- *

- * 1. LifeCycleStates - The lifecycle state of a container describes which
- * state a container is in. For example, a container needs to be in the Open
- * state for a client to be able to write to it.
- *

- * 2. Owners - Each instance of a name service -- for example, the Namenode of
- * HDFS, the Ozone Manager (OM) of Ozone, or a CBlockServer -- is an owner. It
- * is possible to have many OMs for an Ozone cluster and only one SCM, but SCM
- * keeps the data from each OM in a separate bucket, never mixing them. To
- * write data, we often have to find all open containers for a specific owner.
- *

- * 3. ReplicationType - Clients are allowed to specify what kind of
- * replication pipeline they want to use. Each container exists on top of a
- * pipeline, so we need the ReplicationType that was specified by the user.
- *

- * 4. ReplicationFactor - The replication factor represents how many copies
- * of the data should be made. Right now we support two factors, ONE
- * replica and THREE replicas. Users can specify how many copies should be
- * made for an Ozone key.
- *

- * The most common access pattern of this class is to select a container based - * on all these parameters, for example, when allocating a block we will - * select a container that belongs to user1, with Ratis replication which can - * make 3 copies of data. The fact that we will look for open containers by - * default and if we cannot find them we will add new containers. - */ -public class ContainerStateMap { - private static final Logger LOG = - LoggerFactory.getLogger(ContainerStateMap.class); - - private final ContainerAttribute lifeCycleStateMap; - private final ContainerAttribute ownerMap; - private final ContainerAttribute factorMap; - private final ContainerAttribute typeMap; - - private final Map containerMap; - // Map to hold replicas of given container. - private final Map> contReplicaMap; - private final static NavigableSet EMPTY_SET = - Collections.unmodifiableNavigableSet(new TreeSet<>()); - private final Map> resultCache; - - // Container State Map lock should be held before calling into - // Update ContainerAttributes. The consistency of ContainerAttributes is - // protected by this lock. - private final ReadWriteLock lock; - - /** - * Create a ContainerStateMap. - */ - public ContainerStateMap() { - lifeCycleStateMap = new ContainerAttribute<>(); - ownerMap = new ContainerAttribute<>(); - factorMap = new ContainerAttribute<>(); - typeMap = new ContainerAttribute<>(); - containerMap = new HashMap<>(); - lock = new ReentrantReadWriteLock(); - contReplicaMap = new HashMap<>(); -// new InstrumentedLock(getClass().getName(), LOG, -// new ReentrantLock(), -// 1000, -// 300)); - resultCache = new ConcurrentHashMap<>(); - } - - /** - * Adds a ContainerInfo Entry in the ContainerStateMap. - * - * @param info - container info - * @throws SCMException - throws if create failed. - */ - public void addContainer(ContainerInfo info) - throws SCMException { - Preconditions.checkNotNull(info, "Container Info cannot be null"); - Preconditions.checkArgument(info.getReplicationFactor().getNumber() > 0, - "ExpectedReplicaCount should be greater than 0"); - - lock.writeLock().lock(); - try { - ContainerID id = ContainerID.valueof(info.getContainerID()); - if (containerMap.putIfAbsent(id, info) != null) { - LOG.debug("Duplicate container ID detected. {}", id); - throw new - SCMException("Duplicate container ID detected.", - CONTAINER_EXISTS); - } - - lifeCycleStateMap.insert(info.getState(), id); - ownerMap.insert(info.getOwner(), id); - factorMap.insert(info.getReplicationFactor(), id); - typeMap.insert(info.getReplicationType(), id); - - // Flush the cache of this container type, will be added later when - // get container queries are executed. - flushCache(info); - LOG.trace("Created container with {} successfully.", id); - } finally { - lock.writeLock().unlock(); - } - } - - /** - * Returns the latest state of Container from SCM's Container State Map. - * - * @param info - ContainerInfo - * @return ContainerInfo - */ - public ContainerInfo getContainerInfo(ContainerInfo info) { - return getContainerInfo(info.getContainerID()); - } - - /** - * Returns the latest state of Container from SCM's Container State Map. - * - * @param containerID - int - * @return container info, if found. - */ - public ContainerInfo getContainerInfo(long containerID) { - return getContainerInfo(ContainerID.valueof(containerID)); - } - - /** - * Returns the latest state of Container from SCM's Container State Map. - * - * @param containerID - ContainerID - * @return container info, if found. 
- */ - public ContainerInfo getContainerInfo(ContainerID containerID) { - lock.readLock().lock(); - try { - return containerMap.get(containerID); - } finally { - lock.readLock().unlock(); - } - } - - /** - * Returns the latest list of DataNodes where replica for given containerId - * exist. Throws an SCMException if no entry is found for given containerId. - * - * @param containerID - * @return Set - */ - public Set getContainerReplicas(ContainerID containerID) - throws SCMException { - Preconditions.checkNotNull(containerID); - lock.readLock().lock(); - try { - if (contReplicaMap.containsKey(containerID)) { - return Collections - .unmodifiableSet(contReplicaMap.get(containerID)); - } - } finally { - lock.readLock().unlock(); - } - throw new SCMException( - "No entry exist for containerId: " + containerID + " in replica map.", - ResultCodes.NO_REPLICA_FOUND); - } - - /** - * Adds given datanodes as nodes where replica for given containerId exist. - * Logs a debug entry if a datanode is already added as replica for given - * ContainerId. - * - * @param containerID - * @param dnList - */ - public void addContainerReplica(ContainerID containerID, - DatanodeDetails... dnList) { - Preconditions.checkNotNull(containerID); - lock.writeLock().lock(); - try { - for (DatanodeDetails dn : dnList) { - Preconditions.checkNotNull(dn); - if (contReplicaMap.containsKey(containerID)) { - if(!contReplicaMap.get(containerID).add(dn)) { - LOG.debug("ReplicaMap already contains entry for container Id: " - + "{},DataNode: {}", containerID, dn); - } - } else { - Set dnSet = new HashSet<>(); - dnSet.add(dn); - contReplicaMap.put(containerID, dnSet); - } - } - } finally { - lock.writeLock().unlock(); - } - } - - /** - * Remove a container Replica for given DataNode. - * - * @param containerID - * @param dn - * @return True of dataNode is removed successfully else false. - */ - public boolean removeContainerReplica(ContainerID containerID, - DatanodeDetails dn) throws SCMException { - Preconditions.checkNotNull(containerID); - Preconditions.checkNotNull(dn); - - lock.writeLock().lock(); - try { - if (contReplicaMap.containsKey(containerID)) { - return contReplicaMap.get(containerID).remove(dn); - } - } finally { - lock.writeLock().unlock(); - } - throw new SCMException( - "No entry exist for containerId: " + containerID + " in replica map.", - ResultCodes.FAILED_TO_FIND_CONTAINER); - } - - @VisibleForTesting - public static Logger getLOG() { - return LOG; - } - - /** - * Returns the full container Map. - * - * @return - Map - */ - public Map getContainerMap() { - lock.readLock().lock(); - try { - return Collections.unmodifiableMap(containerMap); - } finally { - lock.readLock().unlock(); - } - } - - /** - * Just update the container State. - * @param info ContainerInfo. - */ - public void updateContainerInfo(ContainerInfo info) throws SCMException { - Preconditions.checkNotNull(info); - ContainerInfo currentInfo = null; - lock.writeLock().lock(); - try { - currentInfo = containerMap.get( - ContainerID.valueof(info.getContainerID())); - - if (currentInfo == null) { - throw new SCMException("No such container.", FAILED_TO_FIND_CONTAINER); - } - flushCache(info, currentInfo); - containerMap.put(info.containerID(), info); - } finally { - lock.writeLock().unlock(); - } - } - - /** - * Update the State of a container. - * - * @param info - ContainerInfo - * @param currentState - CurrentState - * @param newState - NewState. - * @throws SCMException - in case of failure. 
- */ - public void updateState(ContainerInfo info, LifeCycleState currentState, - LifeCycleState newState) throws SCMException { - Preconditions.checkNotNull(currentState); - Preconditions.checkNotNull(newState); - - ContainerID id = new ContainerID(info.getContainerID()); - ContainerInfo currentInfo = null; - - lock.writeLock().lock(); - try { - try { - // Just flush both old and new data sets from the result cache. - ContainerInfo newInfo = new ContainerInfo(info); - newInfo.setState(newState); - flushCache(newInfo, info); - - currentInfo = containerMap.get(id); - - if (currentInfo == null) { - throw new - SCMException("No such container.", FAILED_TO_FIND_CONTAINER); - } - // We are updating two places before this update is done, these can - // fail independently, since the code needs to handle it. - - // We update the attribute map, if that fails it will throw an - // exception, so no issues, if we are successful, we keep track of the - // fact that we have updated the lifecycle state in the map, and update - // the container state. If this second update fails, we will attempt to - // roll back the earlier change we did. If the rollback fails, we can - // be in an inconsistent state, - - info.setState(newState); - containerMap.put(id, info); - lifeCycleStateMap.update(currentState, newState, id); - LOG.trace("Updated the container {} to new state. Old = {}, new = " + - "{}", id, currentState, newState); - } catch (SCMException ex) { - LOG.error("Unable to update the container state. {}", ex); - // we need to revert the change in this attribute since we are not - // able to update the hash table. - LOG.info("Reverting the update to lifecycle state. Moving back to " + - "old state. Old = {}, Attempted state = {}", currentState, - newState); - - containerMap.put(id, currentInfo); - - // if this line throws, the state map can be in an inconsistent - // state, since we will have modified the attribute by the - // container state will not in sync since we were not able to put - // that into the hash table. - lifeCycleStateMap.update(newState, currentState, id); - - throw new SCMException("Updating the container map failed.", ex, - FAILED_TO_CHANGE_CONTAINER_STATE); - } - } finally { - lock.writeLock().unlock(); - } - } - - /** - * Returns A list of containers owned by a name service. - * - * @param ownerName - Name of the NameService. - * @return - NavigableSet of ContainerIDs. - */ - NavigableSet getContainerIDsByOwner(String ownerName) { - Preconditions.checkNotNull(ownerName); - lock.readLock().lock(); - try { - return ownerMap.getCollection(ownerName); - } finally { - lock.readLock().unlock(); - } - } - - /** - * Returns Containers in the System by the Type. - * - * @param type - Replication type -- StandAlone, Ratis etc. - * @return NavigableSet - */ - NavigableSet getContainerIDsByType(ReplicationType type) { - Preconditions.checkNotNull(type); - lock.readLock().lock(); - try { - return typeMap.getCollection(type); - } finally { - lock.readLock().unlock(); - } - } - - /** - * Returns Containers by replication factor. - * - * @param factor - Replication Factor. - * @return NavigableSet. - */ - NavigableSet getContainerIDsByFactor(ReplicationFactor factor) { - Preconditions.checkNotNull(factor); - lock.readLock().lock(); - try { - return factorMap.getCollection(factor); - } finally { - lock.readLock().unlock(); - } - } - - /** - * Returns Containers by State. - * - * @param state - State - Open, Closed etc. - * @return List of containers by state. 
- */ - public NavigableSet getContainerIDsByState( - LifeCycleState state) { - Preconditions.checkNotNull(state); - lock.readLock().lock(); - try { - return lifeCycleStateMap.getCollection(state); - } finally { - lock.readLock().unlock(); - } - } - - /** - * Gets the containers that matches the following filters. - * - * @param state - LifeCycleState - * @param owner - Owner - * @param factor - Replication Factor - * @param type - Replication Type - * @return ContainerInfo or Null if not container satisfies the criteria. - */ - public NavigableSet getMatchingContainerIDs( - LifeCycleState state, String owner, - ReplicationFactor factor, ReplicationType type) { - - Preconditions.checkNotNull(state, "State cannot be null"); - Preconditions.checkNotNull(owner, "Owner cannot be null"); - Preconditions.checkNotNull(factor, "Factor cannot be null"); - Preconditions.checkNotNull(type, "Type cannot be null"); - - lock.readLock().lock(); - try { - ContainerQueryKey queryKey = - new ContainerQueryKey(state, owner, factor, type); - if(resultCache.containsKey(queryKey)){ - return resultCache.get(queryKey); - } - - // If we cannot meet any one condition we return EMPTY_SET immediately. - // Since when we intersect these sets, the result will be empty if any - // one is empty. - NavigableSet stateSet = - lifeCycleStateMap.getCollection(state); - if (stateSet.size() == 0) { - return EMPTY_SET; - } - - NavigableSet ownerSet = ownerMap.getCollection(owner); - if (ownerSet.size() == 0) { - return EMPTY_SET; - } - - NavigableSet factorSet = factorMap.getCollection(factor); - if (factorSet.size() == 0) { - return EMPTY_SET; - } - - NavigableSet typeSet = typeMap.getCollection(type); - if (typeSet.size() == 0) { - return EMPTY_SET; - } - - - // if we add more constraints we will just add those sets here.. - NavigableSet[] sets = sortBySize(stateSet, - ownerSet, factorSet, typeSet); - - NavigableSet currentSet = sets[0]; - // We take the smallest set and intersect against the larger sets. This - // allows us to reduce the lookups to the least possible number. - for (int x = 1; x < sets.length; x++) { - currentSet = intersectSets(currentSet, sets[x]); - } - resultCache.put(queryKey, currentSet); - return currentSet; - } finally { - lock.readLock().unlock(); - } - } - - /** - * Calculates the intersection between sets and returns a new set. - * - * @param smaller - First Set - * @param bigger - Second Set - * @return resultSet which is the intersection of these two sets. - */ - private NavigableSet intersectSets( - NavigableSet smaller, - NavigableSet bigger) { - Preconditions.checkState(smaller.size() <= bigger.size(), - "This function assumes the first set is lesser or equal to second " + - "set"); - NavigableSet resultSet = new TreeSet<>(); - for (ContainerID id : smaller) { - if (bigger.contains(id)) { - resultSet.add(id); - } - } - return resultSet; - } - - /** - * Sorts a list of Sets based on Size. This is useful when we are - * intersecting the sets. - * - * @param sets - varagrs of sets - * @return Returns a sorted array of sets based on the size of the set. - */ - @SuppressWarnings("unchecked") - private NavigableSet[] sortBySize( - NavigableSet... sets) { - for (int x = 0; x < sets.length - 1; x++) { - for (int y = 0; y < sets.length - x - 1; y++) { - if (sets[y].size() > sets[y + 1].size()) { - NavigableSet temp = sets[y]; - sets[y] = sets[y + 1]; - sets[y + 1] = temp; - } - } - } - return sets; - } - - private void flushCache(ContainerInfo... 
containerInfos) { - for (ContainerInfo containerInfo : containerInfos) { - ContainerQueryKey key = new ContainerQueryKey(containerInfo.getState(), - containerInfo.getOwner(), containerInfo.getReplicationFactor(), - containerInfo.getReplicationType()); - resultCache.remove(key); - } - } - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/package-info.java deleted file mode 100644 index 8ad1c8b842f7c..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - * - */ - -/** - * Container States package. - */ -package org.apache.hadoop.hdds.scm.container.states; \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java deleted file mode 100644 index 77b87132a4ad2..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java +++ /dev/null @@ -1,255 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
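ContainerStateMap's getMatchingContainerIDs above short-circuits on any empty attribute set, then intersects the remaining sets starting from the smallest, which bounds the number of membership probes by the smallest set's size. A self-contained sketch of that intersection strategy (method and class names are illustrative; `retainAll` replaces the original's hand-rolled loop with equivalent behavior):

```java
import java.util.Arrays;
import java.util.Comparator;
import java.util.NavigableSet;
import java.util.TreeSet;

public final class SetIntersection {
  /** Intersects sorted sets smallest-first; an empty input => empty result. */
  @SafeVarargs
  static NavigableSet<Long> intersect(NavigableSet<Long>... sets) {
    // Any empty set makes the whole intersection empty.
    for (NavigableSet<Long> s : sets) {
      if (s.isEmpty()) {
        return new TreeSet<>();
      }
    }
    // Sort by size so we probe the larger sets with the fewest candidates.
    Arrays.sort(sets, Comparator.comparingInt(NavigableSet::size));
    NavigableSet<Long> result = new TreeSet<>(sets[0]);
    for (int i = 1; i < sets.length; i++) {
      result.retainAll(sets[i]); // keep only IDs present in every set
    }
    return result;
  }

  public static void main(String[] args) {
    NavigableSet<Long> open = new TreeSet<>(Arrays.asList(1L, 2L, 3L));
    NavigableSet<Long> owned = new TreeSet<>(Arrays.asList(2L, 3L, 4L));
    System.out.println(intersect(open, owned)); // [2, 3]
  }
}
```

Starting from a copy of the smallest set keeps the working set as small as possible from the first pass, which is exactly why the original sorts before intersecting.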
- * - */ - -package org.apache.hadoop.hdds.scm.events; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.block.PendingDeleteStatusList; -import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler - .CloseContainerStatus; -import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler; -import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler - .ReplicationStatus; -import org.apache.hadoop.hdds.scm.container.CloseContainerEventHandler - .CloseContainerRetryableReq; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher - .PipelineReportFromDatanode; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher - .PipelineActionsFromDatanode; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher - .ContainerActionsFromDatanode; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher - .CommandStatusReportFromDatanode; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher - .ContainerReportFromDatanode; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher - .NodeReportFromDatanode; -import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager; -import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager - .ReplicationCompleted; -import org.apache.hadoop.hdds.scm.container.replication.ReplicationRequest; - -import org.apache.hadoop.hdds.scm.server.SCMDatanodeProtocolServer.NodeRegistrationContainerReport; -import org.apache.hadoop.hdds.server.events.Event; -import org.apache.hadoop.hdds.server.events.TypedEvent; -import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; - -/** - * Class that acts as the namespace for all SCM Events. - */ -public final class SCMEvents { - - /** - * NodeReports are sent out by Datanodes. This report is received by - * SCMDatanodeHeartbeatDispatcher and NodeReport Event is generated. - */ - public static final TypedEvent NODE_REPORT = - new TypedEvent<>(NodeReportFromDatanode.class, "Node_Report"); - - /** - * Event generated on DataNode registration. - */ - public static final TypedEvent - NODE_REGISTRATION_CONT_REPORT = new TypedEvent<>( - NodeRegistrationContainerReport.class, - "Node_Registration_Container_Report"); - - /** - * ContainerReports are send out by Datanodes. This report is received by - * SCMDatanodeHeartbeatDispatcher and Container_Report Event is generated. - */ - public static final TypedEvent CONTAINER_REPORT = - new TypedEvent<>(ContainerReportFromDatanode.class, "Container_Report"); - - /** - * ContainerActions are sent by Datanode. This event is received by - * SCMDatanodeHeartbeatDispatcher and CONTAINER_ACTIONS event is generated. - */ - public static final TypedEvent - CONTAINER_ACTIONS = new TypedEvent<>(ContainerActionsFromDatanode.class, - "Container_Actions"); - - /** - * PipelineReports are send out by Datanodes. This report is received by - * SCMDatanodeHeartbeatDispatcher and Pipeline_Report Event is generated. - */ - public static final TypedEvent PIPELINE_REPORT = - new TypedEvent<>(PipelineReportFromDatanode.class, "Pipeline_Report"); - - /** - * PipelineActions are sent by Datanode. This event is received by - * SCMDatanodeHeartbeatDispatcher and PIPELINE_ACTIONS event is generated. 
-   */
-  public static final TypedEvent<PipelineActionsFromDatanode>
-      PIPELINE_ACTIONS = new TypedEvent<>(PipelineActionsFromDatanode.class,
-      "Pipeline_Actions");
-
-  /**
-   * Pipeline close events are triggered to close a pipeline because of
-   * failure, a stale node, decommissioning, etc.
-   */
-  public static final TypedEvent<PipelineID>
-      PIPELINE_CLOSE = new TypedEvent<>(PipelineID.class,
-      "Pipeline_Close");
-
-  /**
-   * A Command status report will be sent by datanodes. This report is
-   * received by SCMDatanodeHeartbeatDispatcher and a CommandReport event is
-   * generated.
-   */
-  public static final TypedEvent<CommandStatusReportFromDatanode>
-      CMD_STATUS_REPORT =
-      new TypedEvent<>(CommandStatusReportFromDatanode.class,
-          "Cmd_Status_Report");
-
-  /**
-   * Whenever a command for the Datanode needs to be issued by any component
-   * inside SCM, a Datanode_Command event is generated. NodeManager listens to
-   * these events and dispatches them to the Datanode for further processing.
-   */
-  public static final Event<CommandForDatanode> DATANODE_COMMAND =
-      new TypedEvent<>(CommandForDatanode.class, "Datanode_Command");
-
-  public static final TypedEvent<CommandForDatanode>
-      RETRIABLE_DATANODE_COMMAND =
-      new TypedEvent<>(CommandForDatanode.class, "Retriable_Datanode_Command");
-
-  /**
-   * A Close Container Event can be triggered under many conditions. Some of
-   * them are: 1. A Container is full; we then stop writing further data to
-   * that container. DNs let SCM know the current state and send an
-   * informational message that allows SCM to close the container.
-   *

- * 2. If a pipeline is open (for example, a Ratis pipeline) and a single node
- * fails, we will proactively close these containers.
- *

- * Once a command is dispatched to DN, we will also listen to updates from the - * datanode which lets us know that this command completed or timed out. - */ - public static final TypedEvent CLOSE_CONTAINER = - new TypedEvent<>(ContainerID.class, "Close_Container"); - - /** - * A CLOSE_CONTAINER_RETRYABLE_REQ will be triggered by - * CloseContainerEventHandler after sending a SCMCommand to DataNode. - * CloseContainerWatcher will track this event. Watcher will be responsible - * for retrying it in event of failure or timeout. - */ - public static final TypedEvent - CLOSE_CONTAINER_RETRYABLE_REQ = new TypedEvent<>( - CloseContainerRetryableReq.class, "Close_Container_Retryable"); - - /** - * This event will be triggered whenever a new datanode is registered with - * SCM. - */ - public static final TypedEvent NEW_NODE = - new TypedEvent<>(DatanodeDetails.class, "New_Node"); - - /** - * This event will be triggered whenever a datanode is moved from healthy to - * stale state. - */ - public static final TypedEvent STALE_NODE = - new TypedEvent<>(DatanodeDetails.class, "Stale_Node"); - - /** - * This event will be triggered whenever a datanode is moved from stale to - * dead state. - */ - public static final TypedEvent DEAD_NODE = - new TypedEvent<>(DatanodeDetails.class, "Dead_Node"); - - /** - * This event will be triggered by CommandStatusReportHandler whenever a - * status for Replication SCMCommand is received. - */ - public static final Event REPLICATION_STATUS = new - TypedEvent<>(ReplicationStatus.class, "Replicate_Command_Status"); - /** - * This event will be triggered by CommandStatusReportHandler whenever a - * status for CloseContainer SCMCommand is received. - */ - public static final Event - CLOSE_CONTAINER_STATUS = - new TypedEvent<>(CloseContainerStatus.class, - "Close_Container_Command_Status"); - /** - * This event will be triggered by CommandStatusReportHandler whenever a - * status for DeleteBlock SCMCommand is received. - */ - public static final TypedEvent - DELETE_BLOCK_STATUS = - new TypedEvent<>(CommandStatusReportHandler.DeleteBlockStatus.class, - "Delete_Block_Status"); - - /** - * This event will be triggered while processing container reports from DN - * when deleteTransactionID of container in report mismatches with the - * deleteTransactionID on SCM. - */ - public static final Event PENDING_DELETE_STATUS = - new TypedEvent<>(PendingDeleteStatusList.class, "Pending_Delete_Status"); - - /** - * This is the command for ReplicationManager to handle under/over - * replication. Sent by the ContainerReportHandler after processing the - * heartbeat. - */ - public static final TypedEvent REPLICATE_CONTAINER = - new TypedEvent<>(ReplicationRequest.class); - - /** - * This event is sent by the ReplicaManager to the - * ReplicationCommandWatcher to track the in-progress replication. - */ - public static final TypedEvent - TRACK_REPLICATE_COMMAND = - new TypedEvent<>(ReplicationManager.ReplicationRequestToRepeat.class); - /** - * This event comes from the Heartbeat dispatcher (in fact from the - * datanode) to notify the scm that the replication is done. This is - * received by the replicate command watcher to mark in-progress task as - * finished. -

-   * TODO: Temporary event, should be replaced by a specific Heartbeat
-   * ActionRequired event.
-   */
-  public static final TypedEvent<ReplicationCompleted> REPLICATION_COMPLETE =
-      new TypedEvent<>(ReplicationCompleted.class);
-
-  /**
-   * Signal for all the components (but especially for the replication
-   * manager and container report handler) that the replication could be
-   * started. Should be sent only if (almost) all the container states are
-   * available from the datanodes.
-   */
-  public static final TypedEvent<Boolean> START_REPLICATION =
-      new TypedEvent<>(Boolean.class);
-  public static final TypedEvent<Boolean> CHILL_MODE_STATUS =
-      new TypedEvent<>(Boolean.class);
-
-  /**
-   * Private Ctor. Never Constructed.
-   */
-  private SCMEvents() {
-  }
-
-}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/package-info.java
deleted file mode 100644
index 46181a3eb5f45..0000000000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/**
- * Events Package contains all the Events used by SCM internally to
- * communicate between different sub-systems that make up SCM.
- */
-package org.apache.hadoop.hdds.scm.events;
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java
deleted file mode 100644
index dae0b06505528..0000000000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java
+++ /dev/null
@@ -1,124 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
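The SCMEvents constants above follow one pattern: each event is identified by its payload class plus a name, and handlers subscribe to the typed event rather than to a string. A minimal JDK-only sketch of that publish/subscribe shape (the `EventBus`, `Event`, and handler wiring here are illustrative stand-ins, not the hdds EventQueue API):

```java
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Consumer;

/** Event identified by payload type plus a human-readable name. */
final class Event<T> {
  final Class<T> payloadType;
  final String name;

  Event(Class<T> payloadType, String name) {
    this.payloadType = payloadType;
    this.name = name;
  }
}

/** Dispatches a payload to every handler registered for that event. */
final class EventBus {
  private final Map<Event<?>, List<Consumer<?>>> handlers =
      new ConcurrentHashMap<>();

  <T> void subscribe(Event<T> event, Consumer<T> handler) {
    handlers.computeIfAbsent(event, e -> new ArrayList<>()).add(handler);
  }

  @SuppressWarnings("unchecked")
  <T> void fire(Event<T> event, T payload) {
    for (Consumer<?> h : handlers.getOrDefault(event, List.of())) {
      ((Consumer<T>) h).accept(payload);
    }
  }

  public static void main(String[] args) {
    Event<String> deadNode = new Event<>(String.class, "Dead_Node");
    EventBus bus = new EventBus();
    bus.subscribe(deadNode, uuid -> System.out.println("dead: " + uuid));
    bus.fire(deadNode, "dn-42"); // prints: dead: dn-42
  }
}
```

Because events are singleton constants compared by identity, the payload cast in `fire` is safe as long as every `fire` call site type-checks against the event's parameter, which is the property the TypedEvent design buys.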

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.exceptions; - -import java.io.IOException; - -/** - * Exception thrown by SCM. - */ -public class SCMException extends IOException { - private final ResultCodes result; - - /** - * Constructs an {@code IOException} with {@code null} - * as its error detail message. - */ - public SCMException(ResultCodes result) { - this.result = result; - } - - /** - * Constructs an {@code IOException} with the specified detail message. - * - * @param message The detail message (which is saved for later retrieval by - * the - * {@link #getMessage()} method) - */ - public SCMException(String message, ResultCodes result) { - super(message); - this.result = result; - } - - /** - * Constructs an {@code IOException} with the specified detail message - * and cause. - *

- *

Note that the detail message associated with {@code cause} is - * not automatically incorporated into this exception's detail - * message. - * - * @param message The detail message (which is saved for later retrieval by - * the - * {@link #getMessage()} method) - * @param cause The cause (which is saved for later retrieval by the {@link - * #getCause()} method). (A null value is permitted, and indicates that the - * cause is nonexistent or unknown.) - * @since 1.6 - */ - public SCMException(String message, Throwable cause, ResultCodes result) { - super(message, cause); - this.result = result; - } - - /** - * Constructs an {@code IOException} with the specified cause and a - * detail message of {@code (cause==null ? null : cause.toString())} - * (which typically contains the class and detail message of {@code cause}). - * This constructor is useful for IO exceptions that are little more - * than wrappers for other throwables. - * - * @param cause The cause (which is saved for later retrieval by the {@link - * #getCause()} method). (A null value is permitted, and indicates that the - * cause is nonexistent or unknown.) - * @since 1.6 - */ - public SCMException(Throwable cause, ResultCodes result) { - super(cause); - this.result = result; - } - - /** - * Returns resultCode. - * @return ResultCode - */ - public ResultCodes getResult() { - return result; - } - - /** - * Error codes to make it easy to decode these exceptions. - */ - public enum ResultCodes { - SUCCEESS, - FAILED_TO_LOAD_NODEPOOL, - FAILED_TO_FIND_NODE_IN_POOL, - FAILED_TO_FIND_HEALTHY_NODES, - FAILED_TO_FIND_NODES_WITH_SPACE, - FAILED_TO_FIND_SUITABLE_NODE, - INVALID_CAPACITY, - INVALID_BLOCK_SIZE, - CHILL_MODE_EXCEPTION, - FAILED_TO_LOAD_OPEN_CONTAINER, - FAILED_TO_ALLOCATE_CONTAINER, - FAILED_TO_CHANGE_CONTAINER_STATE, - FAILED_TO_CHANGE_PIPELINE_STATE, - CONTAINER_EXISTS, - FAILED_TO_FIND_CONTAINER, - FAILED_TO_FIND_CONTAINER_WITH_SPACE, - BLOCK_EXISTS, - FAILED_TO_FIND_BLOCK, - IO_EXCEPTION, - UNEXPECTED_CONTAINER_STATE, - SCM_NOT_INITIALIZED, - DUPLICATE_DATANODE, - NO_SUCH_DATANODE, - NO_REPLICA_FOUND, - FAILED_TO_FIND_ACTIVE_PIPELINE - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/package-info.java deleted file mode 100644 index 7b69310c23966..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
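SCMException above pairs a message and cause with a machine-readable ResultCodes value, so callers can branch on getResult() instead of parsing message text. A compact sketch of the same pattern (class and enum names shortened for illustration):

```java
import java.io.IOException;

/** IOException carrying a machine-readable result code. */
class CodedException extends IOException {
  enum Code { CONTAINER_EXISTS, FAILED_TO_FIND_CONTAINER }

  private final Code code;

  CodedException(String message, Code code) {
    super(message);
    this.code = code;
  }

  Code getCode() {
    return code;
  }

  public static void main(String[] args) {
    try {
      throw new CodedException("no such container",
          Code.FAILED_TO_FIND_CONTAINER);
    } catch (CodedException e) {
      // Branch on the code, not on the message text.
      switch (e.getCode()) {
        case FAILED_TO_FIND_CONTAINER:
          System.out.println("retry with a fresh container list");
          break;
        default:
          System.out.println("unhandled: " + e.getCode());
      }
    }
  }
}
```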

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.exceptions; -// Exceptions thrown by SCM. \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java deleted file mode 100644 index 996478caaaf50..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java +++ /dev/null @@ -1,190 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.node; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.apache.hadoop.util.Time; - -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; - -/** - * Command Queue is queue of commands for the datanode. - *

- * Node manager, container Manager and Ozone managers can queue commands for - * datanodes into this queue. These commands will be send in the order in which - * there where queued. - */ -public class CommandQueue { - // This list is used as default return value. - private static final List DEFAULT_LIST = new LinkedList<>(); - private final Map commandMap; - private final Lock lock; - private long commandsInQueue; - - /** - * Returns number of commands in queue. - * @return Command Count. - */ - public long getCommandsInQueue() { - return commandsInQueue; - } - - /** - * Constructs a Command Queue. - * TODO : Add a flusher thread that throws away commands older than a certain - * time period. - */ - public CommandQueue() { - commandMap = new HashMap<>(); - lock = new ReentrantLock(); - commandsInQueue = 0; - } - - /** - * This function is used only for test purposes. - */ - @VisibleForTesting - public void clear() { - lock.lock(); - try { - commandMap.clear(); - commandsInQueue = 0; - } finally { - lock.unlock(); - } - } - - /** - * Returns a list of Commands for the datanode to execute, if we have no - * commands returns a empty list otherwise the current set of - * commands are returned and command map set to empty list again. - * - * @param datanodeUuid Datanode UUID - * @return List of SCM Commands. - */ - @SuppressWarnings("unchecked") - List getCommand(final UUID datanodeUuid) { - lock.lock(); - try { - Commands cmds = commandMap.remove(datanodeUuid); - List cmdList = null; - if(cmds != null) { - cmdList = cmds.getCommands(); - commandsInQueue -= cmdList.size() > 0 ? cmdList.size() : 0; - // A post condition really. - Preconditions.checkState(commandsInQueue >= 0); - } - return cmds == null ? DEFAULT_LIST : cmdList; - } finally { - lock.unlock(); - } - } - - /** - * Adds a Command to the SCM Queue to send the command to container. - * - * @param datanodeUuid DatanodeDetails.Uuid - * @param command - Command - */ - public void addCommand(final UUID datanodeUuid, final SCMCommand - command) { - lock.lock(); - try { - if (commandMap.containsKey(datanodeUuid)) { - commandMap.get(datanodeUuid).add(command); - } else { - commandMap.put(datanodeUuid, new Commands(command)); - } - commandsInQueue++; - } finally { - lock.unlock(); - } - } - - /** - * Class that stores commands for a datanode. - */ - private static class Commands { - private long updateTime; - private long readTime; - private List commands; - - /** - * Constructs a Commands class. - */ - Commands() { - commands = new LinkedList<>(); - updateTime = 0; - readTime = 0; - } - - /** - * Creates the object and populates with the command. - * @param command command to add to queue. - */ - Commands(SCMCommand command) { - this(); - this.add(command); - } - - /** - * Gets the last time the commands for this node was updated. - * @return Time stamp - */ - public long getUpdateTime() { - return updateTime; - } - - /** - * Gets the last read time. - * @return last time when these commands were read from this queue. - */ - public long getReadTime() { - return readTime; - } - - /** - * Adds a command to the list. - * - * @param command SCMCommand - */ - public void add(SCMCommand command) { - this.commands.add(command); - updateTime = Time.monotonicNow(); - } - - /** - * Returns the commands for this datanode. - * @return command list. 
- */ - public List getCommands() { - List temp = this.commands; - this.commands = new LinkedList<>(); - readTime = Time.monotonicNow(); - return temp; - } - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java deleted file mode 100644 index 26b8b95b0400e..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java +++ /dev/null @@ -1,128 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.node; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.StorageReportProto; -import org.apache.hadoop.util.Time; - -import java.util.List; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; - -/** - * This class extends the primary identifier of a Datanode with ephemeral - * state, eg last reported time, usage information etc. - */ -public class DatanodeInfo extends DatanodeDetails { - - private final ReadWriteLock lock; - - private volatile long lastHeartbeatTime; - private long lastStatsUpdatedTime; - - // If required we can dissect StorageReportProto and store the raw data - private List storageReports; - - /** - * Constructs DatanodeInfo from DatanodeDetails. - * - * @param datanodeDetails Details about the datanode - */ - public DatanodeInfo(DatanodeDetails datanodeDetails) { - super(datanodeDetails); - lock = new ReentrantReadWriteLock(); - lastHeartbeatTime = Time.monotonicNow(); - } - - /** - * Updates the last heartbeat time with current time. - */ - public void updateLastHeartbeatTime() { - try { - lock.writeLock().lock(); - lastHeartbeatTime = Time.monotonicNow(); - } finally { - lock.writeLock().unlock(); - } - } - - /** - * Returns the last heartbeat time. - * - * @return last heartbeat time. - */ - public long getLastHeartbeatTime() { - try { - lock.readLock().lock(); - return lastHeartbeatTime; - } finally { - lock.readLock().unlock(); - } - } - - /** - * Updates the datanode storage reports. - * - * @param reports list of storage report - */ - public void updateStorageReports(List reports) { - try { - lock.writeLock().lock(); - lastStatsUpdatedTime = Time.monotonicNow(); - storageReports = reports; - } finally { - lock.writeLock().unlock(); - } - } - - /** - * Returns the storage reports associated with this datanode. 
- * - * @return list of storage report - */ - public List getStorageReports() { - try { - lock.readLock().lock(); - return storageReports; - } finally { - lock.readLock().unlock(); - } - } - - /** - * Returns the last updated time of datanode info. - * @return the last updated time of datanode info. - */ - public long getLastStatsUpdatedTime() { - return lastStatsUpdatedTime; - } - - @Override - public int hashCode() { - return super.hashCode(); - } - - @Override - public boolean equals(Object obj) { - return super.equals(obj); - } - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java deleted file mode 100644 index 17edf9ea17ac7..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java +++ /dev/null @@ -1,100 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
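DatanodeInfo above guards its mutable, ephemeral fields (heartbeat time, storage reports) with a ReentrantReadWriteLock so frequent readers do not serialize behind one another. A minimal sketch of that locking discipline (class and field names are illustrative):

```java
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

/** Tracks a node's last-seen time under a read-write lock. */
class NodeLiveness {
  private final ReadWriteLock lock = new ReentrantReadWriteLock();
  private long lastHeartbeatMillis;

  /** Writers take the exclusive lock for the brief update. */
  void recordHeartbeat(long nowMillis) {
    lock.writeLock().lock();
    try {
      lastHeartbeatMillis = nowMillis;
    } finally {
      lock.writeLock().unlock();
    }
  }

  /** Many readers may hold the shared lock concurrently. */
  long lastHeartbeatMillis() {
    lock.readLock().lock();
    try {
      return lastHeartbeatMillis;
    } finally {
      lock.readLock().unlock();
    }
  }
}
```

One detail worth noting: the sketch acquires the lock before entering the try block, the conventional ordering; the original acquires it inside the try, which happens to be harmless here because lock() cannot fail between the try and the update, but it is the less idiomatic form.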

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.node; - -import java.util.Set; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.ContainerStateManager; -import org.apache.hadoop.hdds.scm.container.replication.ReplicationRequest; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.server.events.EventHandler; -import org.apache.hadoop.hdds.server.events.EventPublisher; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Handles Dead Node event. - */ -public class DeadNodeHandler implements EventHandler { - - private final ContainerStateManager containerStateManager; - - private final NodeManager nodeManager; - - private static final Logger LOG = - LoggerFactory.getLogger(DeadNodeHandler.class); - - public DeadNodeHandler(NodeManager nodeManager, - ContainerStateManager containerStateManager) { - this.containerStateManager = containerStateManager; - this.nodeManager = nodeManager; - } - - @Override - public void onMessage(DatanodeDetails datanodeDetails, - EventPublisher publisher) { - nodeManager.processDeadNode(datanodeDetails.getUuid()); - - Set containers = - nodeManager.getContainers(datanodeDetails.getUuid()); - if (containers == null) { - LOG.info("There's no containers in dead datanode {}, no replica will be" - + " removed from the in-memory state.", datanodeDetails.getUuid()); - return; - } - LOG.info( - "Datanode {} is dead. Removing replications from the in-memory state.", - datanodeDetails.getUuid()); - for (ContainerID container : containers) { - try { - try { - containerStateManager.removeContainerReplica(container, - datanodeDetails); - } catch (SCMException ex) { - LOG.info("DataNode {} doesn't have replica for container {}.", - datanodeDetails.getUuid(), container.getId()); - } - - if (!containerStateManager.isOpen(container)) { - ReplicationRequest replicationRequest = - containerStateManager.checkReplicationState(container); - - if (replicationRequest != null) { - publisher.fireEvent(SCMEvents.REPLICATE_CONTAINER, - replicationRequest); - } - } - } catch (SCMException e) { - LOG.error("Can't remove container from containerStateMap {}", container - .getId(), e); - } - } - } - - /** - * Returns logger. - * */ - public static Logger getLogger() { - return LOG; - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java deleted file mode 100644 index 780aa2b9e7e68..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
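DeadNodeHandler above follows a simple flow: look up the dead node's containers, drop its replicas from the in-memory state, and queue re-replication for any container left under-replicated. A self-contained sketch of that loop with plain JDK types (the two maps, the replica target of 3, and the `requestReplication` hook are illustrative):

```java
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

/** Removes a dead node's replicas and flags containers for re-replication. */
class DeadNodeCleanup {
  // container ID -> set of node IDs currently holding a replica
  private final Map<Long, Set<String>> replicas = new HashMap<>();
  // node ID -> container IDs reported by that node
  private final Map<String, Set<Long>> nodeContainers = new HashMap<>();

  void onDeadNode(String nodeId) {
    Set<Long> containers = nodeContainers.remove(nodeId);
    if (containers == null) {
      return; // node held no containers; nothing to clean up
    }
    for (long containerId : containers) {
      Set<String> holders = replicas.get(containerId);
      if (holders != null && holders.remove(nodeId) && holders.size() < 3) {
        requestReplication(containerId); // below the target replica count
      }
    }
  }

  void requestReplication(long containerId) {
    System.out.println("re-replicate container " + containerId);
  }
}
```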
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.node; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.server.events.EventHandler; -import org.apache.hadoop.hdds.server.events.EventPublisher; - -import java.util.Collections; - -/** - * Handles New Node event. - */ -public class NewNodeHandler implements EventHandler { - - private final NodeManager nodeManager; - - public NewNodeHandler(NodeManager nodeManager) { - this.nodeManager = nodeManager; - } - - @Override - public void onMessage(DatanodeDetails datanodeDetails, - EventPublisher publisher) { - try { - nodeManager.addDatanodeInContainerMap(datanodeDetails.getUuid(), - Collections.emptySet()); - } catch (SCMException e) { - // TODO: log exception message. - } - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java deleted file mode 100644 index 0dc1a0c5d6c2f..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java +++ /dev/null @@ -1,219 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hdds.scm.node; - -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; -import org.apache.hadoop.hdds.scm.node.states.ReportResult; -import org.apache.hadoop.hdds.server.events.EventHandler; -import org.apache.hadoop.ozone.protocol.StorageContainerNodeProtocol; -import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; - -import java.io.Closeable; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; - -/** - * A node manager supports a simple interface for managing a datanode. - *

- * 1. A datanode registers with the NodeManager.
- * 2. If the node is allowed to register, we add it to the set of nodes that
- * we need to keep track of.
- * 3. A heartbeat is sent by the node at a fixed frequency.
- * 4. A node can be in any of these four states: {HEALTHY, STALE, DEAD,
- * DECOMMISSIONED}.
- *
- * HEALTHY - It is a datanode that is regularly heartbeating us. - * - * STALE - A datanode for which we have missed few heart beats. - * - * DEAD - A datanode that we have not heard from for a while. - * - * DECOMMISSIONED - Someone told us to remove this node from the tracking - * list, by calling removeNode. We will throw away this nodes info soon. - */ -public interface NodeManager extends StorageContainerNodeProtocol, - EventHandler, NodeManagerMXBean, Closeable { - /** - * Removes a data node from the management of this Node Manager. - * - * @param node - DataNode. - * @throws NodeNotFoundException - */ - void removeNode(DatanodeDetails node) throws NodeNotFoundException; - - /** - * Gets all Live Datanodes that is currently communicating with SCM. - * @param nodeState - State of the node - * @return List of Datanodes that are Heartbeating SCM. - */ - List getNodes(NodeState nodeState); - - /** - * Returns the Number of Datanodes that are communicating with SCM. - * @param nodeState - State of the node - * @return int -- count - */ - int getNodeCount(NodeState nodeState); - - /** - * Get all datanodes known to SCM. - * - * @return List of DatanodeDetails known to SCM. - */ - List getAllNodes(); - - /** - * Chill mode is the period when node manager waits for a minimum - * configured number of datanodes to report in. This is called chill mode - * to indicate the period before node manager gets into action. - * - * Forcefully exits the chill mode, even if we have not met the minimum - * criteria of the nodes reporting in. - */ - void forceExitChillMode(); - - /** - * Puts the node manager into manual chill mode. - */ - void enterChillMode(); - - /** - * Brings node manager out of manual chill mode. - */ - void exitChillMode(); - - /** - * Returns the aggregated node stats. - * @return the aggregated node stats. - */ - SCMNodeStat getStats(); - - /** - * Return a map of node stats. - * @return a map of individual node stats (live/stale but not dead). - */ - Map getNodeStats(); - - /** - * Return the node stat of the specified datanode. - * @param datanodeDetails DatanodeDetails. - * @return node stat if it is live/stale, null if it is decommissioned or - * doesn't exist. - */ - SCMNodeMetric getNodeStat(DatanodeDetails datanodeDetails); - - /** - * Returns the node state of a specific node. - * @param datanodeDetails DatanodeDetails - * @return Healthy/Stale/Dead. - */ - NodeState getNodeState(DatanodeDetails datanodeDetails); - - /** - * Get set of pipelines a datanode is part of. - * @param dnId - datanodeID - * @return Set of PipelineID - */ - Set getPipelineByDnID(UUID dnId); - - /** - * Add pipeline information in the NodeManager. - * @param pipeline - Pipeline to be added - */ - void addPipeline(Pipeline pipeline); - - /** - * Remove a pipeline information from the NodeManager. - * @param pipeline - Pipeline to be removed - */ - void removePipeline(Pipeline pipeline); - - /** - * Update set of containers available on a datanode. - * @param uuid - DatanodeID - * @param containerIds - Set of containerIDs - * @throws SCMException - if datanode is not known. For new datanode use - * addDatanodeInContainerMap call. - */ - void setContainersForDatanode(UUID uuid, Set containerIds) - throws SCMException; - - /** - * Process containerReport received from datanode. 
-   * @param uuid - DatanodeID
-   * @param containerIds - Set of containerIDs
-   * @return The result after processing containerReport
-   */
-  ReportResult processContainerReport(UUID uuid,
-      Set<ContainerID> containerIds);
-
-  /**
-   * Return set of containerIDs available on a datanode.
-   * @param uuid - DatanodeID
-   * @return - set of containerIDs
-   */
-  Set<ContainerID> getContainers(UUID uuid);
-
-  /**
-   * Insert a new datanode with set of containerIDs for containers available
-   * on it.
-   * @param uuid - DatanodeID
-   * @param containerIDs - Set of ContainerIDs
-   * @throws SCMException - if datanode already exists
-   */
-  void addDatanodeInContainerMap(UUID uuid, Set<ContainerID> containerIDs)
-      throws SCMException;
-
-  /**
-   * Add a {@link SCMCommand} to the command queue, which is
-   * handled by the HB thread asynchronously.
-   * @param dnId datanode uuid
-   * @param command
-   */
-  void addDatanodeCommand(UUID dnId, SCMCommand command);
-
-  /**
-   * Process node report.
-   *
-   * @param dnUuid
-   * @param nodeReport
-   */
-  void processNodeReport(UUID dnUuid, NodeReportProto nodeReport);
-
-  /**
-   * Process a dead node event in this Node Manager.
-   *
-   * @param dnUuid datanode uuid.
-   */
-  void processDeadNode(UUID dnUuid);
-}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManagerMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManagerMXBean.java
deleted file mode 100644
index 3ac993b77d9f9..0000000000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManagerMXBean.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.node;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-
-import java.util.Map;
-
-/**
- * This is the JMX management interface for node manager information.
- */
-@InterfaceAudience.Private
-public interface NodeManagerMXBean {
-  /**
-   * Get the minimum number of nodes to get out of chill mode.
-   *
-   * @return int
-   */
-  int getMinimumChillModeNodes();
-
-  /**
-   * Returns a chill mode status string.
-   * @return String
-   */
-  String getChillModeStatus();
-
-  /**
-   * Returns true if node manager is out of chill mode, else false.
-   * @return true if out of chill mode, else false
-   */
-  boolean isOutOfChillMode();
-
-  /**
-   * Get the number of data nodes in all states.
- * - * @return A state to number of nodes that in this state mapping - */ - Map getNodeCount(); -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeReportHandler.java deleted file mode 100644 index 331bfed1ab3ed..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeReportHandler.java +++ /dev/null @@ -1,55 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.node; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher - .NodeReportFromDatanode; -import org.apache.hadoop.hdds.server.events.EventHandler; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Handles Node Reports from datanode. - */ -public class NodeReportHandler implements EventHandler { - - private static final Logger LOGGER = LoggerFactory - .getLogger(NodeReportHandler.class); - private final NodeManager nodeManager; - - public NodeReportHandler(NodeManager nodeManager) { - Preconditions.checkNotNull(nodeManager); - this.nodeManager = nodeManager; - } - - @Override - public void onMessage(NodeReportFromDatanode nodeReportFromDatanode, - EventPublisher publisher) { - Preconditions.checkNotNull(nodeReportFromDatanode); - DatanodeDetails dn = nodeReportFromDatanode.getDatanodeDetails(); - Preconditions.checkNotNull(dn, "NodeReport is " - + "missing DatanodeDetails."); - LOGGER.trace("Processing node report for dn: {}", dn); - nodeManager - .processNodeReport(dn.getUuid(), nodeReportFromDatanode.getReport()); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java deleted file mode 100644 index 88f984b67361f..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java +++ /dev/null @@ -1,725 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.node; - -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; -import org.apache.hadoop.hdds.scm.HddsServerUtil; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.node.states.*; -import org.apache.hadoop.hdds.scm.node.states.Node2PipelineMap; -import org.apache.hadoop.hdds.server.events.Event; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.ozone.common.statemachine - .InvalidStateTransitionException; -import org.apache.hadoop.ozone.common.statemachine.StateMachine; -import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopExecutors; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.Closeable; -import java.util.*; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.function.Predicate; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_DEADNODE_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_STALENODE_INTERVAL; - -/** - * NodeStateManager maintains the state of all the datanodes in the cluster. All - * the node state change should happen only via NodeStateManager. It also - * runs a heartbeat thread which periodically updates the node state. - *
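The heartbeat thread mentioned here is not a fixed-rate timer: the manager schedules itself once and then re-schedules at the end of each pass (the comments in the run() method later in this file explain why). A minimal sketch of that pattern, with an illustrative interval value:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Self-rescheduling checker: instead of scheduleAtFixedRate, each run
// schedules the next one, so a slow pass delays (rather than piles up)
// subsequent passes. The interval value here is illustrative only.
final class SelfReschedulingChecker implements Runnable {
  private final ScheduledExecutorService executor =
      Executors.newSingleThreadScheduledExecutor();
  private final long intervalMs = 3000; // hypothetical checker interval

  void start() {
    executor.schedule(this, intervalMs, TimeUnit.MILLISECONDS);
  }

  @Override
  public void run() {
    // ... examine node heartbeats and fire state transitions here ...
    if (!executor.isShutdown()) {
      executor.schedule(this, intervalMs, TimeUnit.MILLISECONDS); // next pass
    }
  }
}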

- * The getNode(byState) functions make a copy of the node maps and then
- * create a list based on that copy. It should be assumed that these get
- * functions always report *stale* information. For example, getting the
- * deadNodeCount followed by getNodes(DEAD) could very well produce a totally
- * different count. Also getNodeCount(HEALTHY) + getNodeCount(DEAD) +
- * getNodeCount(STALE) is not guaranteed to add up to the total number of
- * nodes that we know of. Please treat all get functions in this file as a
- * snapshot of information that is inconsistent as soon as you read it.
- */
-public class NodeStateManager implements Runnable, Closeable {
-
-  /**
-   * Node's life cycle events.
-   */
-  private enum NodeLifeCycleEvent {
-    TIMEOUT, RESTORE, RESURRECT, DECOMMISSION, DECOMMISSIONED
-  }
-
-  private static final Logger LOG = LoggerFactory
-      .getLogger(NodeStateManager.class);
-
-  /**
-   * StateMachine for node lifecycle.
-   */
-  private final StateMachine<NodeState, NodeLifeCycleEvent> stateMachine;
-  /**
-   * This is the map which maintains the current state of all datanodes.
-   */
-  private final NodeStateMap nodeStateMap;
-  /**
-   * Maintains the mapping from node to pipelines a node is part of.
-   */
-  private final Node2PipelineMap node2PipelineMap;
-  /**
-   * Maintains the map from node to ContainerIDs for the containers
-   * available on the node.
-   */
-  private final Node2ContainerMap node2ContainerMap;
-  /**
-   * Used for publishing node state change events.
-   */
-  private final EventPublisher eventPublisher;
-  /**
-   * Maps the event to be triggered when a node state is updated.
-   */
-  private final Map<NodeState, Event<DatanodeDetails>> state2EventMap;
-  /**
-   * ExecutorService used for scheduling the heartbeat processing thread.
-   */
-  private final ScheduledExecutorService executorService;
-  /**
-   * The frequency at which we run the heartbeat processing thread.
-   */
-  private final long heartbeatCheckerIntervalMs;
-  /**
-   * The timeout value which will be used for marking a datanode as stale.
-   */
-  private final long staleNodeIntervalMs;
-  /**
-   * The timeout value which will be used for marking a datanode as dead.
-   */
-  private final long deadNodeIntervalMs;
-
-  /**
-   * Constructs a NodeStateManager instance with the given configuration.
- * - * @param conf Configuration - */ - public NodeStateManager(Configuration conf, EventPublisher eventPublisher) { - this.nodeStateMap = new NodeStateMap(); - this.node2PipelineMap = new Node2PipelineMap(); - this.node2ContainerMap = new Node2ContainerMap(); - this.eventPublisher = eventPublisher; - this.state2EventMap = new HashMap<>(); - initialiseState2EventMap(); - Set finalStates = new HashSet<>(); - finalStates.add(NodeState.DECOMMISSIONED); - this.stateMachine = new StateMachine<>(NodeState.HEALTHY, finalStates); - initializeStateMachine(); - heartbeatCheckerIntervalMs = HddsServerUtil - .getScmheartbeatCheckerInterval(conf); - staleNodeIntervalMs = HddsServerUtil.getStaleNodeInterval(conf); - deadNodeIntervalMs = HddsServerUtil.getDeadNodeInterval(conf); - Preconditions.checkState(heartbeatCheckerIntervalMs > 0, - OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL + " should be greater than 0."); - Preconditions.checkState(staleNodeIntervalMs < deadNodeIntervalMs, - OZONE_SCM_STALENODE_INTERVAL + " should be less than" + - OZONE_SCM_DEADNODE_INTERVAL); - executorService = HadoopExecutors.newScheduledThreadPool(1, - new ThreadFactoryBuilder().setDaemon(true) - .setNameFormat("SCM Heartbeat Processing Thread - %d").build()); - executorService.schedule(this, heartbeatCheckerIntervalMs, - TimeUnit.MILLISECONDS); - } - - /** - * Populates state2event map. - */ - private void initialiseState2EventMap() { - state2EventMap.put(NodeState.STALE, SCMEvents.STALE_NODE); - state2EventMap.put(NodeState.DEAD, SCMEvents.DEAD_NODE); - } - - /* - * - * Node and State Transition Mapping: - * - * State: HEALTHY -------------------> STALE - * Event: TIMEOUT - * - * State: STALE -------------------> DEAD - * Event: TIMEOUT - * - * State: STALE -------------------> HEALTHY - * Event: RESTORE - * - * State: DEAD -------------------> HEALTHY - * Event: RESURRECT - * - * State: HEALTHY -------------------> DECOMMISSIONING - * Event: DECOMMISSION - * - * State: STALE -------------------> DECOMMISSIONING - * Event: DECOMMISSION - * - * State: DEAD -------------------> DECOMMISSIONING - * Event: DECOMMISSION - * - * State: DECOMMISSIONING -------------------> DECOMMISSIONED - * Event: DECOMMISSIONED - * - * Node State Flow - * - * +--------------------------------------------------------+ - * | (RESURRECT) | - * | +--------------------------+ | - * | | (RESTORE) | | - * | | | | - * V V | | - * [HEALTHY]------------------->[STALE]------------------->[DEAD] - * | (TIMEOUT) | (TIMEOUT) | - * | | | - * | | | - * | | | - * | | | - * | (DECOMMISSION) | (DECOMMISSION) | (DECOMMISSION) - * | V | - * +------------------->[DECOMMISSIONING]<----------------+ - * | - * | (DECOMMISSIONED) - * | - * V - * [DECOMMISSIONED] - * - */ - - /** - * Initializes the lifecycle of node state machine. 
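The transition mapping and diagram above read as a partial function (state, event) -> next state. A small self-contained sketch of such a table-driven machine, as a simplified stand-in for the StateMachine class used here:

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

// Table-driven lifecycle sketch mirroring the transitions listed above.
final class NodeLifecycle {
  enum State { HEALTHY, STALE, DEAD, DECOMMISSIONING, DECOMMISSIONED }
  enum Event { TIMEOUT, RESTORE, RESURRECT, DECOMMISSION, DECOMMISSIONED }

  private final Map<State, Map<Event, State>> table = new HashMap<>();

  NodeLifecycle() {
    add(State.HEALTHY, Event.TIMEOUT, State.STALE);
    add(State.STALE, Event.TIMEOUT, State.DEAD);
    add(State.STALE, Event.RESTORE, State.HEALTHY);
    add(State.DEAD, Event.RESURRECT, State.HEALTHY);
    add(State.HEALTHY, Event.DECOMMISSION, State.DECOMMISSIONING);
    add(State.STALE, Event.DECOMMISSION, State.DECOMMISSIONING);
    add(State.DEAD, Event.DECOMMISSION, State.DECOMMISSIONING);
    add(State.DECOMMISSIONING, Event.DECOMMISSIONED, State.DECOMMISSIONED);
  }

  private void add(State from, Event event, State to) {
    table.computeIfAbsent(from, s -> new HashMap<>()).put(event, to);
  }

  /** Returns the next state, or null for an invalid transition. */
  State next(State from, Event event) {
    return table.getOrDefault(from, Collections.emptyMap()).get(event);
  }
}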
- */ - private void initializeStateMachine() { - stateMachine.addTransition( - NodeState.HEALTHY, NodeState.STALE, NodeLifeCycleEvent.TIMEOUT); - stateMachine.addTransition( - NodeState.STALE, NodeState.DEAD, NodeLifeCycleEvent.TIMEOUT); - stateMachine.addTransition( - NodeState.STALE, NodeState.HEALTHY, NodeLifeCycleEvent.RESTORE); - stateMachine.addTransition( - NodeState.DEAD, NodeState.HEALTHY, NodeLifeCycleEvent.RESURRECT); - stateMachine.addTransition( - NodeState.HEALTHY, NodeState.DECOMMISSIONING, - NodeLifeCycleEvent.DECOMMISSION); - stateMachine.addTransition( - NodeState.STALE, NodeState.DECOMMISSIONING, - NodeLifeCycleEvent.DECOMMISSION); - stateMachine.addTransition( - NodeState.DEAD, NodeState.DECOMMISSIONING, - NodeLifeCycleEvent.DECOMMISSION); - stateMachine.addTransition( - NodeState.DECOMMISSIONING, NodeState.DECOMMISSIONED, - NodeLifeCycleEvent.DECOMMISSIONED); - - } - - /** - * Adds a new node to the state manager. - * - * @param datanodeDetails DatanodeDetails - * - * @throws NodeAlreadyExistsException if the node is already present - */ - public void addNode(DatanodeDetails datanodeDetails) - throws NodeAlreadyExistsException { - nodeStateMap.addNode(datanodeDetails, stateMachine.getInitialState()); - eventPublisher.fireEvent(SCMEvents.NEW_NODE, datanodeDetails); - } - - /** - * Adds a pipeline in the node2PipelineMap. - * @param pipeline - Pipeline to be added - */ - public void addPipeline(Pipeline pipeline) { - node2PipelineMap.addPipeline(pipeline); - } - - /** - * Get information about the node. - * - * @param datanodeDetails DatanodeDetails - * - * @return DatanodeInfo - * - * @throws NodeNotFoundException if the node is not present - */ - public DatanodeInfo getNode(DatanodeDetails datanodeDetails) - throws NodeNotFoundException { - return nodeStateMap.getNodeInfo(datanodeDetails.getUuid()); - } - - /** - * Updates the last heartbeat time of the node. - * - * @throws NodeNotFoundException if the node is not present - */ - public void updateLastHeartbeatTime(DatanodeDetails datanodeDetails) - throws NodeNotFoundException { - nodeStateMap.getNodeInfo(datanodeDetails.getUuid()) - .updateLastHeartbeatTime(); - } - - /** - * Returns the current state of the node. - * - * @param datanodeDetails DatanodeDetails - * - * @return NodeState - * - * @throws NodeNotFoundException if the node is not present - */ - public NodeState getNodeState(DatanodeDetails datanodeDetails) - throws NodeNotFoundException { - return nodeStateMap.getNodeState(datanodeDetails.getUuid()); - } - - /** - * Returns all the node which are in healthy state. - * - * @return list of healthy nodes - */ - public List getHealthyNodes() { - return getNodes(NodeState.HEALTHY); - } - - /** - * Returns all the node which are in stale state. - * - * @return list of stale nodes - */ - public List getStaleNodes() { - return getNodes(NodeState.STALE); - } - - /** - * Returns all the node which are in dead state. - * - * @return list of dead nodes - */ - public List getDeadNodes() { - return getNodes(NodeState.DEAD); - } - - /** - * Returns all the node which are in the specified state. 
- * - * @param state NodeState - * - * @return list of nodes - */ - public List getNodes(NodeState state) { - List nodes = new LinkedList<>(); - nodeStateMap.getNodes(state).forEach( - uuid -> { - try { - nodes.add(nodeStateMap.getNodeDetails(uuid)); - } catch (NodeNotFoundException e) { - // This should not happen unless someone else other than - // NodeStateManager is directly modifying NodeStateMap and removed - // the node entry after we got the list of UUIDs. - LOG.error("Inconsistent NodeStateMap! " + nodeStateMap); - } - }); - return nodes; - } - - /** - * Returns all the nodes which have registered to NodeStateManager. - * - * @return all the managed nodes - */ - public List getAllNodes() { - List nodes = new LinkedList<>(); - nodeStateMap.getAllNodes().forEach( - uuid -> { - try { - nodes.add(nodeStateMap.getNodeDetails(uuid)); - } catch (NodeNotFoundException e) { - // This should not happen unless someone else other than - // NodeStateManager is directly modifying NodeStateMap and removed - // the node entry after we got the list of UUIDs. - LOG.error("Inconsistent NodeStateMap! " + nodeStateMap); - } - }); - return nodes; - } - - /** - * Gets set of pipelineID a datanode belongs to. - * @param dnId - Datanode ID - * @return Set of PipelineID - */ - public Set getPipelineByDnID(UUID dnId) { - return node2PipelineMap.getPipelines(dnId); - } - - /** - * Returns the count of healthy nodes. - * - * @return healthy node count - */ - public int getHealthyNodeCount() { - return getNodeCount(NodeState.HEALTHY); - } - - /** - * Returns the count of stale nodes. - * - * @return stale node count - */ - public int getStaleNodeCount() { - return getNodeCount(NodeState.STALE); - } - - /** - * Returns the count of dead nodes. - * - * @return dead node count - */ - public int getDeadNodeCount() { - return getNodeCount(NodeState.DEAD); - } - - /** - * Returns the count of nodes in specified state. - * - * @param state NodeState - * - * @return node count - */ - public int getNodeCount(NodeState state) { - return nodeStateMap.getNodeCount(state); - } - - /** - * Returns the count of all nodes managed by NodeStateManager. - * - * @return node count - */ - public int getTotalNodeCount() { - return nodeStateMap.getTotalNodeCount(); - } - - /** - * Removes a node from NodeStateManager. - * - * @param datanodeDetails DatanodeDetails - * - * @throws NodeNotFoundException if the node is not present - */ - public void removeNode(DatanodeDetails datanodeDetails) - throws NodeNotFoundException { - nodeStateMap.removeNode(datanodeDetails.getUuid()); - } - - /** - * Returns the current stats of the node. - * - * @param uuid node id - * - * @return SCMNodeStat - * - * @throws NodeNotFoundException if the node is not present - */ - public SCMNodeStat getNodeStat(UUID uuid) throws NodeNotFoundException { - return nodeStateMap.getNodeStat(uuid); - } - - /** - * Returns a unmodifiable copy of nodeStats. - * @return map with node stats. - */ - public Map getNodeStatsMap() { - return nodeStateMap.getNodeStats(); - } - - /** - * Set the stat for the node. - * - * @param uuid node id. - * - * @param newstat new stat that will set to the specify node. - */ - public void setNodeStat(UUID uuid, SCMNodeStat newstat) { - nodeStateMap.setNodeStat(uuid, newstat); - } - - /** - * Remove the current stats of the specify node. - * - * @param uuid node id - * - * @return SCMNodeStat the stat removed from the node. - * - * @throws NodeNotFoundException if the node is not present. 
- */
-  public SCMNodeStat removeNodeStat(UUID uuid) throws NodeNotFoundException {
-    return nodeStateMap.removeNodeStat(uuid);
-  }
-
-  /**
-   * Removes a pipeline from the node2PipelineMap.
-   * @param pipeline - Pipeline to be removed
-   */
-  public void removePipeline(Pipeline pipeline) {
-    node2PipelineMap.removePipeline(pipeline);
-  }
-
-  /**
-   * Update set of containers available on a datanode.
-   * @param uuid - DatanodeID
-   * @param containerIds - Set of containerIDs
-   * @throws SCMException - if datanode is not known. For new datanode use
-   *                        addDatanodeInContainerMap call.
-   */
-  public void setContainersForDatanode(UUID uuid,
-      Set<ContainerID> containerIds) throws SCMException {
-    node2ContainerMap.setContainersForDatanode(uuid, containerIds);
-  }
-
-  /**
-   * Process containerReport received from datanode.
-   * @param uuid - DatanodeID
-   * @param containerIds - Set of containerIDs
-   * @return The result after processing containerReport
-   */
-  public ReportResult processContainerReport(UUID uuid,
-      Set<ContainerID> containerIds) {
-    return node2ContainerMap.processReport(uuid, containerIds);
-  }
-
-  /**
-   * Return set of containerIDs available on a datanode.
-   * @param uuid - DatanodeID
-   * @return - set of containerIDs
-   */
-  public Set<ContainerID> getContainers(UUID uuid) {
-    return node2ContainerMap.getContainers(uuid);
-  }
-
-  /**
-   * Insert a new datanode with set of containerIDs for containers available
-   * on it.
-   * @param uuid - DatanodeID
-   * @param containerIDs - Set of ContainerIDs
-   * @throws SCMException - if datanode already exists
-   */
-  public void addDatanodeInContainerMap(UUID uuid,
-      Set<ContainerID> containerIDs) throws SCMException {
-    node2ContainerMap.insertNewDatanode(uuid, containerIDs);
-  }
-
-  /**
-   * Moves stale or dead nodes back to healthy if we got a heartbeat from
-   * them. Moves healthy nodes to stale if needed. Moves stale nodes to dead
-   * if needed.
-   *
-   * @see Thread#run()
-   */
-  @Override
-  public void run() {
-
-    /*
-     *
-     *   staleNodeDeadline                 healthyNodeDeadline
-     *          |                                  |
-     *     Dead | Stale                            | Healthy
-     *     Node | Node                             | Node
-     *   Window | Window                           | Window
-     * ---------+----------------------------------+-------------------->
-     *                    >>-->> time-line >>-->>
-     *
-     * Here is the logic of computing the health of a node.
-     *
-     * 1. We get the current time and look back to the time when we last got
-     *    a heartbeat from a node.
-     *
-     * 2. If the last heartbeat was within the window of a healthy node, we
-     *    mark it as healthy.
-     *
-     * 3. If the last HB timestamp is older and falls within the window of
-     *    stale node time, we will mark it as stale.
-     *
-     * 4. If the last HB time is older than the stale window, then the node
-     *    is marked as dead.
-     *
-     * The processing starts from the current time and looks backwards in
-     * time.
-     */
-    long processingStartTime = Time.monotonicNow();
-    // After this time node is considered to be stale.
-    long healthyNodeDeadline = processingStartTime - staleNodeIntervalMs;
-    // After this time node is considered to be dead.
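    // Worked example (illustrative values, not the configured defaults):
    // with staleNodeIntervalMs = 90_000 (90s) and deadNodeIntervalMs =
    // 600_000 (10min), a pass starting at processingStartTime = 1_000_000
    // computes
    //   healthyNodeDeadline = 1_000_000 -  90_000 = 910_000
    //   staleNodeDeadline   = 1_000_000 - 600_000 = 400_000
    // so a node whose last heartbeat is >= 910_000 stays HEALTHY, one in
    // [400_000, 910_000) is STALE, and one older than 400_000 is DEAD.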
- long staleNodeDeadline = processingStartTime - deadNodeIntervalMs; - - Predicate healthyNodeCondition = - (lastHbTime) -> lastHbTime >= healthyNodeDeadline; - // staleNodeCondition is superset of stale and dead node - Predicate staleNodeCondition = - (lastHbTime) -> lastHbTime < healthyNodeDeadline; - Predicate deadNodeCondition = - (lastHbTime) -> lastHbTime < staleNodeDeadline; - try { - for (NodeState state : NodeState.values()) { - List nodes = nodeStateMap.getNodes(state); - for (UUID id : nodes) { - DatanodeInfo node = nodeStateMap.getNodeInfo(id); - switch (state) { - case HEALTHY: - // Move the node to STALE if the last heartbeat time is less than - // configured stale-node interval. - updateNodeState(node, staleNodeCondition, state, - NodeLifeCycleEvent.TIMEOUT); - break; - case STALE: - // Move the node to DEAD if the last heartbeat time is less than - // configured dead-node interval. - updateNodeState(node, deadNodeCondition, state, - NodeLifeCycleEvent.TIMEOUT); - // Restore the node if we have received heartbeat before configured - // stale-node interval. - updateNodeState(node, healthyNodeCondition, state, - NodeLifeCycleEvent.RESTORE); - break; - case DEAD: - // Resurrect the node if we have received heartbeat before - // configured stale-node interval. - updateNodeState(node, healthyNodeCondition, state, - NodeLifeCycleEvent.RESURRECT); - break; - // We don't do anything for DECOMMISSIONING and DECOMMISSIONED in - // heartbeat processing. - case DECOMMISSIONING: - case DECOMMISSIONED: - default: - } - } - } - } catch (NodeNotFoundException e) { - // This should not happen unless someone else other than - // NodeStateManager is directly modifying NodeStateMap and removed - // the node entry after we got the list of UUIDs. - LOG.error("Inconsistent NodeStateMap! " + nodeStateMap); - } - long processingEndTime = Time.monotonicNow(); - //If we have taken too much time for HB processing, log that information. - if ((processingEndTime - processingStartTime) > - heartbeatCheckerIntervalMs) { - LOG.error("Total time spend processing datanode HB's is greater than " + - "configured values for datanode heartbeats. Please adjust the" + - " heartbeat configs. Time Spend on HB processing: {} seconds " + - "Datanode heartbeat Interval: {} seconds.", - TimeUnit.MILLISECONDS - .toSeconds(processingEndTime - processingStartTime), - heartbeatCheckerIntervalMs); - } - - // we purposefully make this non-deterministic. Instead of using a - // scheduleAtFixedFrequency we will just go to sleep - // and wake up at the next rendezvous point, which is currentTime + - // heartbeatCheckerIntervalMs. This leads to the issue that we are now - // heart beating not at a fixed cadence, but clock tick + time taken to - // work. - // - // This time taken to work can skew the heartbeat processor thread. - // The reason why we don't care is because of the following reasons. - // - // 1. checkerInterval is general many magnitudes faster than datanode HB - // frequency. - // - // 2. if we have too much nodes, the SCM would be doing only HB - // processing, this could lead to SCM's CPU starvation. With this - // approach we always guarantee that HB thread sleeps for a little while. - // - // 3. It is possible that we will never finish processing the HB's in the - // thread. But that means we have a mis-configured system. We will warn - // the users by logging that information. - // - // 4. 
And the most important reason, heartbeats are not blocked even if - // this thread does not run, they will go into the processing queue. - - if (!Thread.currentThread().isInterrupted() && - !executorService.isShutdown()) { - executorService.schedule(this, heartbeatCheckerIntervalMs, - TimeUnit.MILLISECONDS); - } else { - LOG.info("Current Thread is interrupted, shutting down HB processing " + - "thread for Node Manager."); - } - - } - - /** - * Updates the node state if the condition satisfies. - * - * @param node DatanodeInfo - * @param condition condition to check - * @param state current state of node - * @param lifeCycleEvent NodeLifeCycleEvent to be applied if condition - * matches - * - * @throws NodeNotFoundException if the node is not present - */ - private void updateNodeState(DatanodeInfo node, Predicate condition, - NodeState state, NodeLifeCycleEvent lifeCycleEvent) - throws NodeNotFoundException { - try { - if (condition.test(node.getLastHeartbeatTime())) { - NodeState newState = stateMachine.getNextState(state, lifeCycleEvent); - nodeStateMap.updateNodeState(node.getUuid(), state, newState); - if (state2EventMap.containsKey(newState)) { - eventPublisher.fireEvent(state2EventMap.get(newState), node); - } - } - } catch (InvalidStateTransitionException e) { - LOG.warn("Invalid state transition of node {}." + - " Current state: {}, life cycle event: {}", - node, state, lifeCycleEvent); - } - } - - @Override - public void close() { - executorService.shutdown(); - try { - if (!executorService.awaitTermination(5, TimeUnit.SECONDS)) { - executorService.shutdownNow(); - } - - if (!executorService.awaitTermination(5, TimeUnit.SECONDS)) { - LOG.error("Unable to shutdown NodeStateManager properly."); - } - } catch (InterruptedException e) { - executorService.shutdownNow(); - Thread.currentThread().interrupt(); - } - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java deleted file mode 100644 index 36a6f154ad51f..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java +++ /dev/null @@ -1,599 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hdds.scm.node; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.node.states.NodeAlreadyExistsException; -import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; -import org.apache.hadoop.hdds.scm.node.states.ReportResult; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.hdds.scm.VersionInfo; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto - .ErrorCode; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.StorageReportProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto; -import org.apache.hadoop.ipc.Server; -import org.apache.hadoop.metrics2.util.MBeans; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.protocol.StorageContainerNodeProtocol; -import org.apache.hadoop.ozone.protocol.VersionResponse; -import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; -import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand; -import org.apache.hadoop.ozone.protocol.commands.ReregisterCommand; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.management.ObjectName; -import java.io.IOException; -import java.net.InetAddress; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.atomic.AtomicBoolean; - -/** - * Maintains information about the Datanodes on SCM side. - *

- * Heartbeat handling in SCM is very simple compared to the HDFS
- * HeartbeatManager.
- *
- * The getNode(byState) functions make copy of node maps and then creates a list - * based on that. It should be assumed that these get functions always report - * *stale* information. For example, getting the deadNodeCount followed by - * getNodes(DEAD) could very well produce totally different count. Also - * getNodeCount(HEALTHY) + getNodeCount(DEAD) + getNodeCode(STALE), is not - * guaranteed to add up to the total nodes that we know off. Please treat all - * get functions in this file as a snap-shot of information that is inconsistent - * as soon as you read it. - */ -public class SCMNodeManager - implements NodeManager, StorageContainerNodeProtocol { - - @VisibleForTesting - static final Logger LOG = - LoggerFactory.getLogger(SCMNodeManager.class); - - private final NodeStateManager nodeStateManager; - // Should we maintain aggregated stats? If this is not frequently used, we - // can always calculate it from nodeStats whenever required. - // Aggregated node stats - private SCMNodeStat scmStat; - // Should we create ChillModeManager and extract all the chill mode logic - // to a new class? - private int chillModeNodeCount; - private final String clusterID; - private final VersionInfo version; - /** - * During start up of SCM, it will enter into chill mode and will be there - * until number of Datanodes registered reaches {@code chillModeNodeCount}. - * This flag is for tracking startup chill mode. - */ - private AtomicBoolean inStartupChillMode; - /** - * Administrator can put SCM into chill mode manually. - * This flag is for tracking manual chill mode. - */ - private AtomicBoolean inManualChillMode; - private final CommandQueue commandQueue; - // Node manager MXBean - private ObjectName nmInfoBean; - - // Node pool manager. - private final StorageContainerManager scmManager; - - /** - * Constructs SCM machine Manager. - */ - public SCMNodeManager(OzoneConfiguration conf, String clusterID, - StorageContainerManager scmManager, EventPublisher eventPublisher) - throws IOException { - this.nodeStateManager = new NodeStateManager(conf, eventPublisher); - this.scmStat = new SCMNodeStat(); - this.clusterID = clusterID; - this.version = VersionInfo.getLatestVersion(); - this.commandQueue = new CommandQueue(); - // TODO: Support this value as a Percentage of known machines. - this.chillModeNodeCount = 1; - this.inStartupChillMode = new AtomicBoolean(true); - this.inManualChillMode = new AtomicBoolean(false); - this.scmManager = scmManager; - LOG.info("Entering startup chill mode."); - registerMXBean(); - } - - private void registerMXBean() { - this.nmInfoBean = MBeans.register("SCMNodeManager", - "SCMNodeManagerInfo", this); - } - - private void unregisterMXBean() { - if(this.nmInfoBean != null) { - MBeans.unregister(this.nmInfoBean); - this.nmInfoBean = null; - } - } - - /** - * Removes a data node from the management of this Node Manager. - * - * @param node - DataNode. - * @throws NodeNotFoundException - */ - @Override - public void removeNode(DatanodeDetails node) throws NodeNotFoundException { - nodeStateManager.removeNode(node); - } - - /** - * Gets all datanodes that are in a certain state. This function works by - * taking a snapshot of the current collection and then returning the list - * from that collection. This means that real map might have changed by the - * time we return this list. - * - * @return List of Datanodes that are known to SCM in the requested state. 
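The chill-mode bookkeeping above reduces to two independent flags, one cleared automatically at startup and one toggled by the administrator. A compact standalone sketch with the same semantics as the chill-mode methods that follow:

import java.util.concurrent.atomic.AtomicBoolean;

// Standalone sketch of SCM chill-mode flag logic: startup chill mode clears
// once enough nodes report in; manual chill mode is toggled by the admin.
final class ChillModeFlags {
  private final AtomicBoolean inStartupChillMode = new AtomicBoolean(true);
  private final AtomicBoolean inManualChillMode = new AtomicBoolean(false);

  void onNodeRegistered(int totalNodes, int minimumNodes) {
    if (inStartupChillMode.get() && totalNodes >= minimumNodes) {
      inStartupChillMode.set(false); // leaving startup chill mode
    }
  }

  void enterManual() { inManualChillMode.set(true); }
  void exitManual()  { inManualChillMode.set(false); }

  boolean isOutOfChillMode() {
    return !(inStartupChillMode.get() || inManualChillMode.get());
  }
}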
- */ - @Override - public List getNodes(NodeState nodestate) { - return nodeStateManager.getNodes(nodestate); - } - - /** - * Returns all datanodes that are known to SCM. - * - * @return List of DatanodeDetails - */ - @Override - public List getAllNodes() { - return nodeStateManager.getAllNodes(); - } - - /** - * Get the minimum number of nodes to get out of Chill mode. - * - * @return int - */ - @Override - public int getMinimumChillModeNodes() { - return chillModeNodeCount; - } - - /** - * Sets the Minimum chill mode nodes count, used only in testing. - * - * @param count - Number of nodes. - */ - @VisibleForTesting - public void setMinimumChillModeNodes(int count) { - chillModeNodeCount = count; - } - - /** - * Returns chill mode Status string. - * @return String - */ - @Override - public String getChillModeStatus() { - if (inStartupChillMode.get()) { - return "Still in chill mode, waiting on nodes to report in." + - String.format(" %d nodes reported, minimal %d nodes required.", - nodeStateManager.getTotalNodeCount(), getMinimumChillModeNodes()); - } - if (inManualChillMode.get()) { - return "Out of startup chill mode, but in manual chill mode." + - String.format(" %d nodes have reported in.", - nodeStateManager.getTotalNodeCount()); - } - return "Out of chill mode." + - String.format(" %d nodes have reported in.", - nodeStateManager.getTotalNodeCount()); - } - - /** - * Forcefully exits the chill mode even if we have not met the minimum - * criteria of exiting the chill mode. This will exit from both startup - * and manual chill mode. - */ - @Override - public void forceExitChillMode() { - if(inStartupChillMode.get()) { - LOG.info("Leaving startup chill mode."); - inStartupChillMode.set(false); - } - if(inManualChillMode.get()) { - LOG.info("Leaving manual chill mode."); - inManualChillMode.set(false); - } - } - - /** - * Puts the node manager into manual chill mode. - */ - @Override - public void enterChillMode() { - LOG.info("Entering manual chill mode."); - inManualChillMode.set(true); - } - - /** - * Brings node manager out of manual chill mode. - */ - @Override - public void exitChillMode() { - LOG.info("Leaving manual chill mode."); - inManualChillMode.set(false); - } - - /** - * Returns true if node manager is out of chill mode, else false. - * @return true if out of chill mode, else false - */ - @Override - public boolean isOutOfChillMode() { - return !(inStartupChillMode.get() || inManualChillMode.get()); - } - - /** - * Returns the Number of Datanodes by State they are in. - * - * @return int -- count - */ - @Override - public int getNodeCount(NodeState nodestate) { - return nodeStateManager.getNodeCount(nodestate); - } - - /** - * Returns the node state of a specific node. - * - * @param datanodeDetails - Datanode Details - * @return Healthy/Stale/Dead/Unknown. - */ - @Override - public NodeState getNodeState(DatanodeDetails datanodeDetails) { - try { - return nodeStateManager.getNodeState(datanodeDetails); - } catch (NodeNotFoundException e) { - // TODO: should we throw NodeNotFoundException? 
- return null; - } - } - - - private void updateNodeStat(UUID dnId, NodeReportProto nodeReport) { - SCMNodeStat stat; - try { - stat = nodeStateManager.getNodeStat(dnId); - } catch (NodeNotFoundException e) { - LOG.debug("SCM updateNodeStat based on heartbeat from previous" + - "dead datanode {}", dnId); - stat = new SCMNodeStat(); - } - - if (nodeReport != null && nodeReport.getStorageReportCount() > 0) { - long totalCapacity = 0; - long totalRemaining = 0; - long totalScmUsed = 0; - List storageReports = nodeReport - .getStorageReportList(); - for (StorageReportProto report : storageReports) { - totalCapacity += report.getCapacity(); - totalRemaining += report.getRemaining(); - totalScmUsed+= report.getScmUsed(); - } - scmStat.subtract(stat); - stat.set(totalCapacity, totalScmUsed, totalRemaining); - scmStat.add(stat); - } - nodeStateManager.setNodeStat(dnId, stat); - } - - /** - * Closes this stream and releases any system resources associated with it. If - * the stream is already closed then invoking this method has no effect. - * - * @throws IOException if an I/O error occurs - */ - @Override - public void close() throws IOException { - unregisterMXBean(); - } - - /** - * Gets the version info from SCM. - * - * @param versionRequest - version Request. - * @return - returns SCM version info and other required information needed by - * datanode. - */ - @Override - public VersionResponse getVersion(SCMVersionRequestProto versionRequest) { - return VersionResponse.newBuilder() - .setVersion(this.version.getVersion()) - .addValue(OzoneConsts.SCM_ID, - this.scmManager.getScmStorage().getScmId()) - .addValue(OzoneConsts.CLUSTER_ID, this.scmManager.getScmStorage() - .getClusterID()) - .build(); - } - - /** - * Register the node if the node finds that it is not registered with any - * SCM. - * - * @param datanodeDetails - Send datanodeDetails with Node info. - * This function generates and assigns new datanode ID - * for the datanode. This allows SCM to be run independent - * of Namenode if required. - * @param nodeReport NodeReport. - * - * @return SCMHeartbeatResponseProto - */ - @Override - public RegisteredCommand register( - DatanodeDetails datanodeDetails, NodeReportProto nodeReport, - PipelineReportsProto pipelineReportsProto) { - - InetAddress dnAddress = Server.getRemoteIp(); - if (dnAddress != null) { - // Mostly called inside an RPC, update ip and peer hostname - datanodeDetails.setHostName(dnAddress.getHostName()); - datanodeDetails.setIpAddress(dnAddress.getHostAddress()); - } - UUID dnId = datanodeDetails.getUuid(); - try { - nodeStateManager.addNode(datanodeDetails); - nodeStateManager.setNodeStat(dnId, new SCMNodeStat()); - if(inStartupChillMode.get() && - nodeStateManager.getTotalNodeCount() >= getMinimumChillModeNodes()) { - inStartupChillMode.getAndSet(false); - LOG.info("Leaving startup chill mode."); - } - // Updating Node Report, as registration is successful - updateNodeStat(datanodeDetails.getUuid(), nodeReport); - LOG.info("Data node with ID: {} Registered.", datanodeDetails.getUuid()); - } catch (NodeAlreadyExistsException e) { - LOG.trace("Datanode is already registered. Datanode: {}", - datanodeDetails.toString()); - } - return RegisteredCommand.newBuilder().setErrorCode(ErrorCode.success) - .setDatanodeUUID(datanodeDetails.getUuidString()) - .setClusterID(this.clusterID) - .setHostname(datanodeDetails.getHostName()) - .setIpAddress(datanodeDetails.getIpAddress()) - .build(); - } - - /** - * Send heartbeat to indicate the datanode is alive and doing well. 
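The heartbeat path described here has a useful recovery property: a heartbeat from an unknown datanode is answered with a re-register command rather than being dropped. A toy sketch of that behavior (simplified stand-in types, not the real NodeStateManager or command queue):

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.UUID;

// Toy heartbeat processor: known nodes get their timestamp refreshed,
// unknown nodes are queued a "re-register" instruction.
final class ToyHeartbeatProcessor {
  private final Map<UUID, Long> lastHeartbeat = new HashMap<>();
  private final Set<UUID> reregisterQueue = new HashSet<>();

  void processHeartbeat(UUID dnId, long nowMs) {
    if (lastHeartbeat.containsKey(dnId)) {
      lastHeartbeat.put(dnId, nowMs);      // normal case: refresh timestamp
    } else {
      reregisterQueue.add(dnId);           // unknown node: ask it to register
    }
  }
}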
- * - * @param datanodeDetails - DatanodeDetailsProto. - * @return SCMheartbeat response. - * @throws IOException - */ - @Override - public List processHeartbeat(DatanodeDetails datanodeDetails) { - Preconditions.checkNotNull(datanodeDetails, "Heartbeat is missing " + - "DatanodeDetails."); - try { - nodeStateManager.updateLastHeartbeatTime(datanodeDetails); - } catch (NodeNotFoundException e) { - LOG.warn("SCM receive heartbeat from unregistered datanode {}", - datanodeDetails); - commandQueue.addCommand(datanodeDetails.getUuid(), - new ReregisterCommand()); - } - return commandQueue.getCommand(datanodeDetails.getUuid()); - } - - /** - * Process node report. - * - * @param dnUuid - * @param nodeReport - */ - @Override - public void processNodeReport(UUID dnUuid, NodeReportProto nodeReport) { - this.updateNodeStat(dnUuid, nodeReport); - } - - /** - * Returns the aggregated node stats. - * @return the aggregated node stats. - */ - @Override - public SCMNodeStat getStats() { - return new SCMNodeStat(this.scmStat); - } - - /** - * Return a map of node stats. - * @return a map of individual node stats (live/stale but not dead). - */ - @Override - public Map getNodeStats() { - return nodeStateManager.getNodeStatsMap(); - } - - /** - * Return the node stat of the specified datanode. - * @param datanodeDetails - datanode ID. - * @return node stat if it is live/stale, null if it is decommissioned or - * doesn't exist. - */ - @Override - public SCMNodeMetric getNodeStat(DatanodeDetails datanodeDetails) { - try { - return new SCMNodeMetric( - nodeStateManager.getNodeStat(datanodeDetails.getUuid())); - } catch (NodeNotFoundException e) { - LOG.info("SCM getNodeStat from a decommissioned or removed datanode {}", - datanodeDetails.getUuid()); - return null; - } - } - - @Override - public Map getNodeCount() { - Map nodeCountMap = new HashMap(); - for(NodeState state : NodeState.values()) { - nodeCountMap.put(state.toString(), getNodeCount(state)); - } - return nodeCountMap; - } - - /** - * Get set of pipelines a datanode is part of. - * @param dnId - datanodeID - * @return Set of PipelineID - */ - @Override - public Set getPipelineByDnID(UUID dnId) { - return nodeStateManager.getPipelineByDnID(dnId); - } - - - /** - * Add pipeline information in the NodeManager. - * @param pipeline - Pipeline to be added - */ - @Override - public void addPipeline(Pipeline pipeline) { - nodeStateManager.addPipeline(pipeline); - } - - /** - * Remove a pipeline information from the NodeManager. - * @param pipeline - Pipeline to be removed - */ - @Override - public void removePipeline(Pipeline pipeline) { - nodeStateManager.removePipeline(pipeline); - } - - /** - * Update set of containers available on a datanode. - * @param uuid - DatanodeID - * @param containerIds - Set of containerIDs - * @throws SCMException - if datanode is not known. For new datanode use - * addDatanodeInContainerMap call. - */ - @Override - public void setContainersForDatanode(UUID uuid, - Set containerIds) throws SCMException { - nodeStateManager.setContainersForDatanode(uuid, containerIds); - } - - /** - * Process containerReport received from datanode. - * @param uuid - DataonodeID - * @param containerIds - Set of containerIDs - * @return The result after processing containerReport - */ - @Override - public ReportResult processContainerReport(UUID uuid, - Set containerIds) { - return nodeStateManager.processContainerReport(uuid, containerIds); - } - - /** - * Return set of containerIDs available on a datanode. 
- * @param uuid - DatanodeID - * @return - set of containerIDs - */ - @Override - public Set getContainers(UUID uuid) { - return nodeStateManager.getContainers(uuid); - } - - /** - * Insert a new datanode with set of containerIDs for containers available - * on it. - * @param uuid - DatanodeID - * @param containerIDs - Set of ContainerIDs - * @throws SCMException - if datanode already exists - */ - @Override - public void addDatanodeInContainerMap(UUID uuid, - Set containerIDs) throws SCMException { - nodeStateManager.addDatanodeInContainerMap(uuid, containerIDs); - } - - // TODO: - // Since datanode commands are added through event queue, onMessage method - // should take care of adding commands to command queue. - // Refactor and remove all the usage of this method and delete this method. - @Override - public void addDatanodeCommand(UUID dnId, SCMCommand command) { - this.commandQueue.addCommand(dnId, command); - } - - /** - * This method is called by EventQueue whenever someone adds a new - * DATANODE_COMMAND to the Queue. - * - * @param commandForDatanode DatanodeCommand - * @param ignored publisher - */ - @Override - public void onMessage(CommandForDatanode commandForDatanode, - EventPublisher ignored) { - addDatanodeCommand(commandForDatanode.getDatanodeId(), - commandForDatanode.getCommand()); - } - - /** - * Update the node stats and cluster storage stats in this SCM Node Manager. - * - * @param dnUuid datanode uuid. - */ - @Override - public void processDeadNode(UUID dnUuid) { - try { - SCMNodeStat stat = nodeStateManager.getNodeStat(dnUuid); - if (stat != null) { - LOG.trace("Update stat values as Datanode {} is dead.", dnUuid); - scmStat.subtract(stat); - stat.set(0, 0, 0); - } - } catch (NodeNotFoundException e) { - LOG.warn("Can't update stats based on message of dead Datanode {}, it" - + " doesn't exist or decommissioned already.", dnUuid); - } - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java deleted file mode 100644 index 32ecbad50ab50..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java +++ /dev/null @@ -1,77 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.node; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport; - -import java.util.Set; -import java.util.UUID; - -/** - * - * This is the JMX management interface for node manager information. 
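Interfaces like this one are registered with the platform MBean server and then show up in JMX tools such as JConsole. A minimal sketch using the plain JDK API (the Hadoop code wraps this in the MBeans utility class; the bean and object names below are made up):

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class StorageStatJmxDemo {
  // MXBean naming convention: the interface name must end in "MXBean".
  public interface DemoStatMXBean {
    long getTotalCapacity();
  }

  public static class DemoStat implements DemoStatMXBean {
    @Override
    public long getTotalCapacity() {
      return 42L; // illustrative value
    }
  }

  public static void main(String[] args) throws Exception {
    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
    ObjectName name =
        new ObjectName("StorageContainerManager:name=DemoStat"); // made-up
    server.registerMBean(new DemoStat(), name);
    // The bean is now visible to JConsole/jmxterm under the name above.
    System.out.println(server.getAttribute(name, "TotalCapacity"));
  }
}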
- */ -@InterfaceAudience.Private -public interface SCMNodeStorageStatMXBean { - /** - * Get the capacity of the dataNode. - * @param datanodeID Datanode Id - * @return long - */ - long getCapacity(UUID datanodeID); - - /** - * Returns the remaining space of a Datanode. - * @param datanodeId Datanode Id - * @return long - */ - long getRemainingSpace(UUID datanodeId); - - - /** - * Returns used space in bytes of a Datanode. - * @return long - */ - long getUsedSpace(UUID datanodeId); - - /** - * Returns the total capacity of all dataNodes. - * @return long - */ - long getTotalCapacity(); - - /** - * Returns the total Used Space in all Datanodes. - * @return long - */ - long getTotalSpaceUsed(); - - /** - * Returns the total Remaining Space in all Datanodes. - * @return long - */ - long getTotalFreeSpace(); - - /** - * Returns the set of disks for a given Datanode. - * @return set of storage volumes - */ - Set getStorageVolumes(UUID datanodeId); -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java deleted file mode 100644 index 1b0e5b56e7760..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java +++ /dev/null @@ -1,368 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - * - */ - -package org.apache.hadoop.hdds.scm.node; - - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.StorageReportProto; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.metrics2.util.MBeans; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.management.ObjectName; -import java.io.IOException; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.ConcurrentHashMap; -import java.util.stream.Collectors; - -import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.DUPLICATE_DATANODE; -import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.NO_SUCH_DATANODE; - -/** - * This data structure maintains the disk space capacity, disk usage and free - * space availability per Datanode. - * This information is built from the DN node reports. 
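The aggregation described here is a straight sum over the per-volume storage reports plus a used-to-capacity ratio (getScmUsedratio in the code below). A self-contained sketch with stand-in report objects in place of the protobuf StorageReportProto:

import java.util.Arrays;
import java.util.List;

// Stand-in for the protobuf StorageReportProto: one entry per volume.
final class VolumeReport {
  final long capacity;
  final long scmUsed;
  final long remaining;

  VolumeReport(long capacity, long scmUsed, long remaining) {
    this.capacity = capacity;
    this.scmUsed = scmUsed;
    this.remaining = remaining;
  }
}

final class StorageAggregator {
  /** Sums the per-volume reports and returns the used ratio in [0, 1]. */
  static double usedRatio(List<VolumeReport> reports) {
    long totalCapacity = 0;
    long totalScmUsed = 0;
    for (VolumeReport r : reports) {
      totalCapacity += r.capacity;
      totalScmUsed += r.scmUsed;
    }
    return totalCapacity == 0 ? 0.0 : (double) totalScmUsed / totalCapacity;
  }

  public static void main(String[] args) {
    List<VolumeReport> reports = Arrays.asList(
        new VolumeReport(1000, 900, 100),
        new VolumeReport(1000, 100, 900));
    System.out.println(usedRatio(reports)); // 0.5
  }
}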
- */ -public class SCMNodeStorageStatMap implements SCMNodeStorageStatMXBean { - static final Logger LOG = - LoggerFactory.getLogger(SCMNodeStorageStatMap.class); - - private final double warningUtilizationThreshold; - private final double criticalUtilizationThreshold; - - private final Map> scmNodeStorageReportMap; - // NodeStorageInfo MXBean - private ObjectName scmNodeStorageInfoBean; - /** - * constructs the scmNodeStorageReportMap object. - */ - public SCMNodeStorageStatMap(OzoneConfiguration conf) { - // scmNodeStorageReportMap = new ConcurrentHashMap<>(); - scmNodeStorageReportMap = new ConcurrentHashMap<>(); - warningUtilizationThreshold = conf.getDouble( - OzoneConfigKeys. - HDDS_DATANODE_STORAGE_UTILIZATION_WARNING_THRESHOLD, - OzoneConfigKeys. - HDDS_DATANODE_STORAGE_UTILIZATION_WARNING_THRESHOLD_DEFAULT); - criticalUtilizationThreshold = conf.getDouble( - OzoneConfigKeys. - HDDS_DATANODE_STORAGE_UTILIZATION_CRITICAL_THRESHOLD, - OzoneConfigKeys. - HDDS_DATANODE_STORAGE_UTILIZATION_CRITICAL_THRESHOLD_DEFAULT); - } - - /** - * Enum that Describes what we should do at various thresholds. - */ - public enum UtilizationThreshold { - NORMAL, WARN, CRITICAL; - } - - /** - * Returns true if this a datanode that is already tracked by - * scmNodeStorageReportMap. - * - * @param datanodeID - UUID of the Datanode. - * @return True if this is tracked, false if this map does not know about it. - */ - public boolean isKnownDatanode(UUID datanodeID) { - Preconditions.checkNotNull(datanodeID); - return scmNodeStorageReportMap.containsKey(datanodeID); - } - - public List getDatanodeList( - UtilizationThreshold threshold) { - return scmNodeStorageReportMap.entrySet().stream().filter( - entry -> (isThresholdReached(threshold, - getScmUsedratio(getUsedSpace(entry.getKey()), - getCapacity(entry.getKey()))))) - .map(Map.Entry::getKey) - .collect(Collectors.toList()); - } - - - - /** - * Insert a new datanode into Node2Container Map. - * - * @param datanodeID -- Datanode UUID - * @param report - set if StorageReports. - */ - public void insertNewDatanode(UUID datanodeID, - Set report) throws SCMException { - Preconditions.checkNotNull(report); - Preconditions.checkState(report.size() != 0); - Preconditions.checkNotNull(datanodeID); - synchronized (scmNodeStorageReportMap) { - if (isKnownDatanode(datanodeID)) { - throw new SCMException("Node already exists in the map", - DUPLICATE_DATANODE); - } - scmNodeStorageReportMap.putIfAbsent(datanodeID, report); - } - } - - //TODO: This should be called once SCMNodeManager gets Started. - private void registerMXBean() { - this.scmNodeStorageInfoBean = MBeans.register("StorageContainerManager", - "scmNodeStorageInfo", this); - } - - //TODO: Unregister call should happen as a part of SCMNodeManager shutdown. - private void unregisterMXBean() { - if(this.scmNodeStorageInfoBean != null) { - MBeans.unregister(this.scmNodeStorageInfoBean); - this.scmNodeStorageInfoBean = null; - } - } - /** - * Updates the Container list of an existing DN. - * - * @param datanodeID - UUID of DN. - * @param report - set of Storage Reports for the Datanode. - * @throws SCMException - if we don't know about this datanode, for new DN - * use addDatanodeInContainerMap. 
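The two thresholds read from configuration in the constructor above split a datanode's used-space ratio into three bands; getDatanodeList then filters nodes by band. A sketch of the banding in isolation (names illustrative):

```java
enum Utilization { NORMAL, WARN, CRITICAL }

final class UtilizationBands {
  /** Ratios are fractions of capacity in [0, 1]. */
  static Utilization classify(double usedRatio, double warn, double critical) {
    if (usedRatio >= critical) {
      return Utilization.CRITICAL; // at or past the critical fraction
    }
    if (usedRatio >= warn) {
      return Utilization.WARN;     // between the two thresholds
    }
    return Utilization.NORMAL;     // below the warning fraction
  }
}
```

With warn = 0.75 and critical = 0.90, for instance, a ratio of 0.80 lands in WARN and 0.95 in CRITICAL.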
- */ - public void updateDatanodeMap(UUID datanodeID, - Set report) throws SCMException { - Preconditions.checkNotNull(datanodeID); - Preconditions.checkNotNull(report); - Preconditions.checkState(report.size() != 0); - synchronized (scmNodeStorageReportMap) { - if (!scmNodeStorageReportMap.containsKey(datanodeID)) { - throw new SCMException("No such datanode", NO_SUCH_DATANODE); - } - scmNodeStorageReportMap.put(datanodeID, report); - } - } - - public StorageReportResult processNodeReport(UUID datanodeID, - StorageContainerDatanodeProtocolProtos.NodeReportProto nodeReport) - throws IOException { - Preconditions.checkNotNull(datanodeID); - Preconditions.checkNotNull(nodeReport); - - long totalCapacity = 0; - long totalRemaining = 0; - long totalScmUsed = 0; - Set storagReportSet = new HashSet<>(); - Set fullVolumeSet = new HashSet<>(); - Set failedVolumeSet = new HashSet<>(); - List - storageReports = nodeReport.getStorageReportList(); - for (StorageReportProto report : storageReports) { - StorageLocationReport storageReport = - StorageLocationReport.getFromProtobuf(report); - storagReportSet.add(storageReport); - if (report.hasFailed() && report.getFailed()) { - failedVolumeSet.add(storageReport); - } else if (isThresholdReached(UtilizationThreshold.CRITICAL, - getScmUsedratio(report.getScmUsed(), report.getCapacity()))) { - fullVolumeSet.add(storageReport); - } - totalCapacity += report.getCapacity(); - totalRemaining += report.getRemaining(); - totalScmUsed += report.getScmUsed(); - } - - if (!isKnownDatanode(datanodeID)) { - insertNewDatanode(datanodeID, storagReportSet); - } else { - updateDatanodeMap(datanodeID, storagReportSet); - } - if (isThresholdReached(UtilizationThreshold.CRITICAL, - getScmUsedratio(totalScmUsed, totalCapacity))) { - LOG.warn("Datanode {} is out of storage space. Capacity: {}, Used: {}", - datanodeID, totalCapacity, totalScmUsed); - return StorageReportResult.ReportResultBuilder.newBuilder() - .setStatus(ReportStatus.DATANODE_OUT_OF_SPACE) - .setFullVolumeSet(fullVolumeSet).setFailedVolumeSet(failedVolumeSet) - .build(); - } - if (isThresholdReached(UtilizationThreshold.WARN, - getScmUsedratio(totalScmUsed, totalCapacity))) { - LOG.warn("Datanode {} is low on storage space. 
Capacity: {}, Used: {}", - datanodeID, totalCapacity, totalScmUsed); - } - - if (failedVolumeSet.isEmpty() && !fullVolumeSet.isEmpty()) { - return StorageReportResult.ReportResultBuilder.newBuilder() - .setStatus(ReportStatus.STORAGE_OUT_OF_SPACE) - .setFullVolumeSet(fullVolumeSet).build(); - } - - if (!failedVolumeSet.isEmpty() && fullVolumeSet.isEmpty()) { - return StorageReportResult.ReportResultBuilder.newBuilder() - .setStatus(ReportStatus.FAILED_STORAGE) - .setFailedVolumeSet(failedVolumeSet).build(); - } - if (!failedVolumeSet.isEmpty() && !fullVolumeSet.isEmpty()) { - return StorageReportResult.ReportResultBuilder.newBuilder() - .setStatus(ReportStatus.FAILED_AND_OUT_OF_SPACE_STORAGE) - .setFailedVolumeSet(failedVolumeSet).setFullVolumeSet(fullVolumeSet) - .build(); - } - return StorageReportResult.ReportResultBuilder.newBuilder() - .setStatus(ReportStatus.ALL_IS_WELL).build(); - } - - private boolean isThresholdReached(UtilizationThreshold threshold, - double scmUsedratio) { - switch (threshold) { - case NORMAL: - return scmUsedratio < warningUtilizationThreshold; - case WARN: - return scmUsedratio >= warningUtilizationThreshold - && scmUsedratio < criticalUtilizationThreshold; - case CRITICAL: - return scmUsedratio >= criticalUtilizationThreshold; - default: - throw new RuntimeException("Unknown UtilizationThreshold value"); - } - } - - @Override - public long getCapacity(UUID dnId) { - long capacity = 0; - Set reportSet = scmNodeStorageReportMap.get(dnId); - for (StorageLocationReport report : reportSet) { - capacity += report.getCapacity(); - } - return capacity; - } - - @Override - public long getRemainingSpace(UUID dnId) { - long remaining = 0; - Set reportSet = scmNodeStorageReportMap.get(dnId); - for (StorageLocationReport report : reportSet) { - remaining += report.getRemaining(); - } - return remaining; - } - - @Override - public long getUsedSpace(UUID dnId) { - long scmUsed = 0; - Set reportSet = scmNodeStorageReportMap.get(dnId); - for (StorageLocationReport report : reportSet) { - scmUsed += report.getScmUsed(); - } - return scmUsed; - } - - @Override - public long getTotalCapacity() { - long capacity = 0; - Set dnIdSet = scmNodeStorageReportMap.keySet(); - for (UUID id : dnIdSet) { - capacity += getCapacity(id); - } - return capacity; - } - - @Override - public long getTotalSpaceUsed() { - long scmUsed = 0; - Set dnIdSet = scmNodeStorageReportMap.keySet(); - for (UUID id : dnIdSet) { - scmUsed += getUsedSpace(id); - } - return scmUsed; - } - - @Override - public long getTotalFreeSpace() { - long remaining = 0; - Set dnIdSet = scmNodeStorageReportMap.keySet(); - for (UUID id : dnIdSet) { - remaining += getRemainingSpace(id); - } - return remaining; - } - - /** - * removes the dataNode from scmNodeStorageReportMap. - * @param datanodeID - * @throws SCMException in case the dataNode is not found in the map. - */ - public void removeDatanode(UUID datanodeID) throws SCMException { - Preconditions.checkNotNull(datanodeID); - synchronized (scmNodeStorageReportMap) { - if (!scmNodeStorageReportMap.containsKey(datanodeID)) { - throw new SCMException("No such datanode", NO_SUCH_DATANODE); - } - scmNodeStorageReportMap.remove(datanodeID); - } - } - - /** - * Returns the set of storage volumes for a Datanode. - * @param datanodeID - * @return set of storage volumes. 
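processNodeReport above applies the same test at two levels: each volume is triaged on its own (an explicitly failed volume beats a full one), and the summed totals decide the datanode-level verdict. A compressed sketch with hypothetical simplified types (the string results mirror the ReportStatus values):

```java
import java.util.ArrayList;
import java.util.List;

final class ReportTriage {
  static final class Volume {
    final long capacity, used;
    final boolean failed;
    Volume(long capacity, long used, boolean failed) {
      this.capacity = capacity; this.used = used; this.failed = failed;
    }
  }

  static String triage(List<Volume> reports, double critical) {
    List<Volume> failed = new ArrayList<>();
    List<Volume> full = new ArrayList<>();
    long totalCapacity = 0, totalUsed = 0;
    for (Volume v : reports) {
      if (v.failed) {
        failed.add(v);                         // reported failed by the DN
      } else if (v.capacity > 0
          && (double) v.used / v.capacity >= critical) {
        full.add(v);                           // past the critical ratio
      }
      totalCapacity += v.capacity;
      totalUsed += v.used;
    }
    if (totalCapacity > 0
        && (double) totalUsed / totalCapacity >= critical) {
      return "DATANODE_OUT_OF_SPACE";          // node-level verdict wins
    }
    if (!failed.isEmpty() && !full.isEmpty()) {
      return "FAILED_AND_OUT_OF_SPACE_STORAGE";
    }
    if (!failed.isEmpty()) {
      return "FAILED_STORAGE";
    }
    if (!full.isEmpty()) {
      return "STORAGE_OUT_OF_SPACE";
    }
    return "ALL_IS_WELL";
  }
}
```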
- */ - - @Override - public Set getStorageVolumes(UUID datanodeID) { - return scmNodeStorageReportMap.get(datanodeID); - } - - - /** - * Truncate to 4 digits since uncontrolled precision is some times - * counter intuitive to what users expect. - * @param value - double. - * @return double. - */ - private double truncateDecimals(double value) { - final int multiplier = 10000; - return (double) ((long) (value * multiplier)) / multiplier; - } - - /** - * get the scmUsed ratio. - */ - public double getScmUsedratio(long scmUsed, long capacity) { - double scmUsedRatio = - truncateDecimals(scmUsed / (double) capacity); - return scmUsedRatio; - } - /** - * Results possible from processing a Node report by - * Node2ContainerMapper. - */ - public enum ReportStatus { - ALL_IS_WELL, - DATANODE_OUT_OF_SPACE, - STORAGE_OUT_OF_SPACE, - FAILED_STORAGE, - FAILED_AND_OUT_OF_SPACE_STORAGE - } - -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java deleted file mode 100644 index 48939f1bae171..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.node; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector; -import org.apache.hadoop.hdds.server.events.EventHandler; -import org.apache.hadoop.hdds.server.events.EventPublisher; - -/** - * Handles Stale node event. - */ -public class StaleNodeHandler implements EventHandler { - - private final PipelineSelector pipelineSelector; - - public StaleNodeHandler(PipelineSelector pipelineSelector) { - this.pipelineSelector = pipelineSelector; - } - - @Override - public void onMessage(DatanodeDetails datanodeDetails, - EventPublisher publisher) { - pipelineSelector.handleStaleNode(datanodeDetails); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StorageReportResult.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StorageReportResult.java deleted file mode 100644 index 0b63ceb5783a8..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StorageReportResult.java +++ /dev/null @@ -1,87 +0,0 @@ - -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
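The truncateDecimals/getScmUsedratio pair above truncates the used-space ratio to four decimal places rather than rounding it, so the displayed figure never overstates utilization. The arithmetic in isolation:

```java
// 3 used of 7 capacity -> 0.42857... -> 0.4285 (truncated, not rounded).
static double usedRatio4(long used, long capacity) {
  final int multiplier = 10_000;
  double ratio = used / (double) capacity;
  return (double) (long) (ratio * multiplier) / multiplier;
}
```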
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.scm.node; - -import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport; - -import java.util.Set; - -/** - * A Container Report gets processsed by the Node2Container and returns the - * Report Result class. - */ -public class StorageReportResult { - private SCMNodeStorageStatMap.ReportStatus status; - private Set fullVolumes; - private Set failedVolumes; - - StorageReportResult(SCMNodeStorageStatMap.ReportStatus status, - Set fullVolumes, - Set failedVolumes) { - this.status = status; - this.fullVolumes = fullVolumes; - this.failedVolumes = failedVolumes; - } - - public SCMNodeStorageStatMap.ReportStatus getStatus() { - return status; - } - - public Set getFullVolumes() { - return fullVolumes; - } - - public Set getFailedVolumes() { - return failedVolumes; - } - - static class ReportResultBuilder { - private SCMNodeStorageStatMap.ReportStatus status; - private Set fullVolumes; - private Set failedVolumes; - - static ReportResultBuilder newBuilder() { - return new ReportResultBuilder(); - } - - public ReportResultBuilder setStatus( - SCMNodeStorageStatMap.ReportStatus newstatus) { - this.status = newstatus; - return this; - } - - public ReportResultBuilder setFullVolumeSet( - Set fullVolumesSet) { - this.fullVolumes = fullVolumesSet; - return this; - } - - public ReportResultBuilder setFailedVolumeSet( - Set failedVolumesSet) { - this.failedVolumes = failedVolumesSet; - return this; - } - - StorageReportResult build() { - return new StorageReportResult(status, fullVolumes, failedVolumes); - } - } -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/package-info.java deleted file mode 100644 index d6a8ad0394e73..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/package-info.java +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.node; - -/** - * The node package deals with node management. - *

- * The node manager takes care of node registrations, removal of nodes and - * handling of heartbeats. - *

- * The node manager maintains statistics that get sent as part of - * heartbeats. - *

- * The container manager polls the node manager to learn the state of - * datanodes that it is interested in. - *

- */ \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java deleted file mode 100644 index 9625f81908707..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - * - */ - -package org.apache.hadoop.hdds.scm.node.states; - -import java.util.HashSet; -import java.util.Set; -import java.util.UUID; - -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes - .NO_SUCH_DATANODE; - -/** - * This data structure maintains the list of containers that is on a datanode. - * This information is built from the DN container reports. - */ -public class Node2ContainerMap extends Node2ObjectsMap { - - /** - * Constructs a Node2ContainerMap Object. - */ - public Node2ContainerMap() { - super(); - } - - /** - * Returns null if there no containers associated with this datanode ID. - * - * @param datanode - UUID - * @return Set of containers or Null. - */ - public Set getContainers(UUID datanode) { - return getObjects(datanode); - } - - /** - * Insert a new datanode into Node2Container Map. - * - * @param datanodeID -- Datanode UUID - * @param containerIDs - List of ContainerIDs. - */ - public void insertNewDatanode(UUID datanodeID, Set containerIDs) - throws SCMException { - super.insertNewDatanode(datanodeID, containerIDs); - } - - /** - * Updates the Container list of an existing DN. - * - * @param datanodeID - UUID of DN. - * @param containers - Set of Containers tht is present on DN. - * @throws SCMException - if we don't know about this datanode, for new DN - * use addDatanodeInContainerMap. 
- */ - public void setContainersForDatanode(UUID datanodeID, - Set containers) throws SCMException { - Preconditions.checkNotNull(datanodeID); - Preconditions.checkNotNull(containers); - if (dn2ObjectMap - .computeIfPresent(datanodeID, (k, v) -> new HashSet<>(containers)) - == null) { - throw new SCMException("No such datanode", NO_SUCH_DATANODE); - } - } - - @VisibleForTesting - public int size() { - return dn2ObjectMap.size(); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ObjectsMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ObjectsMap.java deleted file mode 100644 index e49a79c64f6b6..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ObjectsMap.java +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - * - */ - -package org.apache.hadoop.hdds.scm.node.states; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; - -import java.util.UUID; -import java.util.Set; -import java.util.Map; -import java.util.TreeSet; -import java.util.HashSet; -import java.util.Collections; - -import java.util.concurrent.ConcurrentHashMap; - -import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.DUPLICATE_DATANODE; - -/** - * This data structure maintains the list of containers that is on a datanode. - * This information is built from the DN container reports. - */ -public class Node2ObjectsMap { - protected final Map> dn2ObjectMap; - - /** - * Constructs a Node2ContainerMap Object. - */ - public Node2ObjectsMap() { - dn2ObjectMap = new ConcurrentHashMap<>(); - } - - /** - * Returns true if this a datanode that is already tracked by - * Node2ContainerMap. - * - * @param datanodeID - UUID of the Datanode. - * @return True if this is tracked, false if this map does not know about it. - */ - public boolean isKnownDatanode(UUID datanodeID) { - Preconditions.checkNotNull(datanodeID); - return dn2ObjectMap.containsKey(datanodeID); - } - - /** - * Insert a new datanode into Node2Container Map. - * - * @param datanodeID -- Datanode UUID - * @param containerIDs - List of ContainerIDs. - */ - public void insertNewDatanode(UUID datanodeID, Set containerIDs) - throws SCMException { - Preconditions.checkNotNull(containerIDs); - Preconditions.checkNotNull(datanodeID); - if (dn2ObjectMap.putIfAbsent(datanodeID, new HashSet<>(containerIDs)) - != null) { - throw new SCMException("Node already exists in the map", - DUPLICATE_DATANODE); - } - } - - /** - * Removes datanode Entry from the map. - * - * @param datanodeID - Datanode ID. 
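setContainersForDatanode above leans on Map.computeIfPresent for replace-or-fail semantics: the remapping function runs only when the key exists, and a null return signals an unknown datanode. The same pattern reduced to its core (hypothetical names; Long stands in for ContainerID):

```java
import java.util.HashSet;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;

final class ContainerIndex {
  private final ConcurrentHashMap<UUID, Set<Long>> byNode =
      new ConcurrentHashMap<>();

  void replaceOrThrow(UUID dn, Set<Long> containers) {
    // Runs only if dn is already present; returns null when it is not.
    if (byNode.computeIfPresent(dn, (k, v) -> new HashSet<>(containers))
        == null) {
      throw new IllegalStateException("No such datanode: " + dn);
    }
  }

  void removeNode(UUID dn) {
    // Mapping a key to null removes the entry atomically.
    byNode.computeIfPresent(dn, (k, v) -> null);
  }
}
```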
- */ - void removeDatanode(UUID datanodeID) { - Preconditions.checkNotNull(datanodeID); - dn2ObjectMap.computeIfPresent(datanodeID, (k, v) -> null); - } - - /** - * Returns null if there no containers associated with this datanode ID. - * - * @param datanode - UUID - * @return Set of containers or Null. - */ - Set getObjects(UUID datanode) { - Preconditions.checkNotNull(datanode); - final Set s = dn2ObjectMap.get(datanode); - return s != null? Collections.unmodifiableSet(s): Collections.emptySet(); - } - - public ReportResult.ReportResultBuilder newBuilder() { - return new ReportResult.ReportResultBuilder<>(); - } - - public ReportResult processReport(UUID datanodeID, Set objects) { - Preconditions.checkNotNull(datanodeID); - Preconditions.checkNotNull(objects); - - if (!isKnownDatanode(datanodeID)) { - return newBuilder() - .setStatus(ReportResult.ReportStatus.NEW_DATANODE_FOUND) - .setNewEntries(objects) - .build(); - } - - // Conditions like Zero length containers should be handled by removeAll. - Set currentSet = dn2ObjectMap.get(datanodeID); - TreeSet newObjects = new TreeSet<>(objects); - newObjects.removeAll(currentSet); - - TreeSet missingObjects = new TreeSet<>(currentSet); - missingObjects.removeAll(objects); - - if (newObjects.isEmpty() && missingObjects.isEmpty()) { - return newBuilder() - .setStatus(ReportResult.ReportStatus.ALL_IS_WELL) - .build(); - } - - if (newObjects.isEmpty() && !missingObjects.isEmpty()) { - return newBuilder() - .setStatus(ReportResult.ReportStatus.MISSING_ENTRIES) - .setMissingEntries(missingObjects) - .build(); - } - - if (!newObjects.isEmpty() && missingObjects.isEmpty()) { - return newBuilder() - .setStatus(ReportResult.ReportStatus.NEW_ENTRIES_FOUND) - .setNewEntries(newObjects) - .build(); - } - - if (!newObjects.isEmpty() && !missingObjects.isEmpty()) { - return newBuilder() - .setStatus(ReportResult.ReportStatus.MISSING_AND_NEW_ENTRIES_FOUND) - .setNewEntries(newObjects) - .setMissingEntries(missingObjects) - .build(); - } - - // default status & Make compiler happy - return newBuilder() - .setStatus(ReportResult.ReportStatus.ALL_IS_WELL) - .build(); - } - - @VisibleForTesting - public int size() { - return dn2ObjectMap.size(); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java deleted file mode 100644 index 87f2222b5ff86..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
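The removed processReport boils down to two set differences: entries in the report but not tracked are new, tracked entries absent from the report are missing, and the empty/non-empty combination selects one of four statuses. In isolation:

```java
import java.util.Set;
import java.util.TreeSet;

final class ReportDiff {
  static <T extends Comparable<T>> String diff(Set<T> tracked,
      Set<T> reported) {
    Set<T> added = new TreeSet<>(reported);
    added.removeAll(tracked);        // in the report, not tracked yet
    Set<T> missing = new TreeSet<>(tracked);
    missing.removeAll(reported);     // tracked, but absent from the report
    if (added.isEmpty() && missing.isEmpty()) {
      return "ALL_IS_WELL";
    }
    if (added.isEmpty()) {
      return "MISSING_ENTRIES";
    }
    if (missing.isEmpty()) {
      return "NEW_ENTRIES_FOUND";
    }
    return "MISSING_AND_NEW_ENTRIES_FOUND";
  }
}
```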
- * - */ - -package org.apache.hadoop.hdds.scm.node.states; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID; - -import java.util.HashSet; -import java.util.Set; -import java.util.UUID; - -/** - * This data structure maintains the list of pipelines which the given datanode is a part of. This - * information will be added whenever a new pipeline allocation happens. - * - *

TODO: this information needs to be regenerated from pipeline reports on SCM restart - */ -public class Node2PipelineMap extends Node2ObjectsMap { - - /** Constructs a Node2PipelineMap Object. */ - public Node2PipelineMap() { - super(); - } - - /** - * Returns null if there no pipelines associated with this datanode ID. - * - * @param datanode - UUID - * @return Set of pipelines or Null. - */ - public Set getPipelines(UUID datanode) { - return getObjects(datanode); - } - - /** - * Adds a pipeline entry to a given dataNode in the map. - * - * @param pipeline Pipeline to be added - */ - public synchronized void addPipeline(Pipeline pipeline) { - for (DatanodeDetails details : pipeline.getDatanodes().values()) { - UUID dnId = details.getUuid(); - dn2ObjectMap.computeIfAbsent(dnId, k -> new HashSet<>()) - .add(pipeline.getId()); - } - } - - public synchronized void removePipeline(Pipeline pipeline) { - for (DatanodeDetails details : pipeline.getDatanodes().values()) { - UUID dnId = details.getUuid(); - dn2ObjectMap.computeIfPresent(dnId, - (k, v) -> { - v.remove(pipeline.getId()); - return v; - }); - } - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeAlreadyExistsException.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeAlreadyExistsException.java deleted file mode 100644 index aa5c382f42620..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeAlreadyExistsException.java +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
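Node2PipelineMap above maintains the reverse index from datanode to pipelines: computeIfAbsent creates a node's set on first touch, while computeIfPresent mutates the set only for nodes already tracked. A reduced sketch (hypothetical names; String stands in for PipelineID):

```java
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;

final class PipelineIndex {
  private final ConcurrentHashMap<UUID, Set<String>> byNode =
      new ConcurrentHashMap<>();

  void addPipeline(String pipelineId, List<UUID> members) {
    for (UUID dn : members) {
      byNode.computeIfAbsent(dn, k -> new HashSet<>()).add(pipelineId);
    }
  }

  void removePipeline(String pipelineId, List<UUID> members) {
    for (UUID dn : members) {
      byNode.computeIfPresent(dn, (k, v) -> {
        v.remove(pipelineId);
        return v;                    // keep the (possibly empty) set
      });
    }
  }
}
```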

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.node.states; - -/** - * This exception represents that there is already a node added to NodeStateMap - * with same UUID. - */ -public class NodeAlreadyExistsException extends NodeException { - - /** - * Constructs an {@code NodeAlreadyExistsException} with {@code null} - * as its error detail message. - */ - public NodeAlreadyExistsException() { - super(); - } - - /** - * Constructs an {@code NodeAlreadyExistsException} with the specified - * detail message. - * - * @param message - * The detail message (which is saved for later retrieval - * by the {@link #getMessage()} method) - */ - public NodeAlreadyExistsException(String message) { - super(message); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeException.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeException.java deleted file mode 100644 index c67b55d953149..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeException.java +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.node.states; - -/** - * This exception represents all node related exceptions in NodeStateMap. - */ -public class NodeException extends Exception { - - /** - * Constructs an {@code NodeException} with {@code null} - * as its error detail message. - */ - public NodeException() { - super(); - } - - /** - * Constructs an {@code NodeException} with the specified - * detail message. - * - * @param message - * The detail message (which is saved for later retrieval - * by the {@link #getMessage()} method) - */ - public NodeException(String message) { - super(message); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeNotFoundException.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeNotFoundException.java deleted file mode 100644 index c44a08cf51e90..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeNotFoundException.java +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.node.states; - -/** - * This exception represents that the node that is being accessed does not - * exist in NodeStateMap. - */ -public class NodeNotFoundException extends NodeException { - - - /** - * Constructs an {@code NodeNotFoundException} with {@code null} - * as its error detail message. - */ - public NodeNotFoundException() { - super(); - } - - /** - * Constructs an {@code NodeNotFoundException} with the specified - * detail message. - * - * @param message - * The detail message (which is saved for later retrieval - * by the {@link #getMessage()} method) - */ - public NodeNotFoundException(String message) { - super(message); - } - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java deleted file mode 100644 index 774ced18b0a29..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java +++ /dev/null @@ -1,337 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.node.states; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; -import org.apache.hadoop.hdds.scm.node.DatanodeInfo; - -import java.util.*; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; - -/** - * Maintains the state of datanodes in SCM. This class should only be used by - * NodeStateManager to maintain the state. If anyone wants to change the - * state of a node they should call NodeStateManager, do not directly use - * this class. - */ -public class NodeStateMap { - - /** - * Node id to node info map. - */ - private final ConcurrentHashMap nodeMap; - /** - * Represents the current state of node. - */ - private final ConcurrentHashMap> stateMap; - /** - * Represents the current stats of node. - */ - private final ConcurrentHashMap nodeStats; - - private final ReadWriteLock lock; - - /** - * Creates a new instance of NodeStateMap with no nodes. 
- */ - public NodeStateMap() { - lock = new ReentrantReadWriteLock(); - nodeMap = new ConcurrentHashMap<>(); - stateMap = new ConcurrentHashMap<>(); - nodeStats = new ConcurrentHashMap<>(); - initStateMap(); - } - - /** - * Initializes the state map with available states. - */ - private void initStateMap() { - for (NodeState state : NodeState.values()) { - stateMap.put(state, new HashSet<>()); - } - } - - /** - * Adds a node to NodeStateMap. - * - * @param datanodeDetails DatanodeDetails - * @param nodeState initial NodeState - * - * @throws NodeAlreadyExistsException if the node already exist - */ - public void addNode(DatanodeDetails datanodeDetails, NodeState nodeState) - throws NodeAlreadyExistsException { - lock.writeLock().lock(); - try { - UUID id = datanodeDetails.getUuid(); - if (nodeMap.containsKey(id)) { - throw new NodeAlreadyExistsException("Node UUID: " + id); - } - nodeMap.put(id, new DatanodeInfo(datanodeDetails)); - stateMap.get(nodeState).add(id); - } finally { - lock.writeLock().unlock(); - } - } - - /** - * Updates the node state. - * - * @param nodeId Node Id - * @param currentState current state - * @param newState new state - * - * @throws NodeNotFoundException if the node is not present - */ - public void updateNodeState(UUID nodeId, NodeState currentState, - NodeState newState)throws NodeNotFoundException { - lock.writeLock().lock(); - try { - if (stateMap.get(currentState).remove(nodeId)) { - stateMap.get(newState).add(nodeId); - } else { - throw new NodeNotFoundException("Node UUID: " + nodeId + - ", not found in state: " + currentState); - } - } finally { - lock.writeLock().unlock(); - } - } - - /** - * Returns DatanodeDetails for the given node id. - * - * @param uuid Node Id - * - * @return DatanodeDetails of the node - * - * @throws NodeNotFoundException if the node is not present - */ - public DatanodeDetails getNodeDetails(UUID uuid) - throws NodeNotFoundException { - return getNodeInfo(uuid); - } - - /** - * Returns DatanodeInfo for the given node id. - * - * @param uuid Node Id - * - * @return DatanodeInfo of the node - * - * @throws NodeNotFoundException if the node is not present - */ - public DatanodeInfo getNodeInfo(UUID uuid) throws NodeNotFoundException { - lock.readLock().lock(); - try { - if (nodeMap.containsKey(uuid)) { - return nodeMap.get(uuid); - } - throw new NodeNotFoundException("Node UUID: " + uuid); - } finally { - lock.readLock().unlock(); - } - } - - - /** - * Returns the list of node ids which are in the specified state. - * - * @param state NodeState - * - * @return list of node ids - */ - public List getNodes(NodeState state) { - lock.readLock().lock(); - try { - return new LinkedList<>(stateMap.get(state)); - } finally { - lock.readLock().unlock(); - } - } - - /** - * Returns the list of all the node ids. - * - * @return list of all the node ids - */ - public List getAllNodes() { - lock.readLock().lock(); - try { - return new LinkedList<>(nodeMap.keySet()); - } finally { - lock.readLock().unlock(); - } - } - - /** - * Returns the count of nodes in the specified state. - * - * @param state NodeState - * - * @return Number of nodes in the specified state - */ - public int getNodeCount(NodeState state) { - lock.readLock().lock(); - try { - return stateMap.get(state).size(); - } finally { - lock.readLock().unlock(); - } - } - - /** - * Returns the total node count. 
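The invariant behind updateNodeState above is that a node id lives in exactly one per-state set, so a transition is remove-from-old plus add-to-new under the write lock, and a failed remove means the caller held a stale view of the current state. A sketch of that core with hypothetical simplified states:

```java
import java.util.EnumMap;
import java.util.HashSet;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.locks.ReentrantReadWriteLock;

final class StateTable {
  enum State { HEALTHY, STALE, DEAD }

  private final Map<State, Set<UUID>> stateMap = new EnumMap<>(State.class);
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

  StateTable() {
    for (State s : State.values()) {
      stateMap.put(s, new HashSet<>());  // one bucket per state, up front
    }
  }

  void transition(UUID id, State from, State to) {
    lock.writeLock().lock();
    try {
      if (!stateMap.get(from).remove(id)) {
        throw new NoSuchElementException(id + " is not in state " + from);
      }
      stateMap.get(to).add(id);          // id now lives in exactly one set
    } finally {
      lock.writeLock().unlock();
    }
  }
}
```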
- * - * @return node count - */ - public int getTotalNodeCount() { - lock.readLock().lock(); - try { - return nodeMap.size(); - } finally { - lock.readLock().unlock(); - } - } - - /** - * Returns the current state of the node. - * - * @param uuid node id - * - * @return NodeState - * - * @throws NodeNotFoundException if the node is not found - */ - public NodeState getNodeState(UUID uuid) throws NodeNotFoundException { - lock.readLock().lock(); - try { - for (Map.Entry> entry : stateMap.entrySet()) { - if (entry.getValue().contains(uuid)) { - return entry.getKey(); - } - } - throw new NodeNotFoundException("Node UUID: " + uuid); - } finally { - lock.readLock().unlock(); - } - } - - /** - * Removes the node from NodeStateMap. - * - * @param uuid node id - * - * @throws NodeNotFoundException if the node is not found - */ - public void removeNode(UUID uuid) throws NodeNotFoundException { - lock.writeLock().lock(); - try { - if (nodeMap.containsKey(uuid)) { - for (Map.Entry> entry : stateMap.entrySet()) { - if(entry.getValue().remove(uuid)) { - break; - } - nodeMap.remove(uuid); - } - throw new NodeNotFoundException("Node UUID: " + uuid); - } - } finally { - lock.writeLock().unlock(); - } - } - - /** - * Returns the current stats of the node. - * - * @param uuid node id - * - * @return SCMNodeStat of the specify node. - * - * @throws NodeNotFoundException if the node is not found - */ - public SCMNodeStat getNodeStat(UUID uuid) throws NodeNotFoundException { - SCMNodeStat stat = nodeStats.get(uuid); - if (stat == null) { - throw new NodeNotFoundException("Node UUID: " + uuid); - } - return stat; - } - - /** - * Returns a unmodifiable copy of nodeStats. - * - * @return map with node stats. - */ - public Map getNodeStats() { - return Collections.unmodifiableMap(nodeStats); - } - - /** - * Set the current stats of the node. - * - * @param uuid node id - * - * @param newstat stat that will set to the specify node. - */ - public void setNodeStat(UUID uuid, SCMNodeStat newstat) { - nodeStats.put(uuid, newstat); - } - - /** - * Remove the current stats of the specify node. - * - * @param uuid node id - * - * @return SCMNodeStat the stat removed from the node. - * - * @throws NodeNotFoundException if the node is not found - */ - public SCMNodeStat removeNodeStat(UUID uuid) throws NodeNotFoundException { - SCMNodeStat stat = nodeStats.remove(uuid); - if (stat == null) { - throw new NodeNotFoundException("Node UUID: " + uuid); - } - return stat; - } - - /** - * Since we don't hold a global lock while constructing this string, - * the result might be inconsistent. If someone has changed the state of node - * while we are constructing the string, the result will be inconsistent. - * This should only be used for logging. We should not parse this string and - * use it for any critical calculations. 
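Note the brace placement in the removeNode just removed: nodeMap.remove(uuid) sits inside the per-state loop (so it only runs on iterations where the state-set remove failed), the NodeNotFoundException is thrown even when the node was found and removed, and an unknown uuid silently does nothing. The intent was presumably closer to the following sketch (hypothetical simplified fields; synchronized stands in for the write lock):

```java
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Set;
import java.util.UUID;

final class NodeTable {
  private final Map<UUID, Object> nodeMap = new HashMap<>();
  private final Map<String, Set<UUID>> stateMap = new HashMap<>();

  synchronized void removeNode(UUID id) {
    if (!nodeMap.containsKey(id)) {
      throw new NoSuchElementException("Node UUID: " + id);
    }
    for (Set<UUID> ids : stateMap.values()) {
      if (ids.remove(id)) {
        break;              // a node is tracked in exactly one state set
      }
    }
    nodeMap.remove(id);     // remove once, after the state-set cleanup
  }
}
```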
- * - * @return current state of NodeStateMap - */ - @Override - public String toString() { - StringBuilder builder = new StringBuilder(); - builder.append("Total number of nodes: ").append(getTotalNodeCount()); - for (NodeState state : NodeState.values()) { - builder.append("Number of nodes in ").append(state).append(" state: ") - .append(getNodeCount(state)); - } - return builder.toString(); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/ReportResult.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/ReportResult.java deleted file mode 100644 index 0c7610fc7bd42..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/ReportResult.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.scm.node.states; - -import java.util.Collections; -import java.util.Set; - -import com.google.common.base.Preconditions; - -/** - * A Container/Pipeline Report gets processed by the - * Node2Container/Node2Pipeline and returns Report Result class. - */ -public final class ReportResult { - private ReportStatus status; - private Set missingEntries; - private Set newEntries; - - private ReportResult(ReportStatus status, - Set missingEntries, - Set newEntries) { - this.status = status; - Preconditions.checkNotNull(missingEntries); - Preconditions.checkNotNull(newEntries); - this.missingEntries = missingEntries; - this.newEntries = newEntries; - } - - public ReportStatus getStatus() { - return status; - } - - public Set getMissingEntries() { - return missingEntries; - } - - public Set getNewEntries() { - return newEntries; - } - - /** - * Result after processing report for node2Object map. - * @param - */ - public static class ReportResultBuilder { - private ReportStatus status; - private Set missingEntries; - private Set newEntries; - - public ReportResultBuilder setStatus( - ReportStatus newStatus) { - this.status = newStatus; - return this; - } - - public ReportResultBuilder setMissingEntries( - Set missingEntriesList) { - this.missingEntries = missingEntriesList; - return this; - } - - public ReportResultBuilder setNewEntries( - Set newEntriesList) { - this.newEntries = newEntriesList; - return this; - } - - public ReportResult build() { - - Set nullSafeMissingEntries = this.missingEntries; - Set nullSafeNewEntries = this.newEntries; - if (nullSafeNewEntries == null) { - nullSafeNewEntries = Collections.emptySet(); - } - if (nullSafeMissingEntries == null) { - nullSafeMissingEntries = Collections.emptySet(); - } - return new ReportResult(status, nullSafeMissingEntries, - nullSafeNewEntries); - } - } - - /** - * Results possible from processing a report. 
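The ReportResult builder above normalizes unset entry sets to Collections.emptySet() at build time, which is what lets the private constructor's null checks hold for every caller. The pattern in miniature:

```java
import java.util.Collections;
import java.util.Set;

final class Result<T> {
  final Set<T> missing;
  final Set<T> added;

  private Result(Set<T> missing, Set<T> added) {
    this.missing = missing;  // never null by construction
    this.added = added;
  }

  static final class Builder<T> {
    private Set<T> missing;
    private Set<T> added;

    Builder<T> missing(Set<T> s) { this.missing = s; return this; }
    Builder<T> added(Set<T> s) { this.added = s; return this; }

    Result<T> build() {
      // Unset fields become immutable empty sets, never null.
      return new Result<>(
          missing == null ? Collections.<T>emptySet() : missing,
          added == null ? Collections.<T>emptySet() : added);
    }
  }
}
```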
- */ - public enum ReportStatus { - ALL_IS_WELL, - MISSING_ENTRIES, - NEW_ENTRIES_FOUND, - MISSING_AND_NEW_ENTRIES_FOUND, - NEW_DATANODE_FOUND, - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/package-info.java deleted file mode 100644 index c429c5c3e138b..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - * - */ - -/** - * Node States package. - */ -package org.apache.hadoop.hdds.scm.node.states; \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/package-info.java deleted file mode 100644 index 4669e741ef05b..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm; - -/* - * This package contains StorageContainerManager classes. - */ diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineActionEventHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineActionEventHandler.java deleted file mode 100644 index 1053149bba8d2..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineActionEventHandler.java +++ /dev/null @@ -1,62 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.pipelines; - -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineAction; -import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher - .PipelineActionsFromDatanode; - -import org.apache.hadoop.hdds.server.events.EventHandler; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Handles pipeline actions from datanode. - */ -public class PipelineActionEventHandler implements - EventHandler { - - public static final Logger LOG = LoggerFactory.getLogger( - PipelineActionEventHandler.class); - - public PipelineActionEventHandler() { - - } - - @Override - public void onMessage(PipelineActionsFromDatanode report, - EventPublisher publisher) { - for (PipelineAction action : report.getReport().getPipelineActionsList()) { - switch (action.getAction()) { - case CLOSE: - PipelineID pipelineID = PipelineID. - getFromProtobuf(action.getClosePipeline().getPipelineID()); - LOG.info("Closing pipeline " + pipelineID + " for reason:" + action - .getClosePipeline().getDetailedReason()); - publisher.fireEvent(SCMEvents.PIPELINE_CLOSE, pipelineID); - break; - default: - LOG.error("unknown pipeline action:{}" + action.getAction()); - } - } - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineCloseHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineCloseHandler.java deleted file mode 100644 index e49678fee8133..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineCloseHandler.java +++ /dev/null @@ -1,52 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
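One detail in the handler just removed: the default branch concatenates the action onto a message that still contains an SLF4J placeholder (LOG.error("unknown pipeline action:{}" + action.getAction())), so the literal "{}" ends up in the log with the action glued after it. The parameterized form SLF4J expects:

```java
// Pass the action as an argument so SLF4J fills in the placeholder.
LOG.error("unknown pipeline action: {}", action.getAction());
```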

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.pipelines; - -import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID; -import org.apache.hadoop.hdds.server.events.EventHandler; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Handles pipeline close event. - */ -public class PipelineCloseHandler implements EventHandler { - private static final Logger LOG = LoggerFactory - .getLogger(PipelineCloseHandler.class); - - private final PipelineSelector pipelineSelector; - public PipelineCloseHandler(PipelineSelector pipelineSelector) { - this.pipelineSelector = pipelineSelector; - } - - @Override - public void onMessage(PipelineID pipelineID, EventPublisher publisher) { - Pipeline pipeline = pipelineSelector.getPipeline(pipelineID); - try { - if (pipeline != null) { - pipelineSelector.finalizePipeline(pipeline); - } else { - LOG.debug("pipeline:{} not found", pipelineID); - } - } catch (Exception e) { - LOG.info("failed to close pipeline:{}", pipelineID, e); - } - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java deleted file mode 100644 index ca2e8786370d0..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java +++ /dev/null @@ -1,171 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.pipelines; - -import java.util.ArrayList; -import java.util.LinkedList; - -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.List; -import java.util.concurrent.atomic.AtomicInteger; - -/** - * Manage Ozone pipelines. - */ -public abstract class PipelineManager { - private static final Logger LOG = - LoggerFactory.getLogger(PipelineManager.class); - protected final ArrayList activePipelines; - - public PipelineManager() { - activePipelines = new ArrayList<>(); - for (ReplicationFactor factor : ReplicationFactor.values()) { - activePipelines.add(factor.ordinal(), new ActivePipelines()); - } - } - - /** - * List of active pipelines. - */ - public static class ActivePipelines { - private final List activePipelines; - private final AtomicInteger pipelineIndex; - - ActivePipelines() { - activePipelines = new LinkedList<>(); - pipelineIndex = new AtomicInteger(0); - } - - void addPipeline(PipelineID pipelineID) { - if (!activePipelines.contains(pipelineID)) { - activePipelines.add(pipelineID); - } - } - - public void removePipeline(PipelineID pipelineID) { - activePipelines.remove(pipelineID); - } - - /** - * Find a Pipeline that is operational. - * - * @return - Pipeline or null - */ - PipelineID findOpenPipeline() { - if (activePipelines.size() == 0) { - LOG.error("No Operational pipelines found. Returning null."); - return null; - } - return activePipelines.get(getNextIndex()); - } - - /** - * gets the next index of the Pipeline to get. - * - * @return index in the link list to get. - */ - private int getNextIndex() { - return pipelineIndex.incrementAndGet() % activePipelines.size(); - } - } - - /** - * This function is called by the Container Manager while allocating a new - * container. The client specifies what kind of replication pipeline is - * needed and based on the replication type in the request appropriate - * Interface is invoked. - * - * @param replicationFactor - Replication Factor - * @return a Pipeline. - */ - public synchronized final PipelineID getPipeline( - ReplicationFactor replicationFactor, ReplicationType replicationType) { - PipelineID id = - activePipelines.get(replicationFactor.ordinal()).findOpenPipeline(); - if (id != null) { - LOG.debug("re-used pipeline:{} for container with " + - "replicationType:{} replicationFactor:{}", - id, replicationType, replicationFactor); - } - if (id == null) { - LOG.error("Get pipeline call failed. 
- - /** - * This function is called by the Container Manager while allocating a new - * container. The client specifies what kind of replication pipeline is - * needed and based on the replication type in the request the appropriate - * interface is invoked. - * - * @param replicationFactor - Replication Factor - * @return the ID of an open Pipeline, or null if none is available. - */ - public synchronized final PipelineID getPipeline( - ReplicationFactor replicationFactor, ReplicationType replicationType) { - PipelineID id = - activePipelines.get(replicationFactor.ordinal()).findOpenPipeline(); - if (id != null) { - LOG.debug("re-used pipeline:{} for container with " + - "replicationType:{} replicationFactor:{}", - id, replicationType, replicationFactor); - } - if (id == null) { - LOG.error("Get pipeline call failed. We are not able to find an" + - " operational pipeline."); - return null; - } else { - return id; - } - } - - void addOpenPipeline(Pipeline pipeline) { - activePipelines.get(pipeline.getFactor().ordinal()) - .addPipeline(pipeline.getId()); - } - - public abstract Pipeline allocatePipeline( - ReplicationFactor replicationFactor); - - /** - * Initialize the pipeline. - * TODO: move the initialization to Ozone Client later - */ - public abstract void initializePipeline(Pipeline pipeline) throws IOException; - - public void processPipelineReport(Pipeline pipeline, DatanodeDetails dn) { - if (pipeline.addMember(dn) - && (pipeline.getDatanodes().size() == pipeline.getFactor().getNumber()) - && pipeline.getLifeCycleState() == HddsProtos.LifeCycleState.OPEN) { - addOpenPipeline(pipeline); - } - } - - /** - * Creates a pipeline with a specified replication factor and type. - * @param replicationFactor - Replication Factor. - * @param replicationType - Replication Type. - */ - public Pipeline createPipeline(ReplicationFactor replicationFactor, - ReplicationType replicationType) throws IOException { - Pipeline pipeline = allocatePipeline(replicationFactor); - if (pipeline != null) { - LOG.debug("created new pipeline:{} for container with " - + "replicationType:{} replicationFactor:{}", - pipeline.getId(), replicationType, replicationFactor); - } - return pipeline; - } - - /** - * Remove the pipeline from active allocation. - * @param pipeline pipeline to be finalized - */ - public abstract boolean finalizePipeline(Pipeline pipeline); - - /** - * Close the pipeline. - * @param pipeline pipeline to be closed - */ - public abstract void closePipeline(Pipeline pipeline) throws IOException; -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineReportHandler.java deleted file mode 100644 index 933792bee345f..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineReportHandler.java +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.pipelines; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.scm.server - .SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode; -import org.apache.hadoop.hdds.server.events.EventHandler; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Handles Pipeline Reports from datanode. 
- */ -public class PipelineReportHandler implements - EventHandler<PipelineReportFromDatanode> { - - private static final Logger LOGGER = LoggerFactory - .getLogger(PipelineReportHandler.class); - private final PipelineSelector pipelineSelector; - - public PipelineReportHandler(PipelineSelector pipelineSelector) { - Preconditions.checkNotNull(pipelineSelector); - this.pipelineSelector = pipelineSelector; - } - - @Override - public void onMessage(PipelineReportFromDatanode pipelineReportFromDatanode, - EventPublisher publisher) { - Preconditions.checkNotNull(pipelineReportFromDatanode); - DatanodeDetails dn = pipelineReportFromDatanode.getDatanodeDetails(); - PipelineReportsProto pipelineReport = - pipelineReportFromDatanode.getReport(); - Preconditions.checkNotNull(dn, "Pipeline Report is " - + "missing DatanodeDetails."); - LOGGER.trace("Processing pipeline report for dn: {}", dn); - pipelineSelector.processPipelineReport(dn, pipelineReport); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java deleted file mode 100644 index c8d22ff645074..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java +++ /dev/null @@ -1,481 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.pipelines; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID; -import org.apache.hadoop.hdds.scm.container.placement.algorithms - .ContainerPlacementPolicy; -import org.apache.hadoop.hdds.scm.container.placement.algorithms - .SCMContainerPlacementRandom; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.pipelines.ratis.RatisManagerImpl; -import org.apache.hadoop.hdds.scm.pipelines.standalone.StandaloneManagerImpl; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.lease.Lease; -import org.apache.hadoop.ozone.lease.LeaseException; -import org.apache.hadoop.ozone.lease.LeaseManager; -import org.apache.hadoop.utils.MetadataStore; -import org.apache.hadoop.utils.MetadataStoreBuilder; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.lang.reflect.Constructor; -import java.lang.reflect.InvocationTargetException; -import java.util.HashSet; -import java.util.List; -import java.util.HashMap; -import java.util.Set; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.TimeUnit; - -import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes - .FAILED_TO_CHANGE_PIPELINE_STATE; -import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.FAILED_TO_FIND_ACTIVE_PIPELINE; -import static org.apache.hadoop.hdds.server - .ServerUtils.getOzoneMetaDirPath; -import static org.apache.hadoop.ozone - .OzoneConsts.SCM_PIPELINE_DB; - -/** - * Sends the request to the right pipeline manager. 
- */ -public class PipelineSelector { - private static final Logger LOG = - LoggerFactory.getLogger(PipelineSelector.class); - private final ContainerPlacementPolicy placementPolicy; - private final Map<ReplicationType, PipelineManager> pipelineManagerMap; - private final Configuration conf; - private final EventPublisher eventPublisher; - private final long containerSize; - private final MetadataStore pipelineStore; - private final PipelineStateManager stateManager; - private final NodeManager nodeManager; - private final Map<PipelineID, HashSet<ContainerID>> pipeline2ContainerMap; - private final Map<PipelineID, Pipeline> pipelineMap; - private final LeaseManager<Pipeline> pipelineLeaseManager; - - /** - * Constructs a pipeline Selector. - * - * @param nodeManager - node manager - * @param conf - Ozone Config - */ - public PipelineSelector(NodeManager nodeManager, Configuration conf, - EventPublisher eventPublisher, int cacheSizeMB) throws IOException { - this.conf = conf; - this.eventPublisher = eventPublisher; - this.placementPolicy = createContainerPlacementPolicy(nodeManager, conf); - this.containerSize = (long)this.conf.getStorageSize( - ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE, - ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT, - StorageUnit.BYTES); - pipelineMap = new ConcurrentHashMap<>(); - pipelineManagerMap = new HashMap<>(); - - pipelineManagerMap.put(ReplicationType.STAND_ALONE, - new StandaloneManagerImpl(nodeManager, placementPolicy, - containerSize)); - pipelineManagerMap.put(ReplicationType.RATIS, - new RatisManagerImpl(nodeManager, placementPolicy, - containerSize, conf)); - long pipelineCreationLeaseTimeout = conf.getTimeDuration( - ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_LEASE_TIMEOUT, - ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_LEASE_TIMEOUT_DEFAULT, - TimeUnit.MILLISECONDS); - pipelineLeaseManager = new LeaseManager<>("PipelineCreation", - pipelineCreationLeaseTimeout); - pipelineLeaseManager.start(); - - stateManager = new PipelineStateManager(); - this.nodeManager = nodeManager; - pipeline2ContainerMap = new HashMap<>(); - - // Write the container name to pipeline mapping. - File metaDir = getOzoneMetaDirPath(conf); - File containerDBPath = new File(metaDir, SCM_PIPELINE_DB); - pipelineStore = MetadataStoreBuilder.newBuilder() - .setConf(conf) - .setDbFile(containerDBPath) - .setCacheSize(cacheSizeMB * OzoneConsts.MB) - .build(); - - reloadExistingPipelines(); - } - - private void reloadExistingPipelines() throws IOException { - if (pipelineStore.isEmpty()) { - // Nothing to do just return - return; - } - - List<Map.Entry<byte[], byte[]>> range = - pipelineStore.getSequentialRangeKVs(null, Integer.MAX_VALUE, null); - - // Transform the values into the pipelines. 
- // TODO: filter by pipeline state - for (Map.Entry<byte[], byte[]> entry : range) { - Pipeline pipeline = Pipeline.getFromProtoBuf( - HddsProtos.Pipeline.PARSER.parseFrom(entry.getValue())); - Preconditions.checkNotNull(pipeline); - addExistingPipeline(pipeline); - } - } - - @VisibleForTesting - public Set<ContainerID> getOpenContainerIDsByPipeline(PipelineID pipelineID) { - return pipeline2ContainerMap.get(pipelineID); - } - - public void addContainerToPipeline(PipelineID pipelineID, long containerID) { - pipeline2ContainerMap.get(pipelineID) - .add(ContainerID.valueof(containerID)); - } - - public void removeContainerFromPipeline(PipelineID pipelineID, - long containerID) throws IOException { - pipeline2ContainerMap.get(pipelineID) - .remove(ContainerID.valueof(containerID)); - closePipelineIfNoOpenContainers(pipelineMap.get(pipelineID)); - } - - /** - * Translates a list of nodes, ordered such that the first is the leader, into - * a corresponding {@link Pipeline} object. - * - * @param nodes - list of datanodes on which we will allocate the container. - * The first of the list will be the leader node. - * @return pipeline corresponding to nodes - */ - public static Pipeline newPipelineFromNodes( - List<DatanodeDetails> nodes, ReplicationType replicationType, - ReplicationFactor replicationFactor, PipelineID id) { - Preconditions.checkNotNull(nodes); - Preconditions.checkArgument(nodes.size() > 0); - String leaderId = nodes.get(0).getUuidString(); - // A new pipeline always starts in allocated state - Pipeline pipeline = new Pipeline(leaderId, LifeCycleState.ALLOCATED, - replicationType, replicationFactor, id); - for (DatanodeDetails node : nodes) { - pipeline.addMember(node); - } - return pipeline; - } - - /** - * Create pluggable container placement policy implementation instance. - * - * @param nodeManager - SCM node manager. - * @param conf - configuration. - * @return SCM container placement policy implementation instance. - */ - @SuppressWarnings("unchecked") - private static ContainerPlacementPolicy createContainerPlacementPolicy( - final NodeManager nodeManager, final Configuration conf) { - Class<? extends ContainerPlacementPolicy> implClass = - (Class<? extends ContainerPlacementPolicy>) conf.getClass( - ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, - SCMContainerPlacementRandom.class); - - try { - Constructor<? extends ContainerPlacementPolicy> ctor = - implClass.getDeclaredConstructor(NodeManager.class, - Configuration.class); - return ctor.newInstance(nodeManager, conf); - } catch (RuntimeException e) { - throw e; - } catch (InvocationTargetException e) { - throw new RuntimeException(implClass.getName() - + " could not be constructed.", e.getCause()); - } catch (Exception e) { - LOG.error("Unhandled exception occurred, Placement policy will not be " + - "functional."); - throw new IllegalArgumentException("Unable to load " + - "ContainerPlacementPolicy", e); - } - } - - /** - * This function is called by the Container Manager while allocating a new - * container. The client specifies what kind of replication pipeline is needed - * and based on the replication type in the request the appropriate interface - * is invoked. 
- */ - - public Pipeline getReplicationPipeline(ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor) - throws IOException { - PipelineManager manager = pipelineManagerMap.get(replicationType); - Preconditions.checkNotNull(manager, "Found invalid pipeline manager"); - LOG.debug("Getting replication pipeline for ReplicationType {} :" + - " ReplicationFactor {}", replicationType.toString(), - replicationFactor.toString()); - - /** - * In the Ozone world, we have a very simple policy. - * - * 1. Try to create a pipeline if there are enough free nodes. - * - * 2. This allows all nodes to be part of a pipeline quickly. - * - * 3. If there are not enough free nodes, return an already allocated - * pipeline in a round-robin fashion. - * - * TODO: Might have to come up with a better algorithm than this. - * Create a new placement policy that returns pipelines in round robin - * fashion. - */ - Pipeline pipeline = - manager.createPipeline(replicationFactor, replicationType); - if (pipeline == null) { - // try to return a pipeline from already allocated pipelines - PipelineID pipelineId = - manager.getPipeline(replicationFactor, replicationType); - if (pipelineId == null) { - throw new SCMException(FAILED_TO_FIND_ACTIVE_PIPELINE); - } - pipeline = pipelineMap.get(pipelineId); - Preconditions.checkArgument(pipeline.getLifeCycleState() == - LifeCycleState.OPEN); - } else { - pipelineStore.put(pipeline.getId().getProtobuf().toByteArray(), - pipeline.getProtobufMessage().toByteArray()); - // if a new pipeline is created, initialize its state machine - updatePipelineState(pipeline, HddsProtos.LifeCycleEvent.CREATE); - - //TODO: move the initialization of pipeline to Ozone Client - manager.initializePipeline(pipeline); - updatePipelineState(pipeline, HddsProtos.LifeCycleEvent.CREATED); - } - return pipeline; - }
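The create-first, reuse-second policy implemented in getReplicationPipeline fits in a few lines once the two sources of pipelines are abstracted away. A sketch under assumed stand-in types: Supplier and Optional (Java 9+ for Optional.or) are placeholders for the manager calls, not the real SCM API.

import java.util.Optional;
import java.util.function.Supplier;

/** Sketch of the allocation policy in getReplicationPipeline; illustrative only. */
final class PipelinePolicy {
  /**
   * Prefer a brand-new pipeline built from free nodes; otherwise fall back
   * to an already-open pipeline chosen in round-robin order.
   *
   * @throws IllegalStateException when neither source yields a pipeline,
   *         the analogue of FAILED_TO_FIND_ACTIVE_PIPELINE above.
   */
  static <P> P pick(Supplier<Optional<P>> allocateNew,
      Supplier<Optional<P>> reuseOpen) {
    return allocateNew.get()
        .or(reuseOpen)
        .orElseThrow(() ->
            new IllegalStateException("No operational pipeline available"));
  }

  public static void main(String[] args) {
    // No free nodes (empty first source), so an open pipeline is reused.
    String picked = pick(Optional::empty, () -> Optional.of("open-pipeline"));
    System.out.println(picked); // open-pipeline
  }
}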
- - /** - * Returns the pipeline for a given pipeline ID. - */ - public Pipeline getPipeline(PipelineID pipelineID) { - return pipelineMap.get(pipelineID); - } - - /** - * Finalize a given pipeline. - */ - public void finalizePipeline(Pipeline pipeline) throws IOException { - PipelineManager manager = pipelineManagerMap.get(pipeline.getType()); - Preconditions.checkNotNull(manager, "Found invalid pipeline manager"); - if (pipeline.getLifeCycleState() == LifeCycleState.CLOSING || - pipeline.getLifeCycleState() == LifeCycleState.CLOSED) { - LOG.debug("pipeline:{} already in closing state, skipping", - pipeline.getId()); - // already in closing/closed state - return; - } - - // Remove the pipeline from active allocation - if (manager.finalizePipeline(pipeline)) { - LOG.info("Finalizing pipeline. pipelineID: {}", pipeline.getId()); - updatePipelineState(pipeline, HddsProtos.LifeCycleEvent.FINALIZE); - closePipelineIfNoOpenContainers(pipeline); - } - } - - /** - * Close a given pipeline once it has no open containers left. - */ - private void closePipelineIfNoOpenContainers(Pipeline pipeline) - throws IOException { - if (pipeline.getLifeCycleState() != LifeCycleState.CLOSING) { - return; - } - HashSet<ContainerID> containerIDS = - pipeline2ContainerMap.get(pipeline.getId()); - if (containerIDS.size() == 0) { - updatePipelineState(pipeline, HddsProtos.LifeCycleEvent.CLOSE); - LOG.info("Closing pipeline. pipelineID: {}", pipeline.getId()); - } - } - - /** - * Close a given pipeline. - */ - private void closePipeline(Pipeline pipeline) throws IOException { - PipelineManager manager = pipelineManagerMap.get(pipeline.getType()); - Preconditions.checkNotNull(manager, "Found invalid pipeline manager"); - LOG.debug("Closing pipeline. pipelineID: {}", pipeline.getId()); - HashSet<ContainerID> containers = - pipeline2ContainerMap.get(pipeline.getId()); - Preconditions.checkArgument(containers.size() == 0); - manager.closePipeline(pipeline); - } - - /** - * Add a pipeline to the set of open pipelines. - */ - private void addOpenPipeline(Pipeline pipeline) { - PipelineManager manager = pipelineManagerMap.get(pipeline.getType()); - Preconditions.checkNotNull(manager, "Found invalid pipeline manager"); - LOG.debug("Adding Open pipeline. pipelineID: {}", pipeline.getId()); - manager.addOpenPipeline(pipeline); - } - - private void closeContainersByPipeline(Pipeline pipeline) { - HashSet<ContainerID> containers = - pipeline2ContainerMap.get(pipeline.getId()); - for (ContainerID id : containers) { - eventPublisher.fireEvent(SCMEvents.CLOSE_CONTAINER, id); - } - } - - private void addExistingPipeline(Pipeline pipeline) throws IOException { - LifeCycleState state = pipeline.getLifeCycleState(); - switch (state) { - case ALLOCATED: - // A pipeline in allocated state is only present in SCM and does not - // exist on the datanodes; on SCM restart, it can be ignored. - break; - case CREATING: - case OPEN: - case CLOSING: - //TODO: process pipeline report and move pipeline to active queue - // when all the nodes have reported. - pipelineMap.put(pipeline.getId(), pipeline); - pipeline2ContainerMap.put(pipeline.getId(), new HashSet<>()); - nodeManager.addPipeline(pipeline); - // reset the datanodes in the pipeline; - // they will be re-added as pipeline reports are processed - pipeline.resetPipeline(); - break; - case CLOSED: - // if the pipeline is in closed state, nothing to do. - break; - default: - throw new IOException("invalid pipeline state:" + state); - } - } - - public void handleStaleNode(DatanodeDetails dn) { - Set<PipelineID> pipelineIDs = nodeManager.getPipelineByDnID(dn.getUuid()); - for (PipelineID id : pipelineIDs) { - LOG.info("closing pipeline {}.", id); - eventPublisher.fireEvent(SCMEvents.PIPELINE_CLOSE, id); - } - } - - void processPipelineReport(DatanodeDetails dn, - PipelineReportsProto pipelineReport) { - Set<PipelineID> reportedPipelines = new HashSet<>(); - pipelineReport.getPipelineReportList(). - forEach(p -> - reportedPipelines.add( - processPipelineReport(p.getPipelineID(), dn))); - - //TODO: handle missing pipelines and new pipelines later - } - - private PipelineID processPipelineReport( - HddsProtos.PipelineID id, DatanodeDetails dn) { - PipelineID pipelineID = PipelineID.getFromProtobuf(id); - Pipeline pipeline = pipelineMap.get(pipelineID); - if (pipeline != null) { - pipelineManagerMap.get(pipeline.getType()) - .processPipelineReport(pipeline, dn); - } - return pipelineID; - } - - /** - * Update the Pipeline State to the next state. - * - * @param pipeline - Pipeline - * @param event - LifeCycle Event - * @throws SCMException on Failure. 
- */ - public void updatePipelineState(Pipeline pipeline, - HddsProtos.LifeCycleEvent event) throws IOException { - try { - switch (event) { - case CREATE: - pipelineMap.put(pipeline.getId(), pipeline); - pipeline2ContainerMap.put(pipeline.getId(), new HashSet<>()); - nodeManager.addPipeline(pipeline); - // Acquire lease on pipeline - Lease<Pipeline> pipelineLease = pipelineLeaseManager.acquire(pipeline); - // Register callback to be executed in case of timeout - pipelineLease.registerCallBack(() -> { - updatePipelineState(pipeline, HddsProtos.LifeCycleEvent.TIMEOUT); - return null; - }); - break; - case CREATED: - // Release the lease on pipeline - pipelineLeaseManager.release(pipeline); - addOpenPipeline(pipeline); - break; - - case FINALIZE: - closeContainersByPipeline(pipeline); - break; - - case CLOSE: - case TIMEOUT: - closePipeline(pipeline); - pipeline2ContainerMap.remove(pipeline.getId()); - nodeManager.removePipeline(pipeline); - pipelineMap.remove(pipeline.getId()); - break; - default: - throw new SCMException("Unsupported pipeline LifeCycleEvent.", - FAILED_TO_CHANGE_PIPELINE_STATE); - } - - stateManager.updatePipelineState(pipeline, event); - pipelineStore.put(pipeline.getId().getProtobuf().toByteArray(), - pipeline.getProtobufMessage().toByteArray()); - } catch (LeaseException e) { - throw new IOException("Lease Exception.", e); - } - } - - public void shutdown() throws IOException { - if (pipelineLeaseManager != null) { - pipelineLeaseManager.shutdown(); - } - - if (pipelineStore != null) { - pipelineStore.close(); - } - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineStateManager.java deleted file mode 100644 index 6054f165444ea..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineStateManager.java +++ /dev/null @@ -1,136 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.pipelines; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.ozone.common.statemachine - .InvalidStateTransitionException; -import org.apache.hadoop.ozone.common.statemachine.StateMachine; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.HashSet; -import java.util.Set; - -import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes - .FAILED_TO_CHANGE_PIPELINE_STATE; - -/** - * Manages Pipeline states. - */ -public class PipelineStateManager { - private static final Logger LOG = - LoggerFactory.getLogger(PipelineStateManager.class); - - private final StateMachine<HddsProtos.LifeCycleState, HddsProtos.LifeCycleEvent> stateMachine; - - PipelineStateManager() { - // Initialize the pipeline state machine. - Set<HddsProtos.LifeCycleState> finalStates = new HashSet<>(); - // These are the steady states of a pipeline. - finalStates.add(HddsProtos.LifeCycleState.OPEN); - finalStates.add(HddsProtos.LifeCycleState.CLOSED); - - this.stateMachine = new StateMachine<>(HddsProtos.LifeCycleState.ALLOCATED, - finalStates); - initializeStateMachine(); - } - - /** - * Event and State Transition Mapping. - * - * State: ALLOCATED ---------------> CREATING - * Event: CREATE - * - * State: CREATING ---------------> OPEN - * Event: CREATED - * - * State: OPEN ---------------> CLOSING - * Event: FINALIZE - * - * State: CLOSING ---------------> CLOSED - * Event: CLOSE - * - * State: CREATING ---------------> CLOSED - * Event: TIMEOUT - * - * - * Pipeline State Flow: - * - * [ALLOCATED]---->[CREATING]------>[OPEN]-------->[CLOSING] - * (CREATE) | (CREATED) (FINALIZE) | - * | | - * | | - * |(TIMEOUT) |(CLOSE) - * | | - * +--------> [CLOSED] <--------+ - */ - private void initializeStateMachine() { - stateMachine.addTransition(HddsProtos.LifeCycleState.ALLOCATED, - HddsProtos.LifeCycleState.CREATING, - HddsProtos.LifeCycleEvent.CREATE); - - stateMachine.addTransition(HddsProtos.LifeCycleState.CREATING, - HddsProtos.LifeCycleState.OPEN, - HddsProtos.LifeCycleEvent.CREATED); - - stateMachine.addTransition(HddsProtos.LifeCycleState.OPEN, - HddsProtos.LifeCycleState.CLOSING, - HddsProtos.LifeCycleEvent.FINALIZE); - - stateMachine.addTransition(HddsProtos.LifeCycleState.CLOSING, - HddsProtos.LifeCycleState.CLOSED, - HddsProtos.LifeCycleEvent.CLOSE); - - stateMachine.addTransition(HddsProtos.LifeCycleState.CREATING, - HddsProtos.LifeCycleState.CLOSED, - HddsProtos.LifeCycleEvent.TIMEOUT); - }
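The transition mapping above can be stepped through without the Hadoop StateMachine helper. A self-contained sketch with hypothetical State and Event enums mirroring the HddsProtos lifecycle values (this is not the StateMachine class the constructor uses):

import java.util.EnumMap;
import java.util.Map;

/** Self-contained sketch of the pipeline lifecycle transitions above. */
class LifecycleSketch {
  enum State { ALLOCATED, CREATING, OPEN, CLOSING, CLOSED }
  enum Event { CREATE, CREATED, FINALIZE, CLOSE, TIMEOUT }

  private static final Map<State, Map<Event, State>> TRANSITIONS =
      new EnumMap<>(State.class);
  static {
    put(State.ALLOCATED, Event.CREATE, State.CREATING);
    put(State.CREATING, Event.CREATED, State.OPEN);
    put(State.OPEN, Event.FINALIZE, State.CLOSING);
    put(State.CLOSING, Event.CLOSE, State.CLOSED);
    put(State.CREATING, Event.TIMEOUT, State.CLOSED);
  }

  private static void put(State from, Event on, State to) {
    TRANSITIONS.computeIfAbsent(from, s -> new EnumMap<>(Event.class))
        .put(on, to);
  }

  /** @throws IllegalStateException on an invalid transition, like the code above. */
  static State next(State current, Event event) {
    State next = TRANSITIONS.getOrDefault(current, Map.of()).get(event);
    if (next == null) {
      throw new IllegalStateException(
          "Invalid transition from " + current + " on " + event);
    }
    return next;
  }

  public static void main(String[] args) {
    State s = State.ALLOCATED;
    s = next(s, Event.CREATE);   // CREATING
    s = next(s, Event.CREATED);  // OPEN
    s = next(s, Event.FINALIZE); // CLOSING
    s = next(s, Event.CLOSE);    // CLOSED
    System.out.println(s);       // CLOSED
  }
}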
- - - /** - * Update the Pipeline State to the next state. - * - * @param pipeline - Pipeline - * @param event - LifeCycle Event - * @throws SCMException on Failure. - */ - public void updatePipelineState(Pipeline pipeline, - HddsProtos.LifeCycleEvent event) throws IOException { - HddsProtos.LifeCycleState newState; - try { - newState = stateMachine.getNextState(pipeline.getLifeCycleState(), event); - } catch (InvalidStateTransitionException ex) { - String error = String.format("Failed to update pipeline state %s, " + - "reason: invalid state transition from state: %s upon " + - "event: %s.", - pipeline.getId(), pipeline.getLifeCycleState(), event); - LOG.error(error); - throw new SCMException(error, FAILED_TO_CHANGE_PIPELINE_STATE); - } - - // This is a post condition after executing getNextState. - Preconditions.checkNotNull(newState); - Preconditions.checkNotNull(pipeline); - pipeline.setLifeCycleState(newState); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/package-info.java deleted file mode 100644 index ea24c581a8cef..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/package-info.java +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.pipelines; -/** - Ozone supports the notion of different kinds of pipelines. - That means that we can have a replication pipeline built on - Ratis, Standalone or some other protocol. All the pipeline managers, - the entities in charge of pipelines, reside in this package. - - Here is the high-level architecture. - - 1. A pipeline selector class is instantiated in the Container manager class. - - 2. A client, when creating a container, will specify what kind of - replication type it wants to use. We support two types now, Ratis and StandAlone. - - 3. Based on the replication type, the pipeline selector class asks the - corresponding pipeline manager for a pipeline. - - 4. We support the ability for clients to specify a set of nodes in - the pipeline, or to rely on the pipeline manager to select the datanodes if they - are not specified. - */ \ No newline at end of file
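Steps 1 through 3 of the flow described in the package javadoc reduce to a type-indexed lookup. A compilable sketch with stand-in names (ReplType, Manager, and the returned strings are illustrative placeholders, not the SCM interfaces):

import java.util.Map;

/** Sketch of the selector-to-manager dispatch described in the package javadoc. */
class DispatchSketch {
  enum ReplType { RATIS, STAND_ALONE }

  /** Stand-in for the per-protocol pipeline managers. */
  interface Manager {
    String allocate();
  }

  private final Map<ReplType, Manager> managers = Map.<ReplType, Manager>of(
      ReplType.RATIS, () -> "ratis-pipeline",
      ReplType.STAND_ALONE, () -> "standalone-pipeline");

  /** Route the request by the replication type the client asked for. */
  String pipelineFor(ReplType type) {
    Manager m = managers.get(type);
    if (m == null) {
      throw new IllegalArgumentException("No manager for " + type);
    }
    return m.allocate();
  }

  public static void main(String[] args) {
    System.out.println(new DispatchSketch().pipelineFor(ReplType.RATIS));
  }
}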
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java deleted file mode 100644 index 905a5b553b7f6..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java +++ /dev/null @@ -1,129 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.pipelines.ratis; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.scm.XceiverClientRatis; -import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID; -import org.apache.hadoop.hdds.scm.container.placement.algorithms - .ContainerPlacementPolicy; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.pipelines.PipelineManager; -import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.HashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.Set; - -/** - * Implementation of {@link PipelineManager}. - * - * TODO : Introduce a state machine. - */ -public class RatisManagerImpl extends PipelineManager { - private static final Logger LOG = - LoggerFactory.getLogger(RatisManagerImpl.class); - private final Configuration conf; - private final NodeManager nodeManager; - private final Set<DatanodeDetails> ratisMembers; - - /** - * Constructs a Ratis Pipeline Manager. - * - * @param nodeManager - node manager - */ - public RatisManagerImpl(NodeManager nodeManager, - ContainerPlacementPolicy placementPolicy, long size, Configuration conf) { - super(); - this.conf = conf; - this.nodeManager = nodeManager; - ratisMembers = new HashSet<>(); - } - - /** - * Allocates a new ratis Pipeline from the free nodes. - * - * @param factor - One or Three - * @return Pipeline. - */ - public Pipeline allocatePipeline(ReplicationFactor factor) { - List<DatanodeDetails> newNodesList = new LinkedList<>(); - List<DatanodeDetails> datanodes = nodeManager.getNodes(NodeState.HEALTHY); - // TODO: Add Raft state to the nodes, so we can query and skip nodes using - // data from the datanodes instead of maintaining a set. 
- for (DatanodeDetails datanode : datanodes) { - Preconditions.checkNotNull(datanode); - if (!ratisMembers.contains(datanode)) { - newNodesList.add(datanode); - if (newNodesList.size() == factor.getNumber()) { - // once a datanode has been added to a pipeline, exclude it from - // further allocations - ratisMembers.addAll(newNodesList); - PipelineID pipelineID = PipelineID.randomId(); - LOG.info("Allocating a new ratis pipeline of size: {} id: {}", - factor.getNumber(), pipelineID); - return PipelineSelector.newPipelineFromNodes(newNodesList, - ReplicationType.RATIS, factor, pipelineID); - } - } - } - return null; - } - - public void initializePipeline(Pipeline pipeline) throws IOException { - // TODO: move the initialization from SCM to the client - try (XceiverClientRatis client = - XceiverClientRatis.newXceiverClientRatis(pipeline, conf)) { - client.createPipeline(); - } - } - - public void processPipelineReport(Pipeline pipeline, DatanodeDetails dn) { - super.processPipelineReport(pipeline, dn); - ratisMembers.add(dn); - } - - public synchronized boolean finalizePipeline(Pipeline pipeline) { - activePipelines.get(pipeline.getFactor().ordinal()) - .removePipeline(pipeline.getId()); - return true; - } - - /** - * Close the pipeline. - */ - public void closePipeline(Pipeline pipeline) throws IOException { - try (XceiverClientRatis client = - XceiverClientRatis.newXceiverClientRatis(pipeline, conf)) { - client.destroyPipeline(); - } - for (DatanodeDetails node : pipeline.getMachines()) { - // A node should always be in the ratis members list. - Preconditions.checkArgument(ratisMembers.remove(node)); - } - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/package-info.java deleted file mode 100644 index 2970fb354bf86..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.pipelines.ratis; \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java deleted file mode 100644 index 045afb6ceb910..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java +++ /dev/null @@ -1,122 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.pipelines.standalone; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID; -import org.apache.hadoop.hdds.scm.container.placement.algorithms - .ContainerPlacementPolicy; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.pipelines.PipelineManager; -import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.HashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.Set; - -/** - * Standalone Manager Impl to prove that pluggable interface - * works with current tests. - */ -public class StandaloneManagerImpl extends PipelineManager { - private static final Logger LOG = - LoggerFactory.getLogger(StandaloneManagerImpl.class); - private final NodeManager nodeManager; - private final ContainerPlacementPolicy placementPolicy; - private final long containerSize; - private final Set<DatanodeDetails> standAloneMembers; - - /** - * Constructor for Standalone Node Manager Impl. - * @param nodeManager - Node Manager. - * @param placementPolicy - Placement Policy - * @param containerSize - Container Size. - */ - public StandaloneManagerImpl(NodeManager nodeManager, - ContainerPlacementPolicy placementPolicy, long containerSize) { - super(); - this.nodeManager = nodeManager; - this.placementPolicy = placementPolicy; - this.containerSize = containerSize; - this.standAloneMembers = new HashSet<>(); - } - - - /** - * Allocates a new standalone Pipeline from the free nodes. - * - * @param factor - One - * @return Pipeline. 
- */ - public Pipeline allocatePipeline(ReplicationFactor factor) { - List<DatanodeDetails> newNodesList = new LinkedList<>(); - List<DatanodeDetails> datanodes = nodeManager.getNodes(NodeState.HEALTHY); - for (DatanodeDetails datanode : datanodes) { - Preconditions.checkNotNull(datanode); - if (!standAloneMembers.contains(datanode)) { - newNodesList.add(datanode); - if (newNodesList.size() == factor.getNumber()) { - // once a datanode has been added to a pipeline, exclude it from - // further allocations - standAloneMembers.addAll(newNodesList); - // A standalone pipeline uses the node id as its pipeline id - PipelineID pipelineID = - PipelineID.valueOf(newNodesList.get(0).getUuid()); - LOG.info("Allocating a new standalone pipeline of size: {} id: {}", - factor.getNumber(), pipelineID); - return PipelineSelector.newPipelineFromNodes(newNodesList, - ReplicationType.STAND_ALONE, ReplicationFactor.ONE, pipelineID); - } - } - } - return null; - } - - public void initializePipeline(Pipeline pipeline) { - // Nothing to be done for standalone pipeline - } - - public void processPipelineReport(Pipeline pipeline, DatanodeDetails dn) { - super.processPipelineReport(pipeline, dn); - standAloneMembers.add(dn); - } - - public synchronized boolean finalizePipeline(Pipeline pipeline) { - activePipelines.get(pipeline.getFactor().ordinal()) - .removePipeline(pipeline.getId()); - return false; - } - - /** - * Close the pipeline. - */ - public void closePipeline(Pipeline pipeline) throws IOException { - for (DatanodeDetails node : pipeline.getMachines()) { - // A node should always be in the standalone members list. - Preconditions.checkArgument(standAloneMembers.remove(node)); - } - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/package-info.java deleted file mode 100644 index b2c3ca40e50e8..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.pipelines.standalone; \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java deleted file mode 100644 index 4944017593207..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.ratis; - -/** - * This package contains classes related to Apache Ratis for SCM. - */ diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ChillModePrecheck.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ChillModePrecheck.java deleted file mode 100644 index b92413e80ca72..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ChillModePrecheck.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.server; - -import java.util.concurrent.atomic.AtomicBoolean; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes; -import org.apache.hadoop.hdds.scm.server.SCMChillModeManager.ChillModeRestrictedOps; - -/** - * Chill mode pre-check for SCM operations. 
- * */ -public class ChillModePrecheck implements Precheck<ScmOps> { - - private AtomicBoolean inChillMode = new AtomicBoolean(true); - public static final String PRECHECK_TYPE = "ChillModePrecheck"; - - public boolean check(ScmOps op) throws SCMException { - if (inChillMode.get() && ChillModeRestrictedOps - .isRestrictedInChillMode(op)) { - throw new SCMException("ChillModePrecheck failed for " + op, - ResultCodes.CHILL_MODE_EXCEPTION); - } - return inChillMode.get(); - } - - @Override - public String type() { - return PRECHECK_TYPE; - } - - public boolean isInChillMode() { - return inChillMode.get(); - } - - public void setInChillMode(boolean inChillMode) { - this.inChillMode.set(inChillMode); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/Precheck.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/Precheck.java deleted file mode 100644 index 1654990830905..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/Precheck.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.server; - -import org.apache.hadoop.hdds.scm.exceptions.SCMException; - -/** - * Precheck for SCM operations. - * */ -public interface Precheck<T> { - boolean check(T t) throws SCMException; - String type(); -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java deleted file mode 100644 index 3bb284e8d0232..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java +++ /dev/null @@ -1,206 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license - * agreements. See the NOTICE file distributed with this work for additional - * information regarding - * copyright ownership. The ASF licenses this file to you under the Apache - * License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the - * License. You may obtain a - * copy of the License at - *

http://www.apache.org/licenses/LICENSE-2.0 - * - *

Unless required by applicable law or agreed to in writing, software - * distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR - * CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.server; - -import com.google.protobuf.BlockingService; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos; -import org.apache.hadoop.hdds.scm.HddsServerUtil; -import org.apache.hadoop.hdds.scm.ScmInfo; -import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; -import org.apache.hadoop.hdds.scm.container.common.helpers.DeleteBlockResult; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; -import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB; -import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.ipc.ProtobufRpcEngine; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.ozone.common.BlockGroup; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.ozone.common.DeleteBlockGroupResult; -import org.apache.hadoop.ozone.protocolPB - .ScmBlockLocationProtocolServerSideTranslatorPB; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.net.InetSocketAddress; -import java.util.ArrayList; -import java.util.List; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_HANDLER_COUNT_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_HANDLER_COUNT_KEY; -import static org.apache.hadoop.hdds.server.ServerUtils.updateRPCListenAddress; -import static org.apache.hadoop.hdds.scm.server.StorageContainerManager - .startRpcServer; - -/** - * SCM block protocol is the protocol used by Namenode and OzoneManager to get - * blocks from the SCM. - */ -public class SCMBlockProtocolServer implements ScmBlockLocationProtocol { - private static final Logger LOG = - LoggerFactory.getLogger(SCMBlockProtocolServer.class); - - private final StorageContainerManager scm; - private final OzoneConfiguration conf; - private final RPC.Server blockRpcServer; - private final InetSocketAddress blockRpcAddress; - - /** - * The RPC server that listens to requests from block service clients. 
- */ - public SCMBlockProtocolServer(OzoneConfiguration conf, - StorageContainerManager scm) throws IOException { - this.scm = scm; - this.conf = conf; - final int handlerCount = - conf.getInt(OZONE_SCM_HANDLER_COUNT_KEY, - OZONE_SCM_HANDLER_COUNT_DEFAULT); - - RPC.setProtocolEngine(conf, ScmBlockLocationProtocolPB.class, - ProtobufRpcEngine.class); - // SCM Block Service RPC - BlockingService blockProtoPbService = - ScmBlockLocationProtocolProtos.ScmBlockLocationProtocolService - .newReflectiveBlockingService( - new ScmBlockLocationProtocolServerSideTranslatorPB(this)); - - final InetSocketAddress scmBlockAddress = HddsServerUtil - .getScmBlockClientBindAddress(conf); - blockRpcServer = - startRpcServer( - conf, - scmBlockAddress, - ScmBlockLocationProtocolPB.class, - blockProtoPbService, - handlerCount); - blockRpcAddress = - updateRPCListenAddress( - conf, OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, scmBlockAddress, - blockRpcServer); - - } - - public RPC.Server getBlockRpcServer() { - return blockRpcServer; - } - - public InetSocketAddress getBlockRpcAddress() { - return blockRpcAddress; - } - - public void start() { - LOG.info( - StorageContainerManager.buildRpcServerStartMessage( - "RPC server for Block Protocol", getBlockRpcAddress())); - getBlockRpcServer().start(); - } - - public void stop() { - try { - LOG.info("Stopping the RPC server for Block Protocol"); - getBlockRpcServer().stop(); - } catch (Exception ex) { - LOG.error("Block Protocol RPC stop failed.", ex); - } - IOUtils.cleanupWithLogger(LOG, scm.getScmNodeManager()); - } - - public void join() throws InterruptedException { - LOG.trace("Join RPC server for Block Protocol"); - getBlockRpcServer().join(); - } - - @Override - public AllocatedBlock allocateBlock(long size, HddsProtos.ReplicationType - type, HddsProtos.ReplicationFactor factor, String owner) throws - IOException { - return scm.getScmBlockManager().allocateBlock(size, type, factor, owner); - } - - /** - * Delete blocks for a set of object keys. - * - * @param keyBlocksInfoList list of block keys with object keys to delete. - * @return deletion results. - */ - @Override - public List<DeleteBlockGroupResult> deleteKeyBlocks( - List<BlockGroup> keyBlocksInfoList) throws IOException { - LOG.info("SCM is informed by OM to delete {} blocks", keyBlocksInfoList - .size()); - List<DeleteBlockGroupResult> results = new ArrayList<>(); - for (BlockGroup keyBlocks : keyBlocksInfoList) { - ScmBlockLocationProtocolProtos.DeleteScmBlockResult.Result resultCode; - try { - // We delete blocks in an atomic operation to prevent getting - // into a state where only some of the blocks are deleted, - // which would leave the key in an inconsistent state. 
- scm.getScmBlockManager().deleteBlocks(keyBlocks.getBlockIDList()); - resultCode = ScmBlockLocationProtocolProtos.DeleteScmBlockResult - .Result.success; - } catch (SCMException scmEx) { - LOG.warn("Failed to delete block: {}", keyBlocks.getGroupID(), scmEx); - switch (scmEx.getResult()) { - case CHILL_MODE_EXCEPTION: - resultCode = ScmBlockLocationProtocolProtos.DeleteScmBlockResult - .Result.chillMode; - break; - case FAILED_TO_FIND_BLOCK: - resultCode = ScmBlockLocationProtocolProtos.DeleteScmBlockResult - .Result.errorNotFound; - break; - default: - resultCode = ScmBlockLocationProtocolProtos.DeleteScmBlockResult - .Result.unknownFailure; - } - } catch (IOException ex) { - LOG.warn("Failed to delete blocks for object key: {}", keyBlocks - .getGroupID(), ex); - resultCode = ScmBlockLocationProtocolProtos.DeleteScmBlockResult - .Result.unknownFailure; - } - List<DeleteBlockResult> blockResultList = new ArrayList<>(); - for (BlockID blockKey : keyBlocks.getBlockIDList()) { - blockResultList.add(new DeleteBlockResult(blockKey, resultCode)); - } - results.add(new DeleteBlockGroupResult(keyBlocks.getGroupID(), - blockResultList)); - } - return results; - } - - @Override - public ScmInfo getScmInfo() throws IOException { - ScmInfo.Builder builder = - new ScmInfo.Builder() - .setClusterId(scm.getScmStorage().getClusterID()) - .setScmId(scm.getScmStorage().getScmId()); - return builder.build(); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMChillModeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMChillModeManager.java deleted file mode 100644 index 3c1cc8ff3db70..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMChillModeManager.java +++ /dev/null @@ -1,247 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hdds.scm.server; - -import com.google.common.annotations.VisibleForTesting; -import java.util.EnumSet; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicLong; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeProtocolServer - .NodeRegistrationContainerReport; -import org.apache.hadoop.hdds.server.events.EventHandler; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * StorageContainerManager enters chill mode on startup to allow the system to - * reach a stable state before becoming fully functional. SCM will wait - * for certain resources to be reported before coming out of chill mode. - * - * ChillModeExitRule defines the format for new rules that must be satisfied - * to exit chill mode. - * ContainerChillModeRule defines the only exit criterion right now. - * On every new datanode registration event this class adds replicas - * for reported containers and validates whether the cutoff threshold for - * containers is met. - */ -public class SCMChillModeManager implements - EventHandler<NodeRegistrationContainerReport> { - - private static final Logger LOG = - LoggerFactory.getLogger(SCMChillModeManager.class); - private AtomicBoolean inChillMode = new AtomicBoolean(true); - private AtomicLong containerWithMinReplicas = new AtomicLong(0); - private Map<String, ChillModeExitRule> exitRules = new HashMap<>(1); - private Configuration config; - private static final String CONT_EXIT_RULE = "ContainerChillModeRule"; - private final EventQueue eventPublisher; - - SCMChillModeManager(Configuration conf, List<ContainerInfo> allContainers, - EventQueue eventQueue) { - this.config = conf; - this.eventPublisher = eventQueue; - exitRules - .put(CONT_EXIT_RULE, new ContainerChillModeRule(config, allContainers)); - if (!conf.getBoolean(HddsConfigKeys.HDDS_SCM_CHILLMODE_ENABLED, - HddsConfigKeys.HDDS_SCM_CHILLMODE_ENABLED_DEFAULT)) { - exitChillMode(eventQueue); - } - emitChillModeStatus(); - } - - /** - * Emit Chill mode status. - */ - @VisibleForTesting - public void emitChillModeStatus() { - eventPublisher.fireEvent(SCMEvents.CHILL_MODE_STATUS, inChillMode.get()); - } - - private void validateChillModeExitRules(EventPublisher eventQueue) { - for (ChillModeExitRule exitRule : exitRules.values()) { - if (!exitRule.validate()) { - return; - } - } - exitChillMode(eventQueue); - } - - /** - * Exit chill mode. It performs the following actions: - * 1. Sets the chill mode status to false. - * 2. Emits START_REPLICATION for ReplicationManager. - * 3. Cleans up resources. - * 4. Emits the chill mode status. - * @param eventQueue - */ - @VisibleForTesting - public void exitChillMode(EventPublisher eventQueue) { - LOG.info("SCM exiting chill mode."); - setInChillMode(false); - - // TODO: Remove handler registration as there is no need to listen to - // register events anymore. 
- - for (ChillModeExitRule e : exitRules.values()) { - e.cleanup(); - } - emitChillModeStatus(); - } - - @Override - public void onMessage( - NodeRegistrationContainerReport nodeRegistrationContainerReport, - EventPublisher publisher) { - if (getInChillMode()) { - exitRules.get(CONT_EXIT_RULE).process(nodeRegistrationContainerReport); - validateChillModeExitRules(publisher); - } - } - - public boolean getInChillMode() { - return inChillMode.get(); - } - - /** - * Set chill mode status. - */ - public void setInChillMode(boolean inChillMode) { - this.inChillMode.set(inChillMode); - } - - /** - * Interface for defining chill mode exit rules. - * - * @param - */ - public interface ChillModeExitRule { - - boolean validate(); - - void process(T report); - - void cleanup(); - } - - /** - * Class defining Chill mode exit criteria for Containers. - */ - public class ContainerChillModeRule implements - ChillModeExitRule { - - // Required cutoff % for containers with at least 1 reported replica. - private double chillModeCutoff; - // Containers read from scm db. - private Map containerMap; - private double maxContainer; - - public ContainerChillModeRule(Configuration conf, - List containers) { - chillModeCutoff = conf - .getDouble(HddsConfigKeys.HDDS_SCM_CHILLMODE_THRESHOLD_PCT, - HddsConfigKeys.HDDS_SCM_CHILLMODE_THRESHOLD_PCT_DEFAULT); - containerMap = new ConcurrentHashMap<>(); - if(containers != null) { - containers.forEach(c -> { - if (c != null) { - containerMap.put(c.getContainerID(), c); - } - }); - maxContainer = containers.size(); - } - } - - @Override - public boolean validate() { - if (maxContainer == 0) { - return true; - } - return getCurrentContainerThreshold() >= chillModeCutoff; - } - - @VisibleForTesting - public double getCurrentContainerThreshold() { - return (containerWithMinReplicas.doubleValue() / maxContainer); - } - - @Override - public void process(NodeRegistrationContainerReport reportsProto) { - if (maxContainer == 0) { - // No container to check. - return; - } - - reportsProto.getReport().getReportsList().forEach(c -> { - if (containerMap.containsKey(c.getContainerID())) { - if(containerMap.remove(c.getContainerID()) != null) { - containerWithMinReplicas.getAndAdd(1); - } - } - }); - if(inChillMode.get()) { - LOG.info("SCM in chill mode. {} % containers have at least one" - + " reported replica.", - (containerWithMinReplicas.get() / maxContainer) * 100); - } - } - - @Override - public void cleanup() { - containerMap.clear(); - } - } - - @VisibleForTesting - public static Logger getLogger() { - return LOG; - } - - @VisibleForTesting - public double getCurrentContainerThreshold() { - return ((ContainerChillModeRule) exitRules.get(CONT_EXIT_RULE)) - .getCurrentContainerThreshold(); - } - - /** - * Operations restricted in SCM chill mode. 
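As a quick worked example of the validate() arithmetic in ContainerChillModeRule above, assuming a cutoff of 0.99 (the shape of HDDS_SCM_CHILLMODE_THRESHOLD_PCT; the concrete numbers are illustrative):

    double chillModeCutoff = 0.99;           // assumed cutoff value
    double maxContainer = 200;               // containers loaded from the SCM db
    double containerWithMinReplicas = 198;   // containers with >= 1 reported replica
    // 198 / 200 = 0.99 >= 0.99, so the rule validates and chill mode can end.
    boolean canExit = (containerWithMinReplicas / maxContainer) >= chillModeCutoff;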
- */ - public static class ChillModeRestrictedOps { - private static EnumSet restrictedOps = EnumSet.noneOf(ScmOps.class); - - static { - restrictedOps.add(ScmOps.allocateBlock); - restrictedOps.add(ScmOps.allocateContainer); - } - - public static boolean isRestrictedInChillMode(ScmOps opName) { - return restrictedOps.contains(opName); - } - } - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java deleted file mode 100644 index 66136f172508c..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java +++ /dev/null @@ -1,380 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license - * agreements. See the NOTICE file distributed with this work for additional - * information regarding - * copyright ownership. The ASF licenses this file to you under the Apache - * License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the - * License. You may obtain a - * copy of the License at - * - *
http://www.apache.org/licenses/LICENSE-2.0 - * - *
Unless required by applicable law or agreed to in writing, software - * distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR - * CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.server; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.protobuf.BlockingService; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerLocationProtocolProtos; -import org.apache.hadoop.hdds.scm.HddsServerUtil; -import org.apache.hadoop.hdds.scm.ScmInfo; -import org.apache.hadoop.hdds.scm.ScmUtils; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes; -import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; -import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB; -import org.apache.hadoop.hdds.server.events.EventHandler; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.ipc.ProtobufRpcEngine; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.ozone.protocolPB - .StorageContainerLocationProtocolServerSideTranslatorPB; -import org.apache.hadoop.security.UserGroupInformation; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.net.InetSocketAddress; -import java.util.ArrayList; -import java.util.LinkedList; -import java.util.List; -import java.util.Set; -import java.util.TreeSet; - -import static org.apache.hadoop.hdds.protocol.proto - .StorageContainerLocationProtocolProtos - .StorageContainerLocationProtocolService.newReflectiveBlockingService; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_CLIENT_ADDRESS_KEY; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_HANDLER_COUNT_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_HANDLER_COUNT_KEY; -import static org.apache.hadoop.hdds.server.ServerUtils.updateRPCListenAddress; -import static org.apache.hadoop.hdds.scm.server.StorageContainerManager - .startRpcServer; - -/** - * The RPC server that listens to requests from clients. 
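Before reading the handlers below, it helps to see the guard they share: while the precheck object reports chill mode, restricted operations are rejected. A minimal sketch, with the ScmUtils.preCheck and ChillModePrecheck shapes inferred from their call sites in this patch:

    ChillModePrecheck precheck = new ChillModePrecheck();
    precheck.setInChillMode(true);
    try {
      ScmUtils.preCheck(ScmOps.allocateContainer, precheck);
    } catch (SCMException e) {
      // Expected: allocateContainer is listed in ChillModeRestrictedOps,
      // so it is refused with CHILL_MODE_EXCEPTION while chill mode is on.
    }
    precheck.setInChillMode(false);
    ScmUtils.preCheck(ScmOps.allocateContainer, precheck); // passes now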
- */ -public class SCMClientProtocolServer implements - StorageContainerLocationProtocol, EventHandler { - private static final Logger LOG = - LoggerFactory.getLogger(SCMClientProtocolServer.class); - private final RPC.Server clientRpcServer; - private final InetSocketAddress clientRpcAddress; - private final StorageContainerManager scm; - private final OzoneConfiguration conf; - private ChillModePrecheck chillModePrecheck = new ChillModePrecheck(); - - public SCMClientProtocolServer(OzoneConfiguration conf, - StorageContainerManager scm) throws IOException { - this.scm = scm; - this.conf = conf; - final int handlerCount = - conf.getInt(OZONE_SCM_HANDLER_COUNT_KEY, - OZONE_SCM_HANDLER_COUNT_DEFAULT); - RPC.setProtocolEngine(conf, StorageContainerLocationProtocolPB.class, - ProtobufRpcEngine.class); - - // SCM Container Service RPC - BlockingService storageProtoPbService = - newReflectiveBlockingService( - new StorageContainerLocationProtocolServerSideTranslatorPB(this)); - - final InetSocketAddress scmAddress = HddsServerUtil - .getScmClientBindAddress(conf); - clientRpcServer = - startRpcServer( - conf, - scmAddress, - StorageContainerLocationProtocolPB.class, - storageProtoPbService, - handlerCount); - clientRpcAddress = - updateRPCListenAddress(conf, OZONE_SCM_CLIENT_ADDRESS_KEY, - scmAddress, clientRpcServer); - - } - - public RPC.Server getClientRpcServer() { - return clientRpcServer; - } - - public InetSocketAddress getClientRpcAddress() { - return clientRpcAddress; - } - - public void start() { - LOG.info( - StorageContainerManager.buildRpcServerStartMessage( - "RPC server for Client ", getClientRpcAddress())); - getClientRpcServer().start(); - } - - public void stop() { - try { - LOG.info("Stopping the RPC server for Client Protocol"); - getClientRpcServer().stop(); - } catch (Exception ex) { - LOG.error("Client Protocol RPC stop failed.", ex); - } - IOUtils.cleanupWithLogger(LOG, scm.getScmNodeManager()); - } - - public void join() throws InterruptedException { - LOG.trace("Join RPC server for Client Protocol"); - getClientRpcServer().join(); - } - - @VisibleForTesting - public String getRpcRemoteUsername() { - UserGroupInformation user = ProtobufRpcEngine.Server.getRemoteUser(); - return user == null ? 
null : user.getUserName(); - } - - @Override - public ContainerWithPipeline allocateContainer(HddsProtos.ReplicationType - replicationType, HddsProtos.ReplicationFactor factor, - String owner) throws IOException { - ScmUtils.preCheck(ScmOps.allocateContainer, chillModePrecheck); - String remoteUser = getRpcRemoteUsername(); - getScm().checkAdminAccess(remoteUser); - - return scm.getScmContainerManager() - .allocateContainer(replicationType, factor, owner); - } - - @Override - public ContainerInfo getContainer(long containerID) throws IOException { - String remoteUser = getRpcRemoteUsername(); - getScm().checkAdminAccess(remoteUser); - return scm.getScmContainerManager() - .getContainer(containerID); - } - - @Override - public ContainerWithPipeline getContainerWithPipeline(long containerID) - throws IOException { - if (chillModePrecheck.isInChillMode()) { - ContainerInfo contInfo = scm.getScmContainerManager() - .getContainer(containerID); - if (contInfo.isContainerOpen()) { - if (!hasRequiredReplicas(contInfo)) { - throw new SCMException("Open container " + containerID + " doesn't" - + " have enough replicas to service this operation in " - + "Chill mode.", ResultCodes.CHILL_MODE_EXCEPTION); - } - } - } - String remoteUser = getRpcRemoteUsername(); - getScm().checkAdminAccess(remoteUser); - return scm.getScmContainerManager() - .getContainerWithPipeline(containerID); - } - - /** - * Check if container reported replicas are equal or greater than required - * replication factor. - */ - private boolean hasRequiredReplicas(ContainerInfo contInfo) { - try{ - return getScm().getScmContainerManager().getStateManager() - .getContainerReplicas(contInfo.containerID()) - .size() >= contInfo.getReplicationFactor().getNumber(); - } catch (SCMException ex) { - // getContainerReplicas throws exception if no replica's exist for given - // container. - return false; - } - } - - @Override - public List listContainer(long startContainerID, - int count) throws IOException { - return scm.getScmContainerManager(). 
- listContainer(startContainerID, count); - } - - @Override - public void deleteContainer(long containerID) throws IOException { - String remoteUser = getRpcRemoteUsername(); - getScm().checkAdminAccess(remoteUser); - scm.getScmContainerManager().deleteContainer(containerID); - - } - - @Override - public List queryNode(HddsProtos.NodeState state, - HddsProtos.QueryScope queryScope, String poolName) throws - IOException { - - if (queryScope == HddsProtos.QueryScope.POOL) { - throw new IllegalArgumentException("Not Supported yet"); - } - - List result = new ArrayList<>(); - queryNode(state).forEach(node -> result.add(HddsProtos.Node.newBuilder() - .setNodeID(node.getProtoBufMessage()) - .addNodeStates(state) - .build())); - - return result; - - } - - @Override - public void notifyObjectStageChange(StorageContainerLocationProtocolProtos - .ObjectStageChangeRequestProto.Type type, long id, - StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto.Op - op, StorageContainerLocationProtocolProtos - .ObjectStageChangeRequestProto.Stage stage) throws IOException { - - LOG.info("Object type {} id {} op {} new stage {}", type, id, op, - stage); - if (type == StorageContainerLocationProtocolProtos - .ObjectStageChangeRequestProto.Type.container) { - if (op == StorageContainerLocationProtocolProtos - .ObjectStageChangeRequestProto.Op.create) { - if (stage == StorageContainerLocationProtocolProtos - .ObjectStageChangeRequestProto.Stage.begin) { - scm.getScmContainerManager().updateContainerState(id, HddsProtos - .LifeCycleEvent.CREATE); - } else { - scm.getScmContainerManager().updateContainerState(id, HddsProtos - .LifeCycleEvent.CREATED); - } - } else { - if (op == StorageContainerLocationProtocolProtos - .ObjectStageChangeRequestProto.Op.close) { - if (stage == StorageContainerLocationProtocolProtos - .ObjectStageChangeRequestProto.Stage.begin) { - scm.getScmContainerManager().updateContainerState(id, HddsProtos - .LifeCycleEvent.FINALIZE); - } else { - scm.getScmContainerManager().updateContainerState(id, HddsProtos - .LifeCycleEvent.CLOSE); - } - } - } - } // else if (type == ObjectStageChangeRequestProto.Type.pipeline) { - // TODO: pipeline state update will be addressed in future patch. - // } - - } - - @Override - public Pipeline createReplicationPipeline(HddsProtos.ReplicationType type, - HddsProtos.ReplicationFactor factor, HddsProtos.NodePool nodePool) - throws IOException { - // TODO: will be addressed in future patch. - // This is needed only for debugging purposes to make sure cluster is - // working correctly. - return null; - } - - @Override - public ScmInfo getScmInfo() throws IOException { - ScmInfo.Builder builder = - new ScmInfo.Builder() - .setClusterId(scm.getScmStorage().getClusterID()) - .setScmId(scm.getScmStorage().getScmId()); - return builder.build(); - } - - /** - * Check if SCM is in chill mode. - * - * @return Returns true if SCM is in chill mode else returns false. - * @throws IOException - */ - @Override - public boolean inChillMode() throws IOException { - return scm.isInChillMode(); - } - - /** - * Force SCM out of Chill mode. - * - * @return returns true if operation is successful. - * @throws IOException - */ - @Override - public boolean forceExitChillMode() throws IOException { - return scm.exitChillMode(); - } - - /** - * Queries a list of Node that match a set of statuses. - * - *
For example, if the nodeStatuses is HEALTHY and RAFT_MEMBER, then - * this call will return all - * healthy nodes which are members in a Raft pipeline. - * - *
Right now we don't support operators, so we assume an AND - * operation between the - * given states. - * - * @param state - NodeStates. - * @return List of Datanodes. - */ - public List<DatanodeDetails> queryNode(HddsProtos.NodeState state) { - Preconditions.checkNotNull(state, "Node Query set cannot be null"); - return new LinkedList<>(queryNodeState(state)); - } - - @VisibleForTesting - public StorageContainerManager getScm() { - return scm; - } - - /** - * Set chill mode status based on SCMEvents.CHILL_MODE_STATUS event. - */ - @Override - public void onMessage(Boolean inChillMode, EventPublisher publisher) { - chillModePrecheck.setInChillMode(inChillMode); - } - - /** - * Return the current chill mode status. - */ - public boolean getChillModeStatus() { - return chillModePrecheck.isInChillMode(); - } - - - /** - * Query the System for Nodes. - * - * @param nodeState - NodeState that we are interested in matching. - * @return Set of Datanodes that match the NodeState. - */ - private Set<DatanodeDetails> queryNodeState(HddsProtos.NodeState nodeState) { - Set<DatanodeDetails> returnSet = new TreeSet<>(); - List<DatanodeDetails> tmp = scm.getScmNodeManager().getNodes(nodeState); - if ((tmp != null) && (tmp.size() > 0)) { - returnSet.addAll(tmp); - } - return returnSet; - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java deleted file mode 100644 index d9a0875385b87..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java +++ /dev/null @@ -1,231 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.server; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineActionsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerActionsProto; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.server.events.EventPublisher; - -import com.google.protobuf.GeneratedMessage; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.List; - -import static org.apache.hadoop.hdds.scm.events.SCMEvents.CONTAINER_ACTIONS; -import static org.apache.hadoop.hdds.scm.events.SCMEvents.CONTAINER_REPORT; -import static org.apache.hadoop.hdds.scm.events.SCMEvents.NODE_REPORT; -import static org.apache.hadoop.hdds.scm.events.SCMEvents.CMD_STATUS_REPORT; -import static org.apache.hadoop.hdds.scm.events.SCMEvents.PIPELINE_ACTIONS; -import static org.apache.hadoop.hdds.scm.events.SCMEvents.PIPELINE_REPORT; - -/** - * This class is responsible for dispatching heartbeat from datanode to - * appropriate EventHandler at SCM. - */ -public final class SCMDatanodeHeartbeatDispatcher { - - private static final Logger LOG = - LoggerFactory.getLogger(SCMDatanodeHeartbeatDispatcher.class); - - private final NodeManager nodeManager; - private final EventPublisher eventPublisher; - - - public SCMDatanodeHeartbeatDispatcher(NodeManager nodeManager, - EventPublisher eventPublisher) { - Preconditions.checkNotNull(nodeManager); - Preconditions.checkNotNull(eventPublisher); - this.nodeManager = nodeManager; - this.eventPublisher = eventPublisher; - } - - - /** - * Dispatches heartbeat to registered event handlers. - * - * @param heartbeat heartbeat to be dispatched. - * - * @return list of SCMCommand - */ - public List dispatch(SCMHeartbeatRequestProto heartbeat) { - DatanodeDetails datanodeDetails = - DatanodeDetails.getFromProtoBuf(heartbeat.getDatanodeDetails()); - // should we dispatch heartbeat through eventPublisher? 
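The comment above asks whether the heartbeat itself should flow through the event queue; as written, only the unpacked reports do. For context, a consumer subscribes roughly as below (a sketch: LOG is assumed in scope, and the lambda form assumes EventHandler is a single-method interface, which its usage elsewhere in this patch suggests):

    // Illustrative wiring, mirroring the addHandler calls made in
    // StorageContainerManager later in this patch.
    EventQueue eventQueue = new EventQueue();
    eventQueue.addHandler(SCMEvents.NODE_REPORT,
        (nodeReport, publisher) ->
            LOG.info("Node report from {}", nodeReport.getDatanodeDetails()));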
- List commands = nodeManager.processHeartbeat(datanodeDetails); - if (heartbeat.hasNodeReport()) { - LOG.debug("Dispatching Node Report."); - eventPublisher.fireEvent(NODE_REPORT, - new NodeReportFromDatanode(datanodeDetails, - heartbeat.getNodeReport())); - } - - if (heartbeat.hasContainerReport()) { - LOG.debug("Dispatching Container Report."); - eventPublisher.fireEvent(CONTAINER_REPORT, - new ContainerReportFromDatanode(datanodeDetails, - heartbeat.getContainerReport())); - - } - - if (heartbeat.hasContainerActions()) { - LOG.debug("Dispatching Container Actions."); - eventPublisher.fireEvent(CONTAINER_ACTIONS, - new ContainerActionsFromDatanode(datanodeDetails, - heartbeat.getContainerActions())); - } - - if (heartbeat.hasPipelineReports()) { - LOG.debug("Dispatching Pipeline Report."); - eventPublisher.fireEvent(PIPELINE_REPORT, - new PipelineReportFromDatanode(datanodeDetails, - heartbeat.getPipelineReports())); - - } - - if (heartbeat.hasPipelineActions()) { - LOG.debug("Dispatching Pipeline Actions."); - eventPublisher.fireEvent(PIPELINE_ACTIONS, - new PipelineActionsFromDatanode(datanodeDetails, - heartbeat.getPipelineActions())); - } - - if (heartbeat.getCommandStatusReportsCount() != 0) { - for (CommandStatusReportsProto commandStatusReport : heartbeat - .getCommandStatusReportsList()) { - eventPublisher.fireEvent(CMD_STATUS_REPORT, - new CommandStatusReportFromDatanode(datanodeDetails, - commandStatusReport)); - } - } - - return commands; - } - - /** - * Wrapper class for events with the datanode origin. - */ - public static class ReportFromDatanode { - - private final DatanodeDetails datanodeDetails; - - private final T report; - - public ReportFromDatanode(DatanodeDetails datanodeDetails, T report) { - this.datanodeDetails = datanodeDetails; - this.report = report; - } - - public DatanodeDetails getDatanodeDetails() { - return datanodeDetails; - } - - public T getReport() { - return report; - } - } - - /** - * Node report event payload with origin. - */ - public static class NodeReportFromDatanode - extends ReportFromDatanode { - - public NodeReportFromDatanode(DatanodeDetails datanodeDetails, - NodeReportProto report) { - super(datanodeDetails, report); - } - } - - /** - * Container report event payload with origin. - */ - public static class ContainerReportFromDatanode - extends ReportFromDatanode { - - public ContainerReportFromDatanode(DatanodeDetails datanodeDetails, - ContainerReportsProto report) { - super(datanodeDetails, report); - } - } - - /** - * Container action event payload with origin. - */ - public static class ContainerActionsFromDatanode - extends ReportFromDatanode { - - public ContainerActionsFromDatanode(DatanodeDetails datanodeDetails, - ContainerActionsProto actions) { - super(datanodeDetails, actions); - } - } - - /** - * Pipeline report event payload with origin. - */ - public static class PipelineReportFromDatanode - extends ReportFromDatanode { - - public PipelineReportFromDatanode(DatanodeDetails datanodeDetails, - PipelineReportsProto report) { - super(datanodeDetails, report); - } - } - - /** - * Pipeline action event payload with origin. - */ - public static class PipelineActionsFromDatanode - extends ReportFromDatanode { - - public PipelineActionsFromDatanode(DatanodeDetails datanodeDetails, - PipelineActionsProto actions) { - super(datanodeDetails, actions); - } - } - - /** - * Container report event payload with origin. 
- */ - public static class CommandStatusReportFromDatanode - extends ReportFromDatanode { - - public CommandStatusReportFromDatanode(DatanodeDetails datanodeDetails, - CommandStatusReportsProto report) { - super(datanodeDetails, report); - } - } - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java deleted file mode 100644 index 9c6fa88e0cebe..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java +++ /dev/null @@ -1,315 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license - * agreements. See the NOTICE file distributed with this work for additional - * information regarding - * copyright ownership. The ASF licenses this file to you under the Apache - * License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the - * License. You may obtain a - * copy of the License at - * - *
http://www.apache.org/licenses/LICENSE-2.0 - * - *
Unless required by applicable law or agreed to in writing, software - * distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR - * CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.server; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.protobuf.BlockingService; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; - -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ReregisterCommandProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; - -import static org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto - .Type.closeContainerCommand; -import static org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto - .Type.deleteBlocksCommand; -import static org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type - .replicateContainerCommand; -import static org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto - .Type.reregisterCommand; - - - -import org.apache.hadoop.hdds.scm.HddsServerUtil; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher - .ReportFromDatanode; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher - .PipelineReportFromDatanode; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.ipc.ProtobufRpcEngine; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol; -import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; -import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand; -import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand; -import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB; -import org.apache.hadoop.ozone.protocolPB - 
.StorageContainerDatanodeProtocolServerSideTranslatorPB; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.net.InetSocketAddress; -import java.util.LinkedList; -import java.util.List; -import java.util.stream.Collectors; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_KEY; - -import static org.apache.hadoop.hdds.scm.events.SCMEvents.PIPELINE_REPORT; -import static org.apache.hadoop.hdds.scm.server.StorageContainerManager.startRpcServer; -import static org.apache.hadoop.hdds.server.ServerUtils.updateRPCListenAddress; - -/** - * Protocol Handler for Datanode Protocol. - */ -public class SCMDatanodeProtocolServer implements - StorageContainerDatanodeProtocol { - - private static final Logger LOG = LoggerFactory.getLogger( - SCMDatanodeProtocolServer.class); - - /** - * The RPC server that listens to requests from DataNodes. - */ - private final RPC.Server datanodeRpcServer; - - private final StorageContainerManager scm; - private final InetSocketAddress datanodeRpcAddress; - private final SCMDatanodeHeartbeatDispatcher heartbeatDispatcher; - private final EventPublisher eventPublisher; - - public SCMDatanodeProtocolServer(final OzoneConfiguration conf, - StorageContainerManager scm, EventPublisher eventPublisher) - throws IOException { - - Preconditions.checkNotNull(scm, "SCM cannot be null"); - Preconditions.checkNotNull(eventPublisher, "EventPublisher cannot be null"); - - this.scm = scm; - this.eventPublisher = eventPublisher; - final int handlerCount = - conf.getInt(OZONE_SCM_HANDLER_COUNT_KEY, - OZONE_SCM_HANDLER_COUNT_DEFAULT); - - heartbeatDispatcher = new SCMDatanodeHeartbeatDispatcher( - scm.getScmNodeManager(), eventPublisher); - - RPC.setProtocolEngine(conf, StorageContainerDatanodeProtocolPB.class, - ProtobufRpcEngine.class); - BlockingService dnProtoPbService = - StorageContainerDatanodeProtocolProtos - .StorageContainerDatanodeProtocolService - .newReflectiveBlockingService( - new StorageContainerDatanodeProtocolServerSideTranslatorPB( - this)); - - InetSocketAddress datanodeRpcAddr = - HddsServerUtil.getScmDataNodeBindAddress(conf); - - datanodeRpcServer = - startRpcServer( - conf, - datanodeRpcAddr, - StorageContainerDatanodeProtocolPB.class, - dnProtoPbService, - handlerCount); - - datanodeRpcAddress = - updateRPCListenAddress( - conf, OZONE_SCM_DATANODE_ADDRESS_KEY, datanodeRpcAddr, - datanodeRpcServer); - - } - - public void start() { - LOG.info( - StorageContainerManager.buildRpcServerStartMessage( - "RPC server for DataNodes", datanodeRpcAddress)); - datanodeRpcServer.start(); - } - - public InetSocketAddress getDatanodeRpcAddress() { - return datanodeRpcAddress; - } - - @Override - public SCMVersionResponseProto getVersion(SCMVersionRequestProto - versionRequest) - throws IOException { - return scm.getScmNodeManager().getVersion(versionRequest) - .getProtobufMessage(); - } - - @Override - public SCMRegisteredResponseProto register( - HddsProtos.DatanodeDetailsProto datanodeDetailsProto, - NodeReportProto nodeReport, - ContainerReportsProto containerReportsProto, - PipelineReportsProto pipelineReportsProto) - throws IOException { - DatanodeDetails datanodeDetails = DatanodeDetails - .getFromProtoBuf(datanodeDetailsProto); - // TODO : Return the list of Nodes that forms the SCM HA. 
- RegisteredCommand registeredCommand = scm.getScmNodeManager() - .register(datanodeDetails, nodeReport, pipelineReportsProto); - if (registeredCommand.getError() - == SCMRegisteredResponseProto.ErrorCode.success) { - scm.getScmContainerManager().processContainerReports(datanodeDetails, - containerReportsProto, true); - eventPublisher.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT, - new NodeRegistrationContainerReport(datanodeDetails, - containerReportsProto)); - eventPublisher.fireEvent(PIPELINE_REPORT, - new PipelineReportFromDatanode(datanodeDetails, - pipelineReportsProto)); - } - return getRegisteredResponse(registeredCommand); - } - - @VisibleForTesting - public static SCMRegisteredResponseProto getRegisteredResponse( - RegisteredCommand cmd) { - return SCMRegisteredResponseProto.newBuilder() - // TODO : Fix this later when we have multiple SCM support. - // .setAddressList(addressList) - .setErrorCode(cmd.getError()) - .setClusterID(cmd.getClusterID()) - .setDatanodeUUID(cmd.getDatanodeUUID()) - .build(); - } - - @Override - public SCMHeartbeatResponseProto sendHeartbeat( - SCMHeartbeatRequestProto heartbeat) throws IOException { - List<SCMCommandProto> cmdResponses = new LinkedList<>(); - for (SCMCommand cmd : heartbeatDispatcher.dispatch(heartbeat)) { - cmdResponses.add(getCommandResponse(cmd)); - } - return SCMHeartbeatResponseProto.newBuilder() - .setDatanodeUUID(heartbeat.getDatanodeDetails().getUuid()) - .addAllCommands(cmdResponses).build(); - } - - /** - * Returns a SCMCommandProto response built from the SCM command. - * - * @param cmd - SCM command - * @return SCMCommandProto - * @throws IOException - */ - @VisibleForTesting - public SCMCommandProto getCommandResponse(SCMCommand cmd) - throws IOException { - SCMCommandProto.Builder builder = - SCMCommandProto.newBuilder(); - switch (cmd.getType()) { - case reregisterCommand: - return builder - .setCommandType(reregisterCommand) - .setReregisterCommandProto(ReregisterCommandProto - .getDefaultInstance()) - .build(); - case deleteBlocksCommand: - // Once SCM sends out the deletion message, increment the count. - // This is done here instead of when SCM receives the ACK, because - // the DN might not be able to respond with the ACK for some time. In - // case it times out, SCM needs to re-send the message a few more times. - List<Long> txs = - ((DeleteBlocksCommand) cmd) - .blocksTobeDeleted() - .stream() - .map(tx -> tx.getTxID()) - .collect(Collectors.toList()); - scm.getScmBlockManager().getDeletedBlockLog().incrementCount(txs); - return builder - .setCommandType(deleteBlocksCommand) - .setDeleteBlocksCommandProto(((DeleteBlocksCommand) cmd).getProto()) - .build(); - case closeContainerCommand: - return builder - .setCommandType(closeContainerCommand) - .setCloseContainerCommandProto( - ((CloseContainerCommand) cmd).getProto()) - .build(); - case replicateContainerCommand: - return builder - .setCommandType(replicateContainerCommand) - .setReplicateContainerCommandProto( - ((ReplicateContainerCommand)cmd).getProto()) - .build(); - default: - throw new IllegalArgumentException("Not implemented"); - } - } - - - public void join() throws InterruptedException { - LOG.trace("Join RPC server for DataNodes"); - datanodeRpcServer.join(); - } - - public void stop() { - try { - LOG.info("Stopping the RPC server for DataNodes"); - datanodeRpcServer.stop(); - } catch (Exception ex) { - LOG.error("datanodeRpcServer stop failed.", ex); - } - IOUtils.cleanupWithLogger(LOG, scm.getScmNodeManager()); - } - - /** - * Container report payload fired when a datanode registers.
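Seen from the datanode side, the exchange implemented by sendHeartbeat and getCommandResponse above has roughly the shape below; rpcEndpoint stands in for the datanode's client-side proxy, and the loop is illustrative rather than the actual datanode state machine:

    SCMHeartbeatRequestProto request = SCMHeartbeatRequestProto.newBuilder()
        .setDatanodeDetails(datanodeDetails.getProtoBufMessage())
        .build(); // node/container/pipeline reports would be attached here
    SCMHeartbeatResponseProto response = rpcEndpoint.sendHeartbeat(request);
    for (SCMCommandProto cmd : response.getCommandsList()) {
      switch (cmd.getCommandType()) {
      case reregisterCommand:
        // register() again, then resume heartbeats
        break;
      default:
        // deleteBlocksCommand, closeContainerCommand, ...
        break;
      }
    }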
- */ - public static class NodeRegistrationContainerReport extends - ReportFromDatanode { - - public NodeRegistrationContainerReport(DatanodeDetails datanodeDetails, - ContainerReportsProto report) { - super(datanodeDetails, report); - } - } - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java deleted file mode 100644 index 22d4d560dd58c..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java +++ /dev/null @@ -1,50 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.server; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdds.server.ServiceRuntimeInfo; - -import java.util.Map; - -/** - * - * This is the JMX management interface for scm information. - */ -@InterfaceAudience.Private -public interface SCMMXBean extends ServiceRuntimeInfo { - - /** - * Get the SCM RPC server port that used to listen to datanode requests. - * @return SCM datanode RPC server port - */ - String getDatanodeRpcPort(); - - /** - * Get the SCM RPC server port that used to listen to client requests. - * @return SCM client RPC server port - */ - String getClientRpcPort(); - - /** - * Get container report info that includes container IO stats of nodes. - * @return The datanodeUUid to report json string mapping - */ - Map getContainerReport(); -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStorage.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStorage.java deleted file mode 100644 index be6c1af18681f..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStorage.java +++ /dev/null @@ -1,73 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
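Because SCMMXBean extends ServiceRuntimeInfo, the getters above surface as JMX attributes once registerMXBean() (later in this patch) has run. A hypothetical lookup; the ObjectName property order may differ from what MBeans.register actually produces, and exception handling is elided:

    // Uses javax.management and java.lang.management.ManagementFactory;
    // attribute names derive from the getters (getClientRpcPort -> "ClientRpcPort").
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName name = new ObjectName(
        "Hadoop:service=StorageContainerManager,"
            + "name=StorageContainerManagerInfo,component=ServerRuntime");
    String clientPort = (String) mbs.getAttribute(name, "ClientRpcPort");
    String datanodePort = (String) mbs.getAttribute(name, "DatanodeRpcPort");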
- */ -package org.apache.hadoop.hdds.scm.server; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType; -import org.apache.hadoop.ozone.common.Storage; - -import java.io.IOException; -import java.util.Properties; -import java.util.UUID; - -import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath; -import static org.apache.hadoop.ozone.OzoneConsts.SCM_ID; -import static org.apache.hadoop.ozone.OzoneConsts.STORAGE_DIR; - -/** - * SCMStorage is responsible for management of the StorageDirectories used by - * the SCM. - */ -public class SCMStorage extends Storage { - - /** - * Construct SCMStorage. - * @throws IOException if any directories are inaccessible. - */ - public SCMStorage(OzoneConfiguration conf) throws IOException { - super(NodeType.SCM, getOzoneMetaDirPath(conf), STORAGE_DIR); - } - - public void setScmId(String scmId) throws IOException { - if (getState() == StorageState.INITIALIZED) { - throw new IOException("SCM is already initialized."); - } else { - getStorageInfo().setProperty(SCM_ID, scmId); - } - } - - /** - * Retrieves the SCM ID from the version file. - * @return SCM_ID - */ - public String getScmId() { - return getStorageInfo().getProperty(SCM_ID); - } - - @Override - protected Properties getNodeProperties() { - String scmId = getScmId(); - if (scmId == null) { - scmId = UUID.randomUUID().toString(); - } - Properties scmProperties = new Properties(); - scmProperties.setProperty(SCM_ID, scmId); - return scmProperties; - } - -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java deleted file mode 100644 index a6a967c3764b1..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ /dev/null @@ -1,942 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license - * agreements. See the NOTICE file distributed with this work for additional - * information regarding - * copyright ownership. The ASF licenses this file to you under the Apache - * License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the - * License. You may obtain a - * copy of the License at - * - *
http://www.apache.org/licenses/LICENSE-2.0 - * - *
Unless required by applicable law or agreed to in writing, software - * distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR - * CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.server; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.cache.Cache; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.RemovalListener; -import com.google.common.cache.RemovalNotification; -import com.google.protobuf.BlockingService; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.block.BlockManager; -import org.apache.hadoop.hdds.scm.block.BlockManagerImpl; -import org.apache.hadoop.hdds.scm.block.DeletedBlockLogImpl; -import org.apache.hadoop.hdds.scm.block.PendingDeleteHandler; -import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler; -import org.apache.hadoop.hdds.scm.container.CloseContainerEventHandler; -import org.apache.hadoop.hdds.scm.container.CloseContainerWatcher; -import org.apache.hadoop.hdds.scm.container.ContainerActionsHandler; -import org.apache.hadoop.hdds.scm.container.ContainerMapping; -import org.apache.hadoop.hdds.scm.container.ContainerReportHandler; -import org.apache.hadoop.hdds.scm.container.Mapping; -import org.apache.hadoop.hdds.scm.container.replication - .ReplicationActivityStatus; -import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.placement.algorithms - .ContainerPlacementPolicy; -import org.apache.hadoop.hdds.scm.container.placement.algorithms - .SCMContainerPlacementCapacity; -import org.apache.hadoop.hdds.scm.container.placement.metrics.ContainerStat; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMMetrics; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes; -import org.apache.hadoop.hdds.scm.node.DeadNodeHandler; -import org.apache.hadoop.hdds.scm.node.NewNodeHandler; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.node.NodeReportHandler; -import org.apache.hadoop.hdds.scm.node.SCMNodeManager; -import org.apache.hadoop.hdds.scm.node.StaleNodeHandler; -import org.apache.hadoop.hdds.scm.pipelines.PipelineCloseHandler; -import org.apache.hadoop.hdds.scm.pipelines.PipelineActionEventHandler; -import org.apache.hadoop.hdds.scm.pipelines.PipelineReportHandler; -import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.ozone.protocol.commands.RetriableDatanodeEventWatcher; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.metrics2.util.MBeans; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import 
org.apache.hadoop.ozone.common.Storage.StorageState; -import org.apache.hadoop.ozone.common.StorageInfo; -import org.apache.hadoop.ozone.lease.LeaseManager; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.GenericOptionsParser; -import org.apache.hadoop.util.StringUtils; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .HDDS_SCM_WATCHER_TIMEOUT_DEFAULT; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.management.ObjectName; -import java.io.IOException; -import java.io.PrintStream; -import java.net.InetSocketAddress; -import java.util.Collection; -import java.util.HashMap; -import java.util.Map; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.TimeUnit; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_MB; - -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED; -import static org.apache.hadoop.util.ExitUtil.terminate; - -/** - * StorageContainerManager is the main entry point for the service that - * provides information about - * which SCM nodes host containers. - * - *
DataNodes report to StorageContainerManager using heartbeat messages. - * SCM allocates containers - * and returns a pipeline. - * - *
A client once it gets a pipeline (a list of datanodes) will connect to - * the datanodes and - * create a container, which then can be used to store data. - */ -@InterfaceAudience.LimitedPrivate({"HDFS", "CBLOCK", "OZONE", "HBASE"}) -public final class StorageContainerManager extends ServiceRuntimeInfoImpl - implements SCMMXBean { - - private static final Logger LOG = LoggerFactory - .getLogger(StorageContainerManager.class); - private static final String USAGE = - "Usage: \n ozone scm [genericOptions] " - + "[ " - + StartupOption.INIT.getName() - + " [ " - + StartupOption.CLUSTERID.getName() - + " ] ]\n " - + "ozone scm [genericOptions] [ " - + StartupOption.GENCLUSTERID.getName() - + " ]\n " - + "ozone scm [ " - + StartupOption.HELP.getName() - + " ]\n"; - /** - * SCM metrics. - */ - private static SCMMetrics metrics; - - /* - * RPC Endpoints exposed by SCM. - */ - private final SCMDatanodeProtocolServer datanodeProtocolServer; - private final SCMBlockProtocolServer blockProtocolServer; - private final SCMClientProtocolServer clientProtocolServer; - - /* - * State Managers of SCM. - */ - private final NodeManager scmNodeManager; - private final Mapping scmContainerManager; - private final BlockManager scmBlockManager; - private final SCMStorage scmStorage; - - private final EventQueue eventQueue; - /* - * HTTP endpoint for JMX access. - */ - private final StorageContainerManagerHttpServer httpServer; - /** - * SCM super user. - */ - private final String scmUsername; - private final Collection scmAdminUsernames; - /** - * SCM mxbean. - */ - private ObjectName scmInfoBeanName; - /** - * Key = DatanodeUuid, value = ContainerStat. - */ - private Cache containerReportCache; - - private final ReplicationManager replicationManager; - - private final LeaseManager commandWatcherLeaseManager; - - private final ReplicationActivityStatus replicationStatus; - private final SCMChillModeManager scmChillModeManager; - - /** - * Creates a new StorageContainerManager. Configuration will be updated - * with information on the - * actual listening addresses used for RPC servers. 
- * - * @param conf configuration - */ - private StorageContainerManager(OzoneConfiguration conf) throws IOException { - - final int cacheSize = conf.getInt(OZONE_SCM_DB_CACHE_SIZE_MB, - OZONE_SCM_DB_CACHE_SIZE_DEFAULT); - - StorageContainerManager.initMetrics(); - initContainerReportCache(conf); - - scmStorage = new SCMStorage(conf); - if (scmStorage.getState() != StorageState.INITIALIZED) { - throw new SCMException("SCM not initialized.", ResultCodes - .SCM_NOT_INITIALIZED); - } - - eventQueue = new EventQueue(); - - scmNodeManager = new SCMNodeManager( - conf, scmStorage.getClusterID(), this, eventQueue); - scmContainerManager = new ContainerMapping( - conf, getScmNodeManager(), cacheSize, eventQueue); - scmBlockManager = new BlockManagerImpl( - conf, getScmNodeManager(), scmContainerManager, eventQueue); - - replicationStatus = new ReplicationActivityStatus(); - - CloseContainerEventHandler closeContainerHandler = - new CloseContainerEventHandler(scmContainerManager); - NodeReportHandler nodeReportHandler = - new NodeReportHandler(scmNodeManager); - PipelineReportHandler pipelineReportHandler = - new PipelineReportHandler( - scmContainerManager.getPipelineSelector()); - CommandStatusReportHandler cmdStatusReportHandler = - new CommandStatusReportHandler(); - - NewNodeHandler newNodeHandler = new NewNodeHandler(scmNodeManager); - StaleNodeHandler staleNodeHandler = - new StaleNodeHandler(scmContainerManager.getPipelineSelector()); - DeadNodeHandler deadNodeHandler = new DeadNodeHandler(scmNodeManager, - getScmContainerManager().getStateManager()); - ContainerActionsHandler actionsHandler = new ContainerActionsHandler(); - PendingDeleteHandler pendingDeleteHandler = - new PendingDeleteHandler(scmBlockManager.getSCMBlockDeletingService()); - - ContainerReportHandler containerReportHandler = - new ContainerReportHandler(scmContainerManager, scmNodeManager, - replicationStatus); - scmChillModeManager = new SCMChillModeManager(conf, - getScmContainerManager().getStateManager().getAllContainers(), - eventQueue); - PipelineActionEventHandler pipelineActionEventHandler = - new PipelineActionEventHandler(); - - PipelineCloseHandler pipelineCloseHandler = - new PipelineCloseHandler(scmContainerManager.getPipelineSelector()); - - long watcherTimeout = - conf.getTimeDuration(ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT, - HDDS_SCM_WATCHER_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS); - - commandWatcherLeaseManager = new LeaseManager<>("CommandWatcher", - watcherTimeout); - - RetriableDatanodeEventWatcher retriableDatanodeEventWatcher = - new RetriableDatanodeEventWatcher<>( - SCMEvents.RETRIABLE_DATANODE_COMMAND, - SCMEvents.DELETE_BLOCK_STATUS, - commandWatcherLeaseManager); - retriableDatanodeEventWatcher.start(eventQueue); - - //TODO: support configurable containerPlacement policy - ContainerPlacementPolicy containerPlacementPolicy = - new SCMContainerPlacementCapacity(scmNodeManager, conf); - - replicationManager = new ReplicationManager(containerPlacementPolicy, - scmContainerManager.getStateManager(), eventQueue, - commandWatcherLeaseManager); - - // setup CloseContainer watcher - CloseContainerWatcher closeContainerWatcher = - new CloseContainerWatcher(SCMEvents.CLOSE_CONTAINER_RETRYABLE_REQ, - SCMEvents.CLOSE_CONTAINER_STATUS, commandWatcherLeaseManager, - scmContainerManager); - closeContainerWatcher.start(eventQueue); - - scmAdminUsernames = conf.getTrimmedStringCollection(OzoneConfigKeys - .OZONE_ADMINISTRATORS); - scmUsername = UserGroupInformation.getCurrentUser().getUserName(); - if 
(!scmAdminUsernames.contains(scmUsername)) { - scmAdminUsernames.add(scmUsername); - } - - datanodeProtocolServer = new SCMDatanodeProtocolServer(conf, this, - eventQueue); - blockProtocolServer = new SCMBlockProtocolServer(conf, this); - clientProtocolServer = new SCMClientProtocolServer(conf, this); - httpServer = new StorageContainerManagerHttpServer(conf); - - eventQueue.addHandler(SCMEvents.DATANODE_COMMAND, scmNodeManager); - eventQueue.addHandler(SCMEvents.RETRIABLE_DATANODE_COMMAND, scmNodeManager); - eventQueue.addHandler(SCMEvents.NODE_REPORT, nodeReportHandler); - eventQueue.addHandler(SCMEvents.CONTAINER_REPORT, containerReportHandler); - eventQueue.addHandler(SCMEvents.CONTAINER_ACTIONS, actionsHandler); - eventQueue.addHandler(SCMEvents.CLOSE_CONTAINER, closeContainerHandler); - eventQueue.addHandler(SCMEvents.NEW_NODE, newNodeHandler); - eventQueue.addHandler(SCMEvents.STALE_NODE, staleNodeHandler); - eventQueue.addHandler(SCMEvents.DEAD_NODE, deadNodeHandler); - eventQueue.addHandler(SCMEvents.CMD_STATUS_REPORT, cmdStatusReportHandler); - eventQueue.addHandler(SCMEvents.START_REPLICATION, - replicationStatus.getReplicationStatusListener()); - eventQueue.addHandler(SCMEvents.CHILL_MODE_STATUS, - replicationStatus.getChillModeStatusListener()); - eventQueue - .addHandler(SCMEvents.PENDING_DELETE_STATUS, pendingDeleteHandler); - eventQueue.addHandler(SCMEvents.DELETE_BLOCK_STATUS, - (DeletedBlockLogImpl) scmBlockManager.getDeletedBlockLog()); - eventQueue.addHandler(SCMEvents.PIPELINE_ACTIONS, - pipelineActionEventHandler); - eventQueue.addHandler(SCMEvents.PIPELINE_CLOSE, pipelineCloseHandler); - eventQueue.addHandler(SCMEvents.NODE_REGISTRATION_CONT_REPORT, - scmChillModeManager); - eventQueue.addHandler(SCMEvents.CHILL_MODE_STATUS, - (BlockManagerImpl) scmBlockManager); - eventQueue.addHandler(SCMEvents.CHILL_MODE_STATUS, clientProtocolServer); - eventQueue.addHandler(SCMEvents.PIPELINE_REPORT, pipelineReportHandler); - - registerMXBean(); - } - - /** - * Builds a message for logging startup information about an RPC server. - * - * @param description RPC server description - * @param addr RPC server listening address - * @return server startup message - */ - public static String buildRpcServerStartMessage(String description, - InetSocketAddress addr) { - return addr != null - ? String.format("%s is listening at %s", description, addr.toString()) - : String.format("%s not started", description); - } - - /** - * Starts an RPC server, if configured. - * - * @param conf configuration - * @param addr configured address of RPC server - * @param protocol RPC protocol provided by RPC server - * @param instance RPC protocol implementation instance - * @param handlerCount RPC server handler count - * @return RPC server - * @throws IOException if there is an I/O error while creating RPC server - */ - public static RPC.Server startRpcServer( - OzoneConfiguration conf, - InetSocketAddress addr, - Class protocol, - BlockingService instance, - int handlerCount) - throws IOException { - RPC.Server rpcServer = - new RPC.Builder(conf) - .setProtocol(protocol) - .setInstance(instance) - .setBindAddress(addr.getHostString()) - .setPort(addr.getPort()) - .setNumHandlers(handlerCount) - .setVerbose(false) - .setSecretManager(null) - .build(); - - DFSUtil.addPBProtocol(conf, protocol, instance, rpcServer); - return rpcServer; - } - - /** - * Main entry point for starting StorageContainerManager. 
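The static startRpcServer helper above is shared by the three protocol servers in this patch; standalone use looks roughly like this (port, handler count, and the BlockingService are placeholders):

    RPC.Server server = StorageContainerManager.startRpcServer(
        conf,
        new InetSocketAddress("0.0.0.0", 9860), // illustrative bind address
        StorageContainerLocationProtocolPB.class,
        storageProtoPbService, // a protobuf BlockingService, built as earlier
        10);                   // handler threads
    server.start();
    LOG.info(StorageContainerManager.buildRpcServerStartMessage(
        "Example RPC server", server.getListenerAddress()));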
- * - * @param argv arguments - * @throws IOException if startup fails due to I/O error - */ - public static void main(String[] argv) throws IOException { - if (DFSUtil.parseHelpArgument(argv, USAGE, System.out, true)) { - System.exit(0); - } - try { - OzoneConfiguration conf = new OzoneConfiguration(); - GenericOptionsParser hParser = new GenericOptionsParser(conf, argv); - if (!hParser.isParseSuccessful()) { - System.err.println("USAGE: " + USAGE + "\n"); - hParser.printGenericCommandUsage(System.err); - System.exit(1); - } - StorageContainerManager scm = createSCM( - hParser.getRemainingArgs(), conf, true); - if (scm != null) { - scm.start(); - scm.join(); - } - } catch (Throwable t) { - LOG.error("Failed to start the StorageContainerManager.", t); - terminate(1, t); - } - } - - private static void printUsage(PrintStream out) { - out.println(USAGE + "\n"); - } - - /** - * Create an SCM instance based on the supplied command-line arguments. - * - * This method is intended for unit tests only. It suppresses the - * startup/shutdown message and skips registering Unix signal - * handlers. - * - * @param args command line arguments. - * @param conf HDDS configuration - * @return SCM instance - * @throws IOException - */ - @VisibleForTesting - public static StorageContainerManager createSCM( - String[] args, OzoneConfiguration conf) throws IOException { - return createSCM(args, conf, false); - } - - /** - * Create an SCM instance based on the supplied command-line arguments. - * - * @param args command-line arguments. - * @param conf HDDS configuration - * @param printBanner if true, then log a verbose startup message. - * @return SCM instance - * @throws IOException - */ - private static StorageContainerManager createSCM( - String[] args, - OzoneConfiguration conf, - boolean printBanner) throws IOException { - String[] argv = (args == null) ? new String[0] : args; - if (!HddsUtils.isHddsEnabled(conf)) { - System.err.println( - "SCM cannot be started in secure mode or when " + OZONE_ENABLED + "" + - " is set to false"); - System.exit(1); - } - StartupOption startOpt = parseArguments(argv); - if (startOpt == null) { - printUsage(System.err); - terminate(1); - return null; - } - switch (startOpt) { - case INIT: - if (printBanner) { - StringUtils.startupShutdownMessage(StorageContainerManager.class, argv, - LOG); - } - terminate(scmInit(conf) ? 0 : 1); - return null; - case GENCLUSTERID: - if (printBanner) { - StringUtils.startupShutdownMessage(StorageContainerManager.class, argv, - LOG); - } - System.out.println("Generating new cluster id:"); - System.out.println(StorageInfo.newClusterID()); - terminate(0); - return null; - case HELP: - printUsage(System.err); - terminate(0); - return null; - default: - if (printBanner) { - StringUtils.startupShutdownMessage(StorageContainerManager.class, argv, - LOG); - } - return new StorageContainerManager(conf); - } - } - - /** - * Routine to set up the Version info for StorageContainerManager. - * - * @param conf OzoneConfiguration - * @return true if SCM initialization is successful, false otherwise. 
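For tests, the banner-less createSCM overload above gives a programmatic lifecycle. A sketch, assuming the SCM version file was already set up via scmInit on the configured metadata directory:

    OzoneConfiguration conf = new OzoneConfiguration();
    StorageContainerManager scm =
        StorageContainerManager.createSCM(new String[0], conf);
    scm.start();
    // ... exercise the client/block/datanode RPC endpoints ...
    scm.stop();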
- * @throws IOException if init fails due to I/O error - */ - public static boolean scmInit(OzoneConfiguration conf) throws IOException { - SCMStorage scmStorage = new SCMStorage(conf); - StorageState state = scmStorage.getState(); - if (state != StorageState.INITIALIZED) { - try { - String clusterId = StartupOption.INIT.getClusterId(); - if (clusterId != null && !clusterId.isEmpty()) { - scmStorage.setClusterId(clusterId); - } - scmStorage.initialize(); - System.out.println( - "SCM initialization succeeded." - + "Current cluster id for sd=" - + scmStorage.getStorageDir() - + ";cid=" - + scmStorage.getClusterID()); - return true; - } catch (IOException ioe) { - LOG.error("Could not initialize SCM version file", ioe); - return false; - } - } else { - System.out.println( - "SCM already initialized. Reusing existing" - + " cluster id for sd=" - + scmStorage.getStorageDir() - + ";cid=" - + scmStorage.getClusterID()); - return true; - } - } - - private static StartupOption parseArguments(String[] args) { - int argsLen = (args == null) ? 0 : args.length; - StartupOption startOpt = StartupOption.HELP; - if (argsLen == 0) { - startOpt = StartupOption.REGULAR; - } - for (int i = 0; i < argsLen; i++) { - String cmd = args[i]; - if (StartupOption.INIT.getName().equalsIgnoreCase(cmd)) { - startOpt = StartupOption.INIT; - if (argsLen > 3) { - return null; - } - for (i = i + 1; i < argsLen; i++) { - if (args[i].equalsIgnoreCase(StartupOption.CLUSTERID.getName())) { - i++; - if (i < argsLen && !args[i].isEmpty()) { - startOpt.setClusterId(args[i]); - } else { - // if no cluster id specified or is empty string, return null - LOG.error( - "Must specify a valid cluster ID after the " - + StartupOption.CLUSTERID.getName() - + " flag"); - return null; - } - } else { - return null; - } - } - } else { - if (StartupOption.GENCLUSTERID.getName().equalsIgnoreCase(cmd)) { - if (argsLen > 1) { - return null; - } - startOpt = StartupOption.GENCLUSTERID; - } - } - } - return startOpt; - } - - /** - * Initialize SCM metrics. - */ - public static void initMetrics() { - metrics = SCMMetrics.create(); - } - - /** - * Return SCM metrics instance. - */ - public static SCMMetrics getMetrics() { - return metrics == null ? SCMMetrics.create() : metrics; - } - - public SCMStorage getScmStorage() { - return scmStorage; - } - - public SCMDatanodeProtocolServer getDatanodeProtocolServer() { - return datanodeProtocolServer; - } - - public SCMBlockProtocolServer getBlockProtocolServer() { - return blockProtocolServer; - } - - public SCMClientProtocolServer getClientProtocolServer() { - return clientProtocolServer; - } - - /** - * Initialize container reports cache that sent from datanodes. 
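- * (The cache is keyed by datanode UUID. Entries effectively never
- * expire on their own; they are dropped only by explicit invalidation,
- * at which point the removal listener below subtracts the stale stats
- * from the SCM metrics.)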
- *
- * @param conf OzoneConfiguration
- */
- private void initContainerReportCache(OzoneConfiguration conf) {
-   containerReportCache =
-       CacheBuilder.newBuilder()
-           .expireAfterAccess(Long.MAX_VALUE, TimeUnit.MILLISECONDS)
-           .maximumSize(Integer.MAX_VALUE)
-           .removalListener(
-               new RemovalListener<String, ContainerStat>() {
-                 @Override
-                 public void onRemoval(
-                     RemovalNotification<String, ContainerStat>
-                         removalNotification) {
-                   synchronized (containerReportCache) {
-                     ContainerStat stat = removalNotification.getValue();
-                     // remove invalid container report
-                     metrics.decrContainerStat(stat);
-                     LOG.debug(
-                         "Remove expired container stat entry for datanode: " +
-                             "{}.",
-                         removalNotification.getKey());
-                   }
-                 }
-               })
-           .build();
- }
-
- private void registerMXBean() {
-   Map<String, String> jmxProperties = new HashMap<>();
-   jmxProperties.put("component", "ServerRuntime");
-   this.scmInfoBeanName =
-       MBeans.register(
-           "StorageContainerManager", "StorageContainerManagerInfo",
-           jmxProperties, this);
- }
-
- private void unregisterMXBean() {
-   if (this.scmInfoBeanName != null) {
-     MBeans.unregister(this.scmInfoBeanName);
-     this.scmInfoBeanName = null;
-   }
- }
-
- @VisibleForTesting
- public ContainerInfo getContainerInfo(long containerID) throws
-     IOException {
-   return scmContainerManager.getContainer(containerID);
- }
-
- /**
- * Returns listening address of StorageLocation Protocol RPC server.
- *
- * @return listen address of StorageLocation RPC server
- */
- @VisibleForTesting
- public InetSocketAddress getClientRpcAddress() {
-   return getClientProtocolServer().getClientRpcAddress();
- }
-
- @Override
- public String getClientRpcPort() {
-   InetSocketAddress addr = getClientRpcAddress();
-   return addr == null ? "0" : Integer.toString(addr.getPort());
- }
-
- /**
- * Returns listening address of StorageDatanode Protocol RPC server.
- *
- * @return address on which the datanodes communicate with the SCM.
- */
- public InetSocketAddress getDatanodeRpcAddress() {
-   return getDatanodeProtocolServer().getDatanodeRpcAddress();
- }
-
- @Override
- public String getDatanodeRpcPort() {
-   InetSocketAddress addr = getDatanodeRpcAddress();
-   return addr == null ? "0" : Integer.toString(addr.getPort());
- }
-
- /**
- * Start service.
- */
- public void start() throws IOException {
-   LOG.info(
-       buildRpcServerStartMessage(
-           "StorageContainerLocationProtocol RPC server",
-           getClientRpcAddress()));
-   DefaultMetricsSystem.initialize("StorageContainerManager");
-
-   commandWatcherLeaseManager.start();
-   getClientProtocolServer().start();
-
-   LOG.info(buildRpcServerStartMessage("ScmBlockLocationProtocol RPC " +
-       "server", getBlockProtocolServer().getBlockRpcAddress()));
-   getBlockProtocolServer().start();
-
-   LOG.info(buildRpcServerStartMessage("ScmDatanodeProtocol RPC " +
-       "server", getDatanodeProtocolServer().getDatanodeRpcAddress()));
-   getDatanodeProtocolServer().start();
-
-   httpServer.start();
-   scmBlockManager.start();
-   replicationStatus.start();
-   replicationManager.start();
-   setStartTime();
- }
-
- /**
- * Stop service.
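- * (Subsystems shut down in roughly the reverse order of start():
- * replication, the command-watcher lease manager, the three RPC
- * servers, the HTTP server, the block manager, and finally the event
- * queue, which closes before the underlying DB store.)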
- */ - public void stop() { - - try { - LOG.info("Stopping Replication Activity Status tracker."); - replicationStatus.close(); - } catch (Exception ex) { - LOG.error("Replication Activity Status tracker stop failed.", ex); - } - - - try { - LOG.info("Stopping Replication Manager Service."); - replicationManager.stop(); - } catch (Exception ex) { - LOG.error("Replication manager service stop failed.", ex); - } - - try { - LOG.info("Stopping Lease Manager of the command watchers"); - commandWatcherLeaseManager.shutdown(); - } catch (Exception ex) { - LOG.error("Lease Manager of the command watchers stop failed"); - } - - try { - LOG.info("Stopping datanode service RPC server"); - getDatanodeProtocolServer().stop(); - - } catch (Exception ex) { - LOG.error("Storage Container Manager datanode RPC stop failed.", ex); - } - - try { - LOG.info("Stopping block service RPC server"); - getBlockProtocolServer().stop(); - } catch (Exception ex) { - LOG.error("Storage Container Manager blockRpcServer stop failed.", ex); - } - - try { - LOG.info("Stopping the StorageContainerLocationProtocol RPC server"); - getClientProtocolServer().stop(); - } catch (Exception ex) { - LOG.error("Storage Container Manager clientRpcServer stop failed.", ex); - } - - try { - LOG.info("Stopping Storage Container Manager HTTP server."); - httpServer.stop(); - } catch (Exception ex) { - LOG.error("Storage Container Manager HTTP server stop failed.", ex); - } - - try { - LOG.info("Stopping Block Manager Service."); - scmBlockManager.stop(); - } catch (Exception ex) { - LOG.error("SCM block manager service stop failed.", ex); - } - - if (containerReportCache != null) { - containerReportCache.invalidateAll(); - containerReportCache.cleanUp(); - } - - if (metrics != null) { - metrics.unRegister(); - } - - unregisterMXBean(); - // Event queue must be stopped before the DB store is closed at the end. - try { - LOG.info("Stopping SCM Event Queue."); - eventQueue.close(); - } catch (Exception ex) { - LOG.error("SCM Event Queue stop failed", ex); - } - IOUtils.cleanupWithLogger(LOG, scmContainerManager); - } - - /** - * Wait until service has completed shutdown. - */ - public void join() { - try { - getBlockProtocolServer().join(); - getClientProtocolServer().join(); - getDatanodeProtocolServer().join(); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - LOG.info("Interrupted during StorageContainerManager join."); - } - } - - /** - * Returns the Number of Datanodes that are communicating with SCM. - * - * @param nodestate Healthy, Dead etc. - * @return int -- count - */ - public int getNodeCount(NodeState nodestate) { - return scmNodeManager.getNodeCount(nodestate); - } - - /** - * Returns SCM container manager. - */ - @VisibleForTesting - public Mapping getScmContainerManager() { - return scmContainerManager; - } - - /** - * Returns node manager. - * - * @return - Node Manager - */ - @VisibleForTesting - public NodeManager getScmNodeManager() { - return scmNodeManager; - } - - @VisibleForTesting - public BlockManager getScmBlockManager() { - return scmBlockManager; - } - - public void checkAdminAccess(String remoteUser) throws IOException { - if (remoteUser != null) { - if (!scmAdminUsernames.contains(remoteUser)) { - throw new IOException( - "Access denied for user " + remoteUser + ". Superuser privilege " + - "is required."); - } - } - } - - /** - * Invalidate container stat entry for given datanode. 
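- * (Invalidation runs through the cache's removal listener, so the
- * datanode's stats are also deducted from the aggregated metrics.)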
- * - * @param datanodeUuid - */ - public void removeContainerReport(String datanodeUuid) { - synchronized (containerReportCache) { - containerReportCache.invalidate(datanodeUuid); - } - } - - /** - * Get container stat of specified datanode. - * - * @param datanodeUuid - * @return - */ - public ContainerStat getContainerReport(String datanodeUuid) { - ContainerStat stat = null; - synchronized (containerReportCache) { - stat = containerReportCache.getIfPresent(datanodeUuid); - } - - return stat; - } - - /** - * Returns a view of the container stat entries. Modifications made to the - * map will directly - * affect the cache. - * - * @return - */ - public ConcurrentMap getContainerReportCache() { - return containerReportCache.asMap(); - } - - @Override - public Map getContainerReport() { - Map id2StatMap = new HashMap<>(); - synchronized (containerReportCache) { - ConcurrentMap map = containerReportCache.asMap(); - for (Map.Entry entry : map.entrySet()) { - id2StatMap.put(entry.getKey(), entry.getValue().toJsonString()); - } - } - - return id2StatMap; - } - - public boolean isInChillMode() { - return scmChillModeManager.getInChillMode(); - } - - /** - * Returns EventPublisher. - */ - public EventPublisher getEventQueue(){ - return eventQueue; - } - - /** - * Force SCM out of chill mode. - */ - public boolean exitChillMode() { - scmChillModeManager.exitChillMode(eventQueue); - return true; - } - - @VisibleForTesting - public double getCurrentContainerThreshold() { - return scmChillModeManager.getCurrentContainerThreshold(); - } - - /** - * Startup options. - */ - public enum StartupOption { - INIT("-init"), - CLUSTERID("-clusterid"), - GENCLUSTERID("-genclusterid"), - REGULAR("-regular"), - HELP("-help"); - - private final String name; - private String clusterId = null; - - StartupOption(String arg) { - this.name = arg; - } - - public String getClusterId() { - return clusterId; - } - - public void setClusterId(String cid) { - if (cid != null && !cid.isEmpty()) { - clusterId = cid; - } - } - - public String getName() { - return name; - } - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java deleted file mode 100644 index 75b203647a69e..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java +++ /dev/null @@ -1,77 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.server; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.server.BaseHttpServer; -import org.apache.hadoop.ozone.OzoneConfigKeys; - -import java.io.IOException; - -/** - * HttpServer2 wrapper for the Ozone Storage Container Manager. - */ -public class StorageContainerManagerHttpServer extends BaseHttpServer { - - public StorageContainerManagerHttpServer(Configuration conf) - throws IOException { - super(conf, "scm"); - } - - @Override protected String getHttpAddressKey() { - return ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY; - } - - @Override protected String getHttpBindHostKey() { - return ScmConfigKeys.OZONE_SCM_HTTP_BIND_HOST_KEY; - } - - @Override protected String getHttpsAddressKey() { - return ScmConfigKeys.OZONE_SCM_HTTPS_ADDRESS_KEY; - } - - @Override protected String getHttpsBindHostKey() { - return ScmConfigKeys.OZONE_SCM_HTTPS_BIND_HOST_KEY; - } - - @Override protected String getBindHostDefault() { - return ScmConfigKeys.OZONE_SCM_HTTP_BIND_HOST_DEFAULT; - } - - @Override protected int getHttpBindPortDefault() { - return ScmConfigKeys.OZONE_SCM_HTTP_BIND_PORT_DEFAULT; - } - - @Override protected int getHttpsBindPortDefault() { - return ScmConfigKeys.OZONE_SCM_HTTPS_BIND_PORT_DEFAULT; - } - - @Override protected String getKeytabFile() { - return ScmConfigKeys.OZONE_SCM_KEYTAB_FILE; - } - - @Override protected String getSpnegoPrincipal() { - return OzoneConfigKeys.OZONE_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL; - } - - @Override protected String getEnabledKey() { - return ScmConfigKeys.OZONE_SCM_HTTP_ENABLED_KEY; - } - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/package-info.java deleted file mode 100644 index fe07272bb6c91..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license - * agreements. See the NOTICE file distributed with this work for additional - * information regarding - * copyright ownership. The ASF licenses this file to you under the Apache - * License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the - * License. You may obtain a - * copy of the License at - * - *
http://www.apache.org/licenses/LICENSE-2.0 - * - *
Unless required by applicable law or agreed to in writing, software - * distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR - * CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.server; \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/ozone/protocol/commands/RetriableDatanodeEventWatcher.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/ozone/protocol/commands/RetriableDatanodeEventWatcher.java deleted file mode 100644 index 2a50bca9dc899..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/ozone/protocol/commands/RetriableDatanodeEventWatcher.java +++ /dev/null @@ -1,58 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.protocol.commands; - -import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler.CommandStatusEvent; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.server.events.Event; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.hdds.server.events.EventWatcher; -import org.apache.hadoop.ozone.lease.LeaseManager; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * EventWatcher for start events and completion events with payload of type - * RetriablePayload and RetriableCompletionPayload respectively. - */ -public class RetriableDatanodeEventWatcher - extends EventWatcher { - - public static final Logger LOG = - LoggerFactory.getLogger(RetriableDatanodeEventWatcher.class); - - public RetriableDatanodeEventWatcher(Event startEvent, - Event completionEvent, LeaseManager leaseManager) { - super(startEvent, completionEvent, leaseManager); - } - - @Override - protected void onTimeout(EventPublisher publisher, - CommandForDatanode payload) { - LOG.info("RetriableDatanodeCommand type={} with id={} timed out. Retrying.", - payload.getCommand().getType(), payload.getId()); - //put back to the original queue - publisher.fireEvent(SCMEvents.RETRIABLE_DATANODE_COMMAND, payload); - } - - @Override - protected void onFinished(EventPublisher publisher, - CommandForDatanode payload) { - - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/ozone/protocol/commands/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/ozone/protocol/commands/package-info.java deleted file mode 100644 index b1d28386a4f0a..0000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/ozone/protocol/commands/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.protocol.commands; diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/index.html b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/index.html deleted file mode 100644 index 2c943b626ffbd..0000000000000 --- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/index.html +++ /dev/null @@ -1,76 +0,0 @@ - - - - - - - - - - - HDFS Storage Container Manager - - - - - - - - - - - -
- [HTML markup of the deleted index.html lost in extraction]
diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/main.html b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/main.html
deleted file mode 100644
index 2666f81b5ff00..0000000000000
--- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/main.html
+++ /dev/null
@@ -1,20 +0,0 @@
- [HTML markup of the deleted main.html lost in extraction]
diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html
deleted file mode 100644
index fca23baa0430d..0000000000000
--- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html
+++ /dev/null
@@ -1,60 +0,0 @@
- [table markup lost in extraction; the deleted template rendered:]
-
- Node counts:
-   {{typestat.key}}   {{typestat.value}}
-
- Status:
-   Client Rpc port                          {{$ctrl.overview.jmx.ClientRpcPort}}
-   Datanode Rpc port                        {{$ctrl.overview.jmx.DatanodeRpcPort}}
-   Block Manager: Open containers           {{$ctrl.blockmanagermetrics.OpenContainersNo}}
-   Node Manager: Minimum chill mode nodes   {{$ctrl.nodemanagermetrics.MinimumChillModeNodes}}
-   Node Manager: Out-of-node chill mode     {{$ctrl.nodemanagermetrics.OutOfNodeChillMode}}
-   Node Manager: Chill mode status          {{$ctrl.nodemanagermetrics.ChillModeStatus}}
-   Node Manager: Manual chill mode          {{$ctrl.nodemanagermetrics.InManualChillMode}}
\ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js deleted file mode 100644 index bcfa8b7b7b736..0000000000000 --- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js +++ /dev/null @@ -1,54 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -(function () { - "use strict"; - angular.module('scm', ['ozone', 'nvd3']); - - angular.module('scm').component('scmOverview', { - templateUrl: 'scm-overview.html', - require: { - overview: "^overview" - }, - controller: function ($http) { - var ctrl = this; - $http.get("jmx?qry=Hadoop:service=BlockManager,name=*") - .then(function (result) { - ctrl.blockmanagermetrics = result.data.beans[0]; - }); - $http.get("jmx?qry=Hadoop:service=SCMNodeManager,name=SCMNodeManagerInfo") - .then(function (result) { - ctrl.nodemanagermetrics = result.data.beans[0]; - }); - - var statusSortOrder = { - "HEALTHY": "a", - "STALE": "b", - "DEAD": "c", - "UNKNOWN": "z", - "DECOMMISSIONING": "x", - "DECOMMISSIONED": "y" - }; - ctrl.nodeOrder = function (v1, v2) { - //status with non defined sort order will be "undefined" - return ("" + statusSortOrder[v1.value]).localeCompare("" + statusSortOrder[v2.value]) - } - - } - }); - -})(); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsServerUtilTest.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsServerUtilTest.java deleted file mode 100644 index 6e01e5354b65b..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsServerUtilTest.java +++ /dev/null @@ -1,308 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -package org.apache.hadoop.hdds.scm; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.junit.rules.Timeout; - -import java.net.InetSocketAddress; -import java.util.Collection; -import java.util.HashMap; -import java.util.Iterator; - -import static org.apache.hadoop.hdds.HddsUtils.getSCMAddresses; -import static org.hamcrest.core.Is.is; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -/** - * Test the HDDS server side utilities. - */ -public class HddsServerUtilTest { - - @Rule - public Timeout timeout = new Timeout(300000); - - @Rule - public ExpectedException thrown= ExpectedException.none(); - - /** - * Verify DataNode endpoint lookup failure if neither the client nor - * datanode endpoint are configured. - */ - @Test - public void testMissingScmDataNodeAddress() { - final Configuration conf = new OzoneConfiguration(); - thrown.expect(IllegalArgumentException.class); - HddsServerUtil.getScmAddressForDataNodes(conf); - } - - /** - * Verify that the datanode endpoint is parsed correctly. - * This tests the logic used by the DataNodes to determine which address - * to connect to. - */ - @Test - public void testGetScmDataNodeAddress() { - final Configuration conf = new OzoneConfiguration(); - - // First try a client address with just a host name. Verify it falls - // back to the default port. - conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4"); - InetSocketAddress addr = HddsServerUtil.getScmAddressForDataNodes(conf); - assertThat(addr.getHostString(), is("1.2.3.4")); - assertThat(addr.getPort(), is( - ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT)); - - // Next try a client address with just a host name and port. - // Verify the port is ignored and the default DataNode port is used. - conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100"); - addr = HddsServerUtil.getScmAddressForDataNodes(conf); - assertThat(addr.getHostString(), is("1.2.3.4")); - assertThat(addr.getPort(), is( - ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT)); - - // Set both OZONE_SCM_CLIENT_ADDRESS_KEY and - // OZONE_SCM_DATANODE_ADDRESS_KEY. - // Verify that the latter overrides and the port number is still the - // default. - conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100"); - conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "5.6.7.8"); - addr = - HddsServerUtil.getScmAddressForDataNodes(conf); - assertThat(addr.getHostString(), is("5.6.7.8")); - assertThat(addr.getPort(), is( - ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT)); - - // Set both OZONE_SCM_CLIENT_ADDRESS_KEY and - // OZONE_SCM_DATANODE_ADDRESS_KEY. - // Verify that the latter overrides and the port number from the latter is - // used. 
- conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100"); - conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "5.6.7.8:200"); - addr = HddsServerUtil.getScmAddressForDataNodes(conf); - assertThat(addr.getHostString(), is("5.6.7.8")); - assertThat(addr.getPort(), is(200)); - } - - - /** - * Verify that the client endpoint bind address is computed correctly. - * This tests the logic used by the SCM to determine its own bind address. - */ - @Test - public void testScmClientBindHostDefault() { - final Configuration conf = new OzoneConfiguration(); - - // The bind host should be 0.0.0.0 unless OZONE_SCM_CLIENT_BIND_HOST_KEY - // is set differently. - conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4"); - InetSocketAddress addr = HddsServerUtil.getScmClientBindAddress(conf); - assertThat(addr.getHostString(), is("0.0.0.0")); - assertThat(addr.getPort(), is(ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT)); - - // The bind host should be 0.0.0.0 unless OZONE_SCM_CLIENT_BIND_HOST_KEY - // is set differently. The port number from OZONE_SCM_CLIENT_ADDRESS_KEY - // should be respected. - conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100"); - conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "1.2.3.4:200"); - addr = HddsServerUtil.getScmClientBindAddress(conf); - assertThat(addr.getHostString(), is("0.0.0.0")); - assertThat(addr.getPort(), is(100)); - - // OZONE_SCM_CLIENT_BIND_HOST_KEY should be respected. - // Port number should be default if none is specified via - // OZONE_SCM_DATANODE_ADDRESS_KEY. - conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4"); - conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "1.2.3.4"); - conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_KEY, "5.6.7.8"); - addr = HddsServerUtil.getScmClientBindAddress(conf); - assertThat(addr.getHostString(), is("5.6.7.8")); - assertThat(addr.getPort(), is( - ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT)); - - // OZONE_SCM_CLIENT_BIND_HOST_KEY should be respected. - // Port number from OZONE_SCM_CLIENT_ADDRESS_KEY should be - // respected. - conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100"); - conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "1.2.3.4:200"); - conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_KEY, "5.6.7.8"); - addr = HddsServerUtil.getScmClientBindAddress(conf); - assertThat(addr.getHostString(), is("5.6.7.8")); - assertThat(addr.getPort(), is(100)); - } - - /** - * Verify that the DataNode endpoint bind address is computed correctly. - * This tests the logic used by the SCM to determine its own bind address. - */ - @Test - public void testScmDataNodeBindHostDefault() { - final Configuration conf = new OzoneConfiguration(); - - // The bind host should be 0.0.0.0 unless OZONE_SCM_DATANODE_BIND_HOST_KEY - // is set differently. - conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4"); - InetSocketAddress addr = HddsServerUtil.getScmDataNodeBindAddress(conf); - assertThat(addr.getHostString(), is("0.0.0.0")); - assertThat(addr.getPort(), is( - ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT)); - - // The bind host should be 0.0.0.0 unless OZONE_SCM_DATANODE_BIND_HOST_KEY - // is set differently. The port number from OZONE_SCM_DATANODE_ADDRESS_KEY - // should be respected. 
- conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100"); - conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "1.2.3.4:200"); - addr = HddsServerUtil.getScmDataNodeBindAddress(conf); - assertThat(addr.getHostString(), is("0.0.0.0")); - assertThat(addr.getPort(), is(200)); - - // OZONE_SCM_DATANODE_BIND_HOST_KEY should be respected. - // Port number should be default if none is specified via - // OZONE_SCM_DATANODE_ADDRESS_KEY. - conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100"); - conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "1.2.3.4"); - conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_BIND_HOST_KEY, "5.6.7.8"); - addr = HddsServerUtil.getScmDataNodeBindAddress(conf); - assertThat(addr.getHostString(), is("5.6.7.8")); - assertThat(addr.getPort(), is( - ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT)); - - // OZONE_SCM_DATANODE_BIND_HOST_KEY should be respected. - // Port number from OZONE_SCM_DATANODE_ADDRESS_KEY should be - // respected. - conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100"); - conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "1.2.3.4:200"); - conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_BIND_HOST_KEY, "5.6.7.8"); - addr = HddsServerUtil.getScmDataNodeBindAddress(conf); - assertThat(addr.getHostString(), is("5.6.7.8")); - assertThat(addr.getPort(), is(200)); - } - - - - @Test - public void testGetSCMAddresses() { - final Configuration conf = new OzoneConfiguration(); - Collection addresses = null; - InetSocketAddress addr = null; - Iterator it = null; - - // Verify valid IP address setup - conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, "1.2.3.4"); - addresses = getSCMAddresses(conf); - assertThat(addresses.size(), is(1)); - addr = addresses.iterator().next(); - assertThat(addr.getHostName(), is("1.2.3.4")); - assertThat(addr.getPort(), is(ScmConfigKeys.OZONE_SCM_DEFAULT_PORT)); - - // Verify valid hostname setup - conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, "scm1"); - addresses = getSCMAddresses(conf); - assertThat(addresses.size(), is(1)); - addr = addresses.iterator().next(); - assertThat(addr.getHostName(), is("scm1")); - assertThat(addr.getPort(), is(ScmConfigKeys.OZONE_SCM_DEFAULT_PORT)); - - // Verify valid hostname and port - conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, "scm1:1234"); - addresses = getSCMAddresses(conf); - assertThat(addresses.size(), is(1)); - addr = addresses.iterator().next(); - assertThat(addr.getHostName(), is("scm1")); - assertThat(addr.getPort(), is(1234)); - - final HashMap hostsAndPorts = - new HashMap(); - hostsAndPorts.put("scm1", 1234); - hostsAndPorts.put("scm2", 2345); - hostsAndPorts.put("scm3", 3456); - - // Verify multiple hosts and port - conf.setStrings( - ScmConfigKeys.OZONE_SCM_NAMES, "scm1:1234,scm2:2345,scm3:3456"); - addresses = getSCMAddresses(conf); - assertThat(addresses.size(), is(3)); - it = addresses.iterator(); - HashMap expected1 = new HashMap<>(hostsAndPorts); - while(it.hasNext()) { - InetSocketAddress current = it.next(); - assertTrue(expected1.remove(current.getHostName(), - current.getPort())); - } - assertTrue(expected1.isEmpty()); - - // Verify names with spaces - conf.setStrings( - ScmConfigKeys.OZONE_SCM_NAMES, " scm1:1234, scm2:2345 , scm3:3456 "); - addresses = getSCMAddresses(conf); - assertThat(addresses.size(), is(3)); - it = addresses.iterator(); - HashMap expected2 = new HashMap<>(hostsAndPorts); - while(it.hasNext()) { - InetSocketAddress current = it.next(); - assertTrue(expected2.remove(current.getHostName(), - current.getPort())); - } - 
assertTrue(expected2.isEmpty()); - - // Verify empty value - conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, ""); - try { - addresses = getSCMAddresses(conf); - fail("Empty value should cause an IllegalArgumentException"); - } catch (Exception e) { - assertTrue(e instanceof IllegalArgumentException); - } - - // Verify invalid hostname - conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, "s..x..:1234"); - try { - addresses = getSCMAddresses(conf); - fail("An invalid hostname should cause an IllegalArgumentException"); - } catch (Exception e) { - assertTrue(e instanceof IllegalArgumentException); - } - - // Verify invalid port - conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, "scm:xyz"); - try { - addresses = getSCMAddresses(conf); - fail("An invalid port should cause an IllegalArgumentException"); - } catch (Exception e) { - assertTrue(e instanceof IllegalArgumentException); - } - - // Verify a mixed case (valid and invalid value both appears) - conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, "scm1:1234, scm:xyz"); - try { - addresses = getSCMAddresses(conf); - fail("An invalid value should cause an IllegalArgumentException"); - } catch (Exception e) { - assertTrue(e instanceof IllegalArgumentException); - } - } - -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java deleted file mode 100644 index 50d1eedbbe79c..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm; - -import java.util.ArrayList; -import java.util.List; -import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeProtocolServer - .NodeRegistrationContainerReport; - -/** - * Stateless helper functions for Hdds tests. - */ -public final class HddsTestUtils { - - private HddsTestUtils() { - } - - /** - * Create Command Status report object. - * - * @param numOfContainers number of containers to be included in report. - * @return CommandStatusReportsProto - */ - public static NodeRegistrationContainerReport - createNodeRegistrationContainerReport(int numOfContainers) { - return new NodeRegistrationContainerReport( - TestUtils.randomDatanodeDetails(), - TestUtils.getRandomContainerReports(numOfContainers)); - } - - /** - * Create NodeRegistrationContainerReport object. 
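- * (Builds the report from the supplied container list, wrapping each
- * entry via TestUtils.getRandomContainerInfo before registering.)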
- * - * @param dnContainers List of containers to be included in report - * @return NodeRegistrationContainerReport - */ - public static NodeRegistrationContainerReport - createNodeRegistrationContainerReport(List dnContainers) { - List - containers = new ArrayList<>(); - dnContainers.forEach(c -> { - containers.add(TestUtils.getRandomContainerInfo(c.getContainerID())); - }); - return new NodeRegistrationContainerReport( - TestUtils.randomDatanodeDetails(), - TestUtils.getContainerReports(containers)); - } - - /** - * Creates list of ContainerInfo. - * - * @param numContainers number of ContainerInfo to be included in list. - * @return List - */ - public static List getContainerInfo(int numContainers) { - List containerInfoList = new ArrayList<>(); - for (int i = 0; i < numContainers; i++) { - ContainerInfo.Builder builder = new ContainerInfo.Builder(); - containerInfoList.add(builder - .setContainerID(RandomUtils.nextLong()) - .build()); - } - return containerInfoList; - } - -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java deleted file mode 100644 index d9e1425517239..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java +++ /dev/null @@ -1,142 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.scm.server.StorageContainerManagerHttpServer; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.web.URLConnectionFactory; -import org.apache.hadoop.http.HttpConfig; -import org.apache.hadoop.http.HttpConfig.Policy; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.security.ssl.KeyStoreTestUtil; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameters; - -import java.io.File; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.net.URL; -import java.net.URLConnection; -import java.util.Arrays; -import java.util.Collection; - -/** - * Test http server os SCM with various HTTP option. - */ -@RunWith(value = Parameterized.class) -public class TestStorageContainerManagerHttpServer { - private static final String BASEDIR = GenericTestUtils - .getTempPath(TestStorageContainerManagerHttpServer.class.getSimpleName()); - private static String keystoresDir; - private static String sslConfDir; - private static Configuration conf; - private static URLConnectionFactory connectionFactory; - - @Parameters public static Collection policy() { - Object[][] params = new Object[][] { - {HttpConfig.Policy.HTTP_ONLY}, - {HttpConfig.Policy.HTTPS_ONLY}, - {HttpConfig.Policy.HTTP_AND_HTTPS} }; - return Arrays.asList(params); - } - - private final HttpConfig.Policy policy; - - public TestStorageContainerManagerHttpServer(Policy policy) { - super(); - this.policy = policy; - } - - @BeforeClass public static void setUp() throws Exception { - File base = new File(BASEDIR); - FileUtil.fullyDelete(base); - base.mkdirs(); - conf = new Configuration(); - keystoresDir = new File(BASEDIR).getAbsolutePath(); - sslConfDir = KeyStoreTestUtil.getClasspathDir( - TestStorageContainerManagerHttpServer.class); - KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false); - connectionFactory = - URLConnectionFactory.newDefaultURLConnectionFactory(conf); - conf.set(DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY, - KeyStoreTestUtil.getClientSSLConfigFileName()); - conf.set(DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY, - KeyStoreTestUtil.getServerSSLConfigFileName()); - } - - @AfterClass public static void tearDown() throws Exception { - FileUtil.fullyDelete(new File(BASEDIR)); - KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir); - } - - @Test public void testHttpPolicy() throws Exception { - conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, policy.name()); - conf.set(ScmConfigKeys.OZONE_SCM_HTTPS_ADDRESS_KEY, "localhost:0"); - - InetSocketAddress.createUnresolved("localhost", 0); - StorageContainerManagerHttpServer server = null; - try { - server = new StorageContainerManagerHttpServer(conf); - server.start(); - - Assert.assertTrue(implies(policy.isHttpEnabled(), - canAccess("http", server.getHttpAddress()))); - Assert.assertTrue( - 
implies(!policy.isHttpEnabled(), server.getHttpAddress() == null)); - - Assert.assertTrue(implies(policy.isHttpsEnabled(), - canAccess("https", server.getHttpsAddress()))); - Assert.assertTrue( - implies(!policy.isHttpsEnabled(), server.getHttpsAddress() == null)); - - } finally { - if (server != null) { - server.stop(); - } - } - } - - private static boolean canAccess(String scheme, InetSocketAddress addr) { - if (addr == null) { - return false; - } - try { - URL url = - new URL(scheme + "://" + NetUtils.getHostPortString(addr) + "/jmx"); - URLConnection conn = connectionFactory.openConnection(url); - conn.connect(); - conn.getContent(); - } catch (IOException e) { - return false; - } - return true; - } - - private static boolean implies(boolean a, boolean b) { - return !a || b; - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java deleted file mode 100644 index 24a16c77bb1e0..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java +++ /dev/null @@ -1,433 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.mockito.Mockito; -import static org.mockito.Mockito.when; - -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerInfo; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.protocol - .proto.StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.protocol - .proto.StorageContainerDatanodeProtocolProtos.CommandStatus; -import org.apache.hadoop.hdds.protocol - .proto.StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.StorageReportProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.StorageTypeProto; -import org.apache.hadoop.hdds.scm.container.ContainerStateManager; -import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.node.SCMNodeManager; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.ThreadLocalRandom; - -/** - * Stateless helper functions to handler scm/datanode connection. - */ -public final class TestUtils { - - private static ThreadLocalRandom random = ThreadLocalRandom.current(); - - private TestUtils() { - } - - /** - * Creates DatanodeDetails with random UUID. - * - * @return DatanodeDetails - */ - public static DatanodeDetails randomDatanodeDetails() { - return createDatanodeDetails(UUID.randomUUID()); - } - - /** - * Creates DatanodeDetails using the given UUID. - * - * @param uuid Datanode's UUID - * - * @return DatanodeDetails - */ - private static DatanodeDetails createDatanodeDetails(UUID uuid) { - String ipAddress = random.nextInt(256) - + "." + random.nextInt(256) - + "." + random.nextInt(256) - + "." + random.nextInt(256); - return createDatanodeDetails(uuid.toString(), "localhost", ipAddress); - } - - /** - * Generates DatanodeDetails from RegisteredCommand. - * - * @param registeredCommand registration response from SCM - * - * @return DatanodeDetails - */ - public static DatanodeDetails getDatanodeDetails( - RegisteredCommand registeredCommand) { - return createDatanodeDetails(registeredCommand.getDatanodeUUID(), - registeredCommand.getHostName(), registeredCommand.getIpAddress()); - } - - /** - * Creates DatanodeDetails with the given information. 
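- * (The builder below registers STANDALONE, RATIS and REST ports for
- * the datanode, each with port number 0.)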
- * - * @param uuid Datanode's UUID - * @param hostname hostname of Datanode - * @param ipAddress ip address of Datanode - * - * @return DatanodeDetails - */ - private static DatanodeDetails createDatanodeDetails(String uuid, - String hostname, String ipAddress) { - DatanodeDetails.Port containerPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.STANDALONE, 0); - DatanodeDetails.Port ratisPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.RATIS, 0); - DatanodeDetails.Port restPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.REST, 0); - DatanodeDetails.Builder builder = DatanodeDetails.newBuilder(); - builder.setUuid(uuid) - .setHostName(hostname) - .setIpAddress(ipAddress) - .addPort(containerPort) - .addPort(ratisPort) - .addPort(restPort); - return builder.build(); - } - - /** - * Creates a random DatanodeDetails and register it with the given - * NodeManager. - * - * @param nodeManager NodeManager - * - * @return DatanodeDetails - */ - public static DatanodeDetails createRandomDatanodeAndRegister( - SCMNodeManager nodeManager) { - return getDatanodeDetails( - nodeManager.register(randomDatanodeDetails(), null, - getRandomPipelineReports())); - } - - /** - * Get specified number of DatanodeDetails and register them with node - * manager. - * - * @param nodeManager node manager to register the datanode ids. - * @param count number of DatanodeDetails needed. - * - * @return list of DatanodeDetails - */ - public static List getListOfRegisteredDatanodeDetails( - SCMNodeManager nodeManager, int count) { - ArrayList datanodes = new ArrayList<>(); - for (int i = 0; i < count; i++) { - datanodes.add(createRandomDatanodeAndRegister(nodeManager)); - } - return datanodes; - } - - /** - * Generates a random NodeReport. - * - * @return NodeReportProto - */ - public static NodeReportProto getRandomNodeReport() { - return getRandomNodeReport(1); - } - - /** - * Generates random NodeReport with the given number of storage report in it. - * - * @param numberOfStorageReport number of storage report this node report - * should have - * @return NodeReportProto - */ - public static NodeReportProto getRandomNodeReport(int numberOfStorageReport) { - UUID nodeId = UUID.randomUUID(); - return getRandomNodeReport(nodeId, File.separator + nodeId, - numberOfStorageReport); - } - - /** - * Generates random NodeReport for the given nodeId with the given - * base path and number of storage report in it. - * - * @param nodeId datanode id - * @param basePath base path of storage directory - * @param numberOfStorageReport number of storage report - * - * @return NodeReportProto - */ - public static NodeReportProto getRandomNodeReport(UUID nodeId, - String basePath, int numberOfStorageReport) { - List storageReports = new ArrayList<>(); - for (int i = 0; i < numberOfStorageReport; i++) { - storageReports.add(getRandomStorageReport(nodeId, - basePath + File.separator + i)); - } - return createNodeReport(storageReports); - } - - /** - * Creates NodeReport with the given storage reports. - * - * @param reports one or more storage report - * - * @return NodeReportProto - */ - public static NodeReportProto createNodeReport( - StorageReportProto... reports) { - return createNodeReport(Arrays.asList(reports)); - } - - /** - * Creates NodeReport with the given storage reports. - * - * @param reports storage reports to be included in the node report. 
- * - * @return NodeReportProto - */ - public static NodeReportProto createNodeReport( - List reports) { - NodeReportProto.Builder nodeReport = NodeReportProto.newBuilder(); - nodeReport.addAllStorageReport(reports); - return nodeReport.build(); - } - - /** - * Generates random storage report. - * - * @param nodeId datanode id for which the storage report belongs to - * @param path path of the storage - * - * @return StorageReportProto - */ - public static StorageReportProto getRandomStorageReport(UUID nodeId, - String path) { - return createStorageReport(nodeId, path, - random.nextInt(1000), - random.nextInt(500), - random.nextInt(500), - StorageTypeProto.DISK); - } - - /** - * Creates storage report with the given information. - * - * @param nodeId datanode id - * @param path storage dir - * @param capacity storage size - * @param used space used - * @param remaining space remaining - * @param type type of storage - * - * @return StorageReportProto - */ - public static StorageReportProto createStorageReport(UUID nodeId, String path, - long capacity, long used, long remaining, StorageTypeProto type) { - Preconditions.checkNotNull(nodeId); - Preconditions.checkNotNull(path); - StorageReportProto.Builder srb = StorageReportProto.newBuilder(); - srb.setStorageUuid(nodeId.toString()) - .setStorageLocation(path) - .setCapacity(capacity) - .setScmUsed(used) - .setRemaining(remaining); - StorageTypeProto storageTypeProto = - type == null ? StorageTypeProto.DISK : type; - srb.setStorageType(storageTypeProto); - return srb.build(); - } - - - /** - * Generates random container reports. - * - * @return ContainerReportsProto - */ - public static ContainerReportsProto getRandomContainerReports() { - return getRandomContainerReports(1); - } - - /** - * Generates random container report with the given number of containers. - * - * @param numberOfContainers number of containers to be in container report - * - * @return ContainerReportsProto - */ - public static ContainerReportsProto getRandomContainerReports( - int numberOfContainers) { - List containerInfos = new ArrayList<>(); - for (int i = 0; i < numberOfContainers; i++) { - containerInfos.add(getRandomContainerInfo(i)); - } - return getContainerReports(containerInfos); - } - - - public static PipelineReportsProto getRandomPipelineReports() { - return PipelineReportsProto.newBuilder().build(); - } - - /** - * Creates container report with the given ContainerInfo(s). - * - * @param containerInfos one or more ContainerInfo - * - * @return ContainerReportsProto - */ - public static ContainerReportsProto getContainerReports( - ContainerInfo... containerInfos) { - return getContainerReports(Arrays.asList(containerInfos)); - } - - /** - * Creates container report with the given ContainerInfo(s). - * - * @param containerInfos list of ContainerInfo - * - * @return ContainerReportsProto - */ - public static ContainerReportsProto getContainerReports( - List containerInfos) { - ContainerReportsProto.Builder - reportsBuilder = ContainerReportsProto.newBuilder(); - for (ContainerInfo containerInfo : containerInfos) { - reportsBuilder.addReports(containerInfo); - } - return reportsBuilder.build(); - } - - /** - * Generates random ContainerInfo. 
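- * (Size is fixed at 5 GB; key, used, read and write counters are
- * randomized and passed through createContainerInfo below.)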
- * - * @param containerId container id of the ContainerInfo - * - * @return ContainerInfo - */ - public static ContainerInfo getRandomContainerInfo(long containerId) { - return createContainerInfo(containerId, - OzoneConsts.GB * 5, - random.nextLong(1000), - OzoneConsts.GB * random.nextInt(5), - random.nextLong(1000), - OzoneConsts.GB * random.nextInt(2), - random.nextLong(1000), - OzoneConsts.GB * random.nextInt(5)); - } - - /** - * Creates ContainerInfo with the given details. - * - * @param containerId id of the container - * @param size size of container - * @param keyCount number of keys - * @param bytesUsed bytes used by the container - * @param readCount number of reads - * @param readBytes bytes read - * @param writeCount number of writes - * @param writeBytes bytes written - * - * @return ContainerInfo - */ - public static ContainerInfo createContainerInfo( - long containerId, long size, long keyCount, long bytesUsed, - long readCount, long readBytes, long writeCount, long writeBytes) { - return ContainerInfo.newBuilder() - .setContainerID(containerId) - .setSize(size) - .setKeyCount(keyCount) - .setUsed(bytesUsed) - .setReadCount(readCount) - .setReadBytes(readBytes) - .setWriteCount(writeCount) - .setWriteBytes(writeBytes) - .build(); - } - - /** - * Create Command Status report object. - * @return CommandStatusReportsProto - */ - public static CommandStatusReportsProto createCommandStatusReport( - List reports) { - CommandStatusReportsProto.Builder report = CommandStatusReportsProto - .newBuilder(); - report.addAllCmdStatus(reports); - return report.build(); - } - - public static - org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo - allocateContainer(ContainerStateManager containerStateManager) - throws IOException { - - PipelineSelector pipelineSelector = Mockito.mock(PipelineSelector.class); - - Pipeline pipeline = new Pipeline("leader", HddsProtos.LifeCycleState.CLOSED, - HddsProtos.ReplicationType.STAND_ALONE, - HddsProtos.ReplicationFactor.THREE, - PipelineID.randomId()); - - when(pipelineSelector - .getReplicationPipeline(HddsProtos.ReplicationType.STAND_ALONE, - HddsProtos.ReplicationFactor.THREE)).thenReturn(pipeline); - - return containerStateManager - .allocateContainer(pipelineSelector, - HddsProtos.ReplicationType.STAND_ALONE, - HddsProtos.ReplicationFactor.THREE, "root").getContainerInfo(); - - } - - public static void closeContainer(ContainerStateManager containerStateManager, - org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo - container) - throws SCMException { - - containerStateManager.getContainerStateMap() - .updateState(container, container.getState(), LifeCycleState.CLOSING); - - containerStateManager.getContainerStateMap() - .updateState(container, container.getState(), LifeCycleState.CLOSED); - - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java deleted file mode 100644 index e70e44405d25d..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java +++ /dev/null @@ -1,175 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.block; - -import java.util.UUID; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.container.ContainerMapping; -import org.apache.hadoop.hdds.scm.container.MockNodeManager; -import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.server.SCMStorage; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.hdds.server.events.EventHandler; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.common.Storage.StorageState; -import org.apache.hadoop.ozone.container.common.SCMTestUtils; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; - -import java.io.File; -import java.io.IOException; -import java.nio.file.Paths; - -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED; -import static org.apache.hadoop.ozone.OzoneConsts.GB; -import static org.apache.hadoop.ozone.OzoneConsts.MB; - - -/** - * Tests for SCM Block Manager. 
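 * (Editor's note: the call these tests exercise, using the class fields
 * defined below; a minimal sketch, not new behavior:
 *   AllocatedBlock block = blockManager.allocateBlock(
 *       DEFAULT_BLOCK_SIZE, type, factor, containerOwner);
 * Allocation only succeeds once SCM has left chill mode.)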
- */ -public class TestBlockManager implements EventHandler { - private static ContainerMapping mapping; - private static MockNodeManager nodeManager; - private static BlockManagerImpl blockManager; - private static File testDir; - private final static long DEFAULT_BLOCK_SIZE = 128 * MB; - private static HddsProtos.ReplicationFactor factor; - private static HddsProtos.ReplicationType type; - private static String containerOwner = "OZONE"; - private static EventQueue eventQueue; - - @Rule - public ExpectedException thrown = ExpectedException.none(); - - @Before - public void setUp() throws Exception { - Configuration conf = SCMTestUtils.getConf(); - - String path = GenericTestUtils - .getTempPath(TestBlockManager.class.getSimpleName()); - testDir = Paths.get(path).toFile(); - conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, path); - eventQueue = new EventQueue(); - boolean folderExisted = testDir.exists() || testDir.mkdirs(); - if (!folderExisted) { - throw new IOException("Unable to create test directory path"); - } - nodeManager = new MockNodeManager(true, 10); - mapping = new ContainerMapping(conf, nodeManager, 128, eventQueue); - blockManager = new BlockManagerImpl(conf, - nodeManager, mapping, eventQueue); - eventQueue.addHandler(SCMEvents.CHILL_MODE_STATUS, blockManager); - eventQueue.addHandler(SCMEvents.START_REPLICATION, this); - if(conf.getBoolean(ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, - ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT)){ - factor = HddsProtos.ReplicationFactor.THREE; - type = HddsProtos.ReplicationType.RATIS; - } else { - factor = HddsProtos.ReplicationFactor.ONE; - type = HddsProtos.ReplicationType.STAND_ALONE; - } - } - - @After - public void cleanup() throws IOException { - blockManager.close(); - mapping.close(); - FileUtil.fullyDelete(testDir); - } - - private static StorageContainerManager getScm(OzoneConfiguration conf) - throws IOException { - conf.setBoolean(OZONE_ENABLED, true); - SCMStorage scmStore = new SCMStorage(conf); - if(scmStore.getState() != StorageState.INITIALIZED) { - String clusterId = UUID.randomUUID().toString(); - String scmId = UUID.randomUUID().toString(); - scmStore.setClusterId(clusterId); - scmStore.setScmId(scmId); - // writes the version file properties - scmStore.initialize(); - } - return StorageContainerManager.createSCM(null, conf); - } - - @Test - public void testAllocateBlock() throws Exception { - eventQueue.fireEvent(SCMEvents.CHILL_MODE_STATUS, false); - GenericTestUtils.waitFor(() -> { - return !blockManager.isScmInChillMode(); - }, 10, 1000 * 5); - AllocatedBlock block = blockManager.allocateBlock(DEFAULT_BLOCK_SIZE, - type, factor, containerOwner); - Assert.assertNotNull(block); - } - - @Test - public void testAllocateOversizedBlock() throws Exception { - eventQueue.fireEvent(SCMEvents.CHILL_MODE_STATUS, false); - GenericTestUtils.waitFor(() -> { - return !blockManager.isScmInChillMode(); - }, 10, 1000 * 5); - long size = 6 * GB; - thrown.expectMessage("Unsupported block size"); - AllocatedBlock block = blockManager.allocateBlock(size, - type, factor, containerOwner); - } - - - @Test - public void testAllocateBlockFailureInChillMode() throws Exception { - eventQueue.fireEvent(SCMEvents.CHILL_MODE_STATUS, true); - GenericTestUtils.waitFor(() -> { - return blockManager.isScmInChillMode(); - }, 10, 1000 * 5); - // Test1: In chill mode expect an SCMException. 
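 // (Editor's note: a sketch, with assumed field names, of the gate inside
 // BlockManagerImpl.allocateBlock() that produces this failure:
 //   if (chillModePrecheck.isInChillMode()) {
 //     throw new SCMException("ChillModePrecheck failed for allocateBlock",
 //         SCMException.ResultCodes.CHILL_MODE_EXCEPTION);
 //   }
 // The expectMessage below matches that exception text.)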
- thrown.expectMessage("ChillModePrecheck failed for " - + "allocateBlock"); - blockManager.allocateBlock(DEFAULT_BLOCK_SIZE, - type, factor, containerOwner); - } - - @Test - public void testAllocateBlockSucInChillMode() throws Exception { - // Test2: Exit chill mode and then try allocateBock again. - eventQueue.fireEvent(SCMEvents.CHILL_MODE_STATUS, false); - GenericTestUtils.waitFor(() -> { - return !blockManager.isScmInChillMode(); - }, 10, 1000 * 5); - Assert.assertNotNull(blockManager.allocateBlock(DEFAULT_BLOCK_SIZE, - type, factor, containerOwner)); - } - - @Override - public void onMessage(Boolean aBoolean, EventPublisher publisher) { - System.out.println("test"); - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java deleted file mode 100644 index 9f0e336df1fef..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java +++ /dev/null @@ -1,403 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.block; - -import org.apache.commons.io.FileUtils; -import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.hdds.scm.container.ContainerMapping; -import org.apache.hadoop.hdds.scm.container.Mapping; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto - .DeleteBlockTransactionResult; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.utils.MetadataKeyFilters; -import org.apache.hadoop.utils.MetadataStore; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mockito; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Random; -import java.util.UUID; -import java.util.stream.Collectors; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_BLOCK_DELETION_MAX_RETRY; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_DIRS; -import static org.mockito.Matchers.anyLong; -import static org.mockito.Mockito.when; - -/** - * Tests for DeletedBlockLog. 
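 * (Editor's note: the transaction lifecycle these tests walk, sketched
 * with the helpers defined below:
 *   deletedBlockLog.addTransaction(containerID, blockIds);    // create
 *   List<DeletedBlocksTransaction> txs = getTransactions(20); // fetch
 *   deletedBlockLog.incrementCount(txIDs);                    // retry count
 *   commitTransactions(txs);                                  // acknowledge
 * )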
- */ -public class TestDeletedBlockLog { - - private static DeletedBlockLogImpl deletedBlockLog; - private OzoneConfiguration conf; - private File testDir; - private Mapping containerManager; - private List dnList; - - @Before - public void setup() throws Exception { - testDir = GenericTestUtils.getTestDir( - TestDeletedBlockLog.class.getSimpleName()); - conf = new OzoneConfiguration(); - conf.setInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20); - conf.set(OZONE_METADATA_DIRS, testDir.getAbsolutePath()); - containerManager = Mockito.mock(ContainerMapping.class); - deletedBlockLog = new DeletedBlockLogImpl(conf, containerManager); - dnList = new ArrayList<>(3); - setupContainerManager(); - } - - private void setupContainerManager() throws IOException { - dnList.add( - DatanodeDetails.newBuilder().setUuid(UUID.randomUUID().toString()) - .build()); - dnList.add( - DatanodeDetails.newBuilder().setUuid(UUID.randomUUID().toString()) - .build()); - dnList.add( - DatanodeDetails.newBuilder().setUuid(UUID.randomUUID().toString()) - .build()); - - ContainerInfo containerInfo = - new ContainerInfo.Builder().setContainerID(1).build(); - Pipeline pipeline = - new Pipeline(null, LifeCycleState.CLOSED, - ReplicationType.RATIS, ReplicationFactor.THREE, null); - pipeline.addMember(dnList.get(0)); - pipeline.addMember(dnList.get(1)); - pipeline.addMember(dnList.get(2)); - ContainerWithPipeline containerWithPipeline = - new ContainerWithPipeline(containerInfo, pipeline); - when(containerManager.getContainerWithPipeline(anyLong())) - .thenReturn(containerWithPipeline); - when(containerManager.getContainer(anyLong())).thenReturn(containerInfo); - } - - @After - public void tearDown() throws Exception { - deletedBlockLog.close(); - FileUtils.deleteDirectory(testDir); - } - - private Map> generateData(int dataSize) { - Map> blockMap = new HashMap<>(); - Random random = new Random(1); - int continerIDBase = random.nextInt(100); - int localIDBase = random.nextInt(1000); - for (int i = 0; i < dataSize; i++) { - long containerID = continerIDBase + i; - List blocks = new ArrayList<>(); - int blockSize = random.nextInt(30) + 1; - for (int j = 0; j < blockSize; j++) { - long localID = localIDBase + j; - blocks.add(localID); - } - blockMap.put(containerID, blocks); - } - return blockMap; - } - - private void commitTransactions( - List transactionResults, - DatanodeDetails... dns) { - for (DatanodeDetails dnDetails : dns) { - deletedBlockLog - .commitTransactions(transactionResults, dnDetails.getUuid()); - } - } - - private void commitTransactions( - List transactionResults) { - commitTransactions(transactionResults, - dnList.toArray(new DatanodeDetails[3])); - } - - private void commitTransactions( - Collection deletedBlocksTransactions, - DatanodeDetails... 
dns) { - commitTransactions(deletedBlocksTransactions.stream() - .map(this::createDeleteBlockTransactionResult) - .collect(Collectors.toList()), dns); - } - - private void commitTransactions( - Collection deletedBlocksTransactions) { - commitTransactions(deletedBlocksTransactions.stream() - .map(this::createDeleteBlockTransactionResult) - .collect(Collectors.toList())); - } - - private DeleteBlockTransactionResult createDeleteBlockTransactionResult( - DeletedBlocksTransaction transaction) { - return DeleteBlockTransactionResult.newBuilder() - .setContainerID(transaction.getContainerID()).setSuccess(true) - .setTxID(transaction.getTxID()).build(); - } - - private List getTransactions( - int maximumAllowedTXNum) throws IOException { - DatanodeDeletedBlockTransactions transactions = - new DatanodeDeletedBlockTransactions(containerManager, - maximumAllowedTXNum, 3); - deletedBlockLog.getTransactions(transactions); - return transactions.getDatanodeTransactions(dnList.get(0).getUuid()); - } - - @Test - public void testIncrementCount() throws Exception { - int maxRetry = conf.getInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20); - - // Create 30 TXs in the log. - for (Map.Entry> entry : generateData(30).entrySet()){ - deletedBlockLog.addTransaction(entry.getKey(), entry.getValue()); - } - - // This will return all TXs, total num 30. - List blocks = - getTransactions(40); - List txIDs = blocks.stream().map(DeletedBlocksTransaction::getTxID) - .collect(Collectors.toList()); - - for (int i = 0; i < maxRetry; i++) { - deletedBlockLog.incrementCount(txIDs); - } - - // Increment another time so it exceed the maxRetry. - // On this call, count will be set to -1 which means TX eventually fails. - deletedBlockLog.incrementCount(txIDs); - blocks = getTransactions(40); - for (DeletedBlocksTransaction block : blocks) { - Assert.assertEquals(-1, block.getCount()); - } - - // If all TXs are failed, getTransactions call will always return nothing. - blocks = getTransactions(40); - Assert.assertEquals(blocks.size(), 0); - } - - @Test - public void testCommitTransactions() throws Exception { - for (Map.Entry> entry : generateData(50).entrySet()){ - deletedBlockLog.addTransaction(entry.getKey(), entry.getValue()); - } - List blocks = - getTransactions(20); - // Add an invalid txn. - blocks.add( - DeletedBlocksTransaction.newBuilder().setContainerID(1).setTxID(70) - .setCount(0).addLocalID(0).build()); - commitTransactions(blocks); - blocks.remove(blocks.size() - 1); - - blocks = getTransactions(50); - Assert.assertEquals(30, blocks.size()); - commitTransactions(blocks, dnList.get(1), dnList.get(2), - DatanodeDetails.newBuilder().setUuid(UUID.randomUUID().toString()) - .build()); - - blocks = getTransactions(50); - Assert.assertEquals(30, blocks.size()); - commitTransactions(blocks, dnList.get(0)); - - blocks = getTransactions(50); - Assert.assertEquals(0, blocks.size()); - } - - @Test - public void testRandomOperateTransactions() throws Exception { - Random random = new Random(); - int added = 0, committed = 0; - List blocks = new ArrayList<>(); - List txIDs = new ArrayList<>(); - byte[] latestTxid = DFSUtil.string2Bytes("#LATEST_TXID#"); - MetadataKeyFilters.MetadataKeyFilter avoidLatestTxid = - (preKey, currentKey, nextKey) -> - !Arrays.equals(latestTxid, currentKey); - MetadataStore store = deletedBlockLog.getDeletedStore(); - // Randomly add/get/commit/increase transactions. 
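 // (Editor's note: of the four random states below, 0 adds ten new TXs,
 // 1 fetches a batch and bumps retry counts, 2 commits the last fetched
 // batch, and 3 verifies the invariant added == persisted + committed
 // via getRangeKVs.)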
- for (int i = 0; i < 100; i++) { - int state = random.nextInt(4); - if (state == 0) { - for (Map.Entry> entry : - generateData(10).entrySet()){ - deletedBlockLog.addTransaction(entry.getKey(), entry.getValue()); - } - added += 10; - } else if (state == 1) { - blocks = getTransactions(20); - txIDs = new ArrayList<>(); - for (DeletedBlocksTransaction block : blocks) { - txIDs.add(block.getTxID()); - } - deletedBlockLog.incrementCount(txIDs); - } else if (state == 2) { - commitTransactions(blocks); - committed += blocks.size(); - blocks = new ArrayList<>(); - } else { - // verify the number of added and committed. - List> result = - store.getRangeKVs(null, added, avoidLatestTxid); - Assert.assertEquals(added, result.size() + committed); - } - } - blocks = getTransactions(1000); - commitTransactions(blocks); - } - - @Test - public void testPersistence() throws Exception { - for (Map.Entry> entry : generateData(50).entrySet()){ - deletedBlockLog.addTransaction(entry.getKey(), entry.getValue()); - } - // close db and reopen it again to make sure - // transactions are stored persistently. - deletedBlockLog.close(); - deletedBlockLog = new DeletedBlockLogImpl(conf, containerManager); - List blocks = - getTransactions(10); - commitTransactions(blocks); - blocks = getTransactions(100); - Assert.assertEquals(40, blocks.size()); - commitTransactions(blocks); - } - - @Test - public void testDeletedBlockTransactions() throws IOException { - int txNum = 10; - int maximumAllowedTXNum = 5; - List blocks = null; - List containerIDs = new LinkedList<>(); - DatanodeDetails dnId1 = dnList.get(0), dnId2 = dnList.get(1); - - int count = 0; - long containerID = 0L; - - // Creates {TXNum} TX in the log. - for (Map.Entry> entry : generateData(txNum) - .entrySet()) { - count++; - containerID = entry.getKey(); - containerIDs.add(containerID); - deletedBlockLog.addTransaction(containerID, entry.getValue()); - - // make TX[1-6] for datanode1; TX[7-10] for datanode2 - if (count <= (maximumAllowedTXNum + 1)) { - mockContainerInfo(containerID, dnId1); - } else { - mockContainerInfo(containerID, dnId2); - } - } - - DatanodeDeletedBlockTransactions transactions = - new DatanodeDeletedBlockTransactions(containerManager, - maximumAllowedTXNum, 2); - deletedBlockLog.getTransactions(transactions); - - for (UUID id : transactions.getDatanodeIDs()) { - List txs = transactions - .getDatanodeTransactions(id); - // delete TX ID - commitTransactions(txs); - } - - blocks = getTransactions(txNum); - // There should be one block remained since dnID1 reaches - // the maximum value (5). - Assert.assertEquals(1, blocks.size()); - - Assert.assertFalse(transactions.isFull()); - // The number of TX in dnID1 won't more than maximum value. - Assert.assertEquals(maximumAllowedTXNum, - transactions.getDatanodeTransactions(dnId1.getUuid()).size()); - - int size = transactions.getDatanodeTransactions(dnId2.getUuid()).size(); - // add duplicated container in dnID2, this should be failed. - DeletedBlocksTransaction.Builder builder = - DeletedBlocksTransaction.newBuilder(); - builder.setTxID(11); - builder.setContainerID(containerID); - builder.setCount(0); - transactions.addTransaction(builder.build(), - null); - - // The number of TX in dnID2 should not be changed. - Assert.assertEquals(size, - transactions.getDatanodeTransactions(dnId2.getUuid()).size()); - - // Add new TX in dnID2, then dnID2 will reach maximum value. 
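 // (Editor's note: dnId2 currently holds txNum - (maximumAllowedTXNum + 1)
 // = 4 transactions, so one more TX on a fresh container pushes it to the
 // per-datanode cap of 5, at which point transactions.isFull() flips.)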
- containerID = RandomUtils.nextLong(); - builder = DeletedBlocksTransaction.newBuilder(); - builder.setTxID(12); - builder.setContainerID(containerID); - builder.setCount(0); - mockContainerInfo(containerID, dnId2); - transactions.addTransaction(builder.build(), - null); - // Since all node are full, then transactions is full. - Assert.assertTrue(transactions.isFull()); - } - - private void mockContainerInfo(long containerID, DatanodeDetails dd) - throws IOException { - Pipeline pipeline = - new Pipeline("fake", LifeCycleState.OPEN, - ReplicationType.STAND_ALONE, ReplicationFactor.ONE, - PipelineID.randomId()); - pipeline.addMember(dd); - - ContainerInfo.Builder builder = new ContainerInfo.Builder(); - builder.setPipelineID(pipeline.getId()) - .setReplicationType(pipeline.getType()) - .setReplicationFactor(pipeline.getFactor()); - - ContainerInfo containerInfo = builder.build(); - ContainerWithPipeline containerWithPipeline = new ContainerWithPipeline( - containerInfo, pipeline); - Mockito.doReturn(containerInfo).when(containerManager) - .getContainer(containerID); - Mockito.doReturn(containerWithPipeline).when(containerManager) - .getContainerWithPipeline(containerID); - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/package-info.java deleted file mode 100644 index a67df6982aae4..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/** - * Make checkstyle happy. - * */ -package org.apache.hadoop.hdds.scm.block; \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java deleted file mode 100644 index afa25e2af7bd2..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java +++ /dev/null @@ -1,135 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.command; - -import org.apache.hadoop.hdds.HddsIdFactory; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.CommandStatus; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher - .CommandStatusReportFromDatanode; - -import org.apache.hadoop.hdds.server.events.Event; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Before; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.UUID; - -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.assertFalse; - -public class TestCommandStatusReportHandler implements EventPublisher { - - private static final Logger LOG = LoggerFactory - .getLogger(TestCommandStatusReportHandler.class); - private CommandStatusReportHandler cmdStatusReportHandler; - private String storagePath = GenericTestUtils.getRandomizedTempPath() - .concat("/" + UUID.randomUUID().toString()); - - @Before - public void setup() { - cmdStatusReportHandler = new CommandStatusReportHandler(); - } - - @Test - public void testCommandStatusReport() { - GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer - .captureLogs(LOG); - - CommandStatusReportFromDatanode report = this.getStatusReport(Collections - .emptyList()); - cmdStatusReportHandler.onMessage(report, this); - assertFalse(logCapturer.getOutput().contains("Delete_Block_Status")); - assertFalse(logCapturer.getOutput().contains( - "Close_Container_Command_Status")); - assertFalse(logCapturer.getOutput().contains("Replicate_Command_Status")); - - - report = this.getStatusReport(this.getCommandStatusList()); - cmdStatusReportHandler.onMessage(report, this); - assertTrue(logCapturer.getOutput().contains("firing event of type " + - "Delete_Block_Status")); - assertTrue(logCapturer.getOutput().contains("firing event of type " + - "Close_Container_Command_Status")); - assertTrue(logCapturer.getOutput().contains("firing event of type " + - "Replicate_Command_Status")); - - assertTrue(logCapturer.getOutput().contains("type: " + - "closeContainerCommand")); - assertTrue(logCapturer.getOutput().contains("type: " + - "deleteBlocksCommand")); - assertTrue(logCapturer.getOutput().contains("type: " + - "replicateContainerCommand")); - - } - - private CommandStatusReportFromDatanode getStatusReport( - List reports) { - CommandStatusReportsProto report = TestUtils.createCommandStatusReport( - reports); - DatanodeDetails dn = TestUtils.randomDatanodeDetails(); - return new 
SCMDatanodeHeartbeatDispatcher.CommandStatusReportFromDatanode( - dn, report); - } - - @Override - public > void fireEvent - (EVENT_TYPE event, PAYLOAD payload) { - LOG.info("firing event of type {}, payload {}", event.getName(), payload - .toString()); - } - - private List getCommandStatusList() { - List reports = new ArrayList<>(3); - - // Add status message for replication, close container and delete block - // command. - CommandStatus.Builder builder = CommandStatus.newBuilder(); - - builder.setCmdId(HddsIdFactory.getLongId()) - .setStatus(CommandStatus.Status.EXECUTED) - .setType(Type.deleteBlocksCommand); - reports.add(builder.build()); - - builder.setCmdId(HddsIdFactory.getLongId()) - .setStatus(CommandStatus.Status.EXECUTED) - .setType(Type.closeContainerCommand); - reports.add(builder.build()); - - builder.setMsg("Not enough space") - .setCmdId(HddsIdFactory.getLongId()) - .setStatus(CommandStatus.Status.FAILED) - .setType(Type.replicateContainerCommand); - reports.add(builder.build()); - return reports; - } - -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/package-info.java deleted file mode 100644 index f529c20e74e0e..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Make CheckStyle Happy. - */ -package org.apache.hadoop.hdds.scm.command; \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java deleted file mode 100644 index 3221053573081..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java +++ /dev/null @@ -1,592 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container; - -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.node.states.Node2ContainerMap; -import org.apache.hadoop.hdds.scm.node.states.Node2PipelineMap; -import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto; -import org.apache.hadoop.hdds.scm.node.states.ReportResult; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.protocol.VersionResponse; -import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; -import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.assertj.core.util.Preconditions; - -import java.io.IOException; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; - -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState - .HEALTHY; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE; - -/** - * Test Helper for testing container Mapping. 
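 * (Editor's note: typical construction, as the tests in this module do:
 *   MockNodeManager nodeManager = new MockNodeManager(true, 10);
 * The fake nodes draw capacity/used stats from the static NODES table
 * below, whose last three profiles are stale, stale and dead.)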
- */ -public class MockNodeManager implements NodeManager { - private final static NodeData[] NODES = { - new NodeData(10L * OzoneConsts.TB, OzoneConsts.GB), - new NodeData(64L * OzoneConsts.TB, 100 * OzoneConsts.GB), - new NodeData(128L * OzoneConsts.TB, 256 * OzoneConsts.GB), - new NodeData(40L * OzoneConsts.TB, OzoneConsts.TB), - new NodeData(256L * OzoneConsts.TB, 200 * OzoneConsts.TB), - new NodeData(20L * OzoneConsts.TB, 10 * OzoneConsts.GB), - new NodeData(32L * OzoneConsts.TB, 16 * OzoneConsts.TB), - new NodeData(OzoneConsts.TB, 900 * OzoneConsts.GB), - new NodeData(OzoneConsts.TB, 900 * OzoneConsts.GB, NodeData.STALE), - new NodeData(OzoneConsts.TB, 200L * OzoneConsts.GB, NodeData.STALE), - new NodeData(OzoneConsts.TB, 200L * OzoneConsts.GB, NodeData.DEAD) - }; - private final List healthyNodes; - private final List staleNodes; - private final List deadNodes; - private final Map nodeMetricMap; - private final SCMNodeStat aggregateStat; - private boolean chillmode; - private final Map> commandMap; - private final Node2PipelineMap node2PipelineMap; - private final Node2ContainerMap node2ContainerMap; - - public MockNodeManager(boolean initializeFakeNodes, int nodeCount) { - this.healthyNodes = new LinkedList<>(); - this.staleNodes = new LinkedList<>(); - this.deadNodes = new LinkedList<>(); - this.nodeMetricMap = new HashMap<>(); - this.node2PipelineMap = new Node2PipelineMap(); - this.node2ContainerMap = new Node2ContainerMap(); - aggregateStat = new SCMNodeStat(); - if (initializeFakeNodes) { - for (int x = 0; x < nodeCount; x++) { - DatanodeDetails dd = TestUtils.randomDatanodeDetails(); - populateNodeMetric(dd, x); - } - } - chillmode = false; - this.commandMap = new HashMap<>(); - } - - /** - * Invoked from ctor to create some node Metrics. - * - * @param datanodeDetails - Datanode details - */ - private void populateNodeMetric(DatanodeDetails datanodeDetails, int x) { - SCMNodeStat newStat = new SCMNodeStat(); - long remaining = - NODES[x % NODES.length].capacity - NODES[x % NODES.length].used; - newStat.set( - (NODES[x % NODES.length].capacity), - (NODES[x % NODES.length].used), remaining); - this.nodeMetricMap.put(datanodeDetails.getUuid(), newStat); - aggregateStat.add(newStat); - - if (NODES[x % NODES.length].getCurrentState() == NodeData.HEALTHY) { - healthyNodes.add(datanodeDetails); - } - - if (NODES[x % NODES.length].getCurrentState() == NodeData.STALE) { - staleNodes.add(datanodeDetails); - } - - if (NODES[x % NODES.length].getCurrentState() == NodeData.DEAD) { - deadNodes.add(datanodeDetails); - } - - } - - /** - * Sets the chill mode value. - * @param chillmode boolean - */ - public void setChillmode(boolean chillmode) { - this.chillmode = chillmode; - } - - /** - * Removes a data node from the management of this Node Manager. - * - * @param node - DataNode. - * @throws NodeNotFoundException - */ - @Override - public void removeNode(DatanodeDetails node) - throws NodeNotFoundException { - - } - - /** - * Gets all Live Datanodes that is currently communicating with SCM. - * - * @param nodestate - State of the node - * @return List of Datanodes that are Heartbeating SCM. - */ - @Override - public List getNodes(HddsProtos.NodeState nodestate) { - if (nodestate == HEALTHY) { - return healthyNodes; - } - - if (nodestate == STALE) { - return staleNodes; - } - - if (nodestate == DEAD) { - return deadNodes; - } - - return null; - } - - /** - * Returns the Number of Datanodes that are communicating with SCM. 
- * - * @param nodestate - State of the node - * @return int -- count - */ - @Override - public int getNodeCount(HddsProtos.NodeState nodestate) { - List nodes = getNodes(nodestate); - if (nodes != null) { - return nodes.size(); - } - return 0; - } - - /** - * Get all datanodes known to SCM. - * - * @return List of DatanodeDetails known to SCM. - */ - @Override - public List getAllNodes() { - return null; - } - - /** - * Get the minimum number of nodes to get out of chill mode. - * - * @return int - */ - @Override - public int getMinimumChillModeNodes() { - return 0; - } - - /** - * Chill mode is the period when node manager waits for a minimum configured - * number of datanodes to report in. This is called chill mode to indicate the - * period before node manager gets into action. - *
- * Forcefully exits the chill mode, even if we have not met the minimum - * criteria of the nodes reporting in. - */ - @Override - public void forceExitChillMode() { - - } - - /** - * Puts the node manager into manual chill mode. - */ - @Override - public void enterChillMode() { - - } - - /** - * Brings node manager out of manual chill mode. - */ - @Override - public void exitChillMode() { - - } - - /** - * Returns true if node manager is out of chill mode, else false. - * @return true if out of chill mode, else false - */ - @Override - public boolean isOutOfChillMode() { - return !chillmode; - } - - /** - * Returns a chill mode status string. - * - * @return String - */ - @Override - public String getChillModeStatus() { - return null; - } - - /** - * Returns the aggregated node stats. - * @return the aggregated node stats. - */ - @Override - public SCMNodeStat getStats() { - return aggregateStat; - } - - /** - * Return a map of nodes to their stats. - * @return a list of individual node stats (live/stale but not dead). - */ - @Override - public Map getNodeStats() { - return nodeMetricMap; - } - - /** - * Return the node stat of the specified datanode. - * @param datanodeDetails - datanode details. - * @return node stat if it is live/stale, null if it is decommissioned or - * doesn't exist. - */ - @Override - public SCMNodeMetric getNodeStat(DatanodeDetails datanodeDetails) { - SCMNodeStat stat = nodeMetricMap.get(datanodeDetails.getUuid()); - if (stat == null) { - return null; - } - return new SCMNodeMetric(stat); - } - - /** - * Returns the node state of a specific node. - * - * @param dd - DatanodeDetails - * @return Healthy/Stale/Dead. - */ - @Override - public HddsProtos.NodeState getNodeState(DatanodeDetails dd) { - return null; - } - - /** - * Get set of pipelines a datanode is part of. - * @param dnId - datanodeID - * @return Set of PipelineID - */ - @Override - public Set getPipelineByDnID(UUID dnId) { - return node2PipelineMap.getPipelines(dnId); - } - - /** - * Add pipeline information in the NodeManager. - * @param pipeline - Pipeline to be added - */ - @Override - public void addPipeline(Pipeline pipeline) { - node2PipelineMap.addPipeline(pipeline); - } - - /** - * Remove a pipeline information from the NodeManager. - * @param pipeline - Pipeline to be removed - */ - @Override - public void removePipeline(Pipeline pipeline) { - node2PipelineMap.removePipeline(pipeline); - } - - @Override - public void addDatanodeCommand(UUID dnId, SCMCommand command) { - if(commandMap.containsKey(dnId)) { - List commandList = commandMap.get(dnId); - Preconditions.checkNotNull(commandList); - commandList.add(command); - } else { - List commandList = new LinkedList<>(); - commandList.add(command); - commandMap.put(dnId, commandList); - } - } - - /** - * Empty implementation for processNodeReport. - * - * @param dnUuid - * @param nodeReport - */ - @Override - public void processNodeReport(UUID dnUuid, NodeReportProto nodeReport) { - // do nothing - } - - /** - * Update set of containers available on a datanode. - * @param uuid - DatanodeID - * @param containerIds - Set of containerIDs - * @throws SCMException - if datanode is not known. For new datanode use - * addDatanodeInContainerMap call. - */ - @Override - public void setContainersForDatanode(UUID uuid, Set containerIds) - throws SCMException { - node2ContainerMap.setContainersForDatanode(uuid, containerIds); - } - - /** - * Process containerReport received from datanode. 
- * @param uuid - DataonodeID - * @param containerIds - Set of containerIDs - * @return The result after processing containerReport - */ - @Override - public ReportResult processContainerReport(UUID uuid, - Set containerIds) { - return node2ContainerMap.processReport(uuid, containerIds); - } - - /** - * Return set of containerIDs available on a datanode. - * @param uuid - DatanodeID - * @return - set of containerIDs - */ - @Override - public Set getContainers(UUID uuid) { - return node2ContainerMap.getContainers(uuid); - } - - /** - * Insert a new datanode with set of containerIDs for containers available - * on it. - * @param uuid - DatanodeID - * @param containerIDs - Set of ContainerIDs - * @throws SCMException - if datanode already exists - */ - @Override - public void addDatanodeInContainerMap(UUID uuid, - Set containerIDs) throws SCMException { - node2ContainerMap.insertNewDatanode(uuid, containerIDs); - } - - // Returns the number of commands that is queued to this node manager. - public int getCommandCount(DatanodeDetails dd) { - List list = commandMap.get(dd.getUuid()); - return (list == null) ? 0 : list.size(); - } - - public void clearCommandQueue(UUID dnId) { - if(commandMap.containsKey(dnId)) { - commandMap.put(dnId, new LinkedList<>()); - } - } - - /** - * Closes this stream and releases any system resources associated with it. If - * the stream is already closed then invoking this method has no effect. - *
- *
As noted in {@link AutoCloseable#close()}, cases where the close may - * fail require careful attention. It is strongly advised to relinquish the - * underlying resources and to internally mark the {@code Closeable} - * as closed, prior to throwing the {@code IOException}. - * - * @throws IOException if an I/O error occurs - */ - @Override - public void close() throws IOException { - - } - - /** - * Gets the version info from SCM. - * - * @param versionRequest - version Request. - * @return - returns SCM version info and other required information needed by - * datanode. - */ - @Override - public VersionResponse getVersion(SCMVersionRequestProto versionRequest) { - return null; - } - - /** - * Register the node if the node finds that it is not registered with any - * SCM. - * - * @param datanodeDetails DatanodeDetails - * @param nodeReport NodeReportProto - * @return SCMHeartbeatResponseProto - */ - @Override - public RegisteredCommand register(DatanodeDetails datanodeDetails, - NodeReportProto nodeReport, PipelineReportsProto pipelineReportsProto) { - return null; - } - - /** - * Send heartbeat to indicate the datanode is alive and doing well. - * - * @param datanodeDetails - Datanode ID. - * @return SCMheartbeat response list - */ - @Override - public List processHeartbeat(DatanodeDetails datanodeDetails) { - return null; - } - - @Override - public Map getNodeCount() { - Map nodeCountMap = new HashMap(); - for (HddsProtos.NodeState state : HddsProtos.NodeState.values()) { - nodeCountMap.put(state.toString(), getNodeCount(state)); - } - return nodeCountMap; - } - - /** - * Makes it easy to add a container. - * - * @param datanodeDetails datanode details - * @param size number of bytes. - */ - public void addContainer(DatanodeDetails datanodeDetails, long size) { - SCMNodeStat stat = this.nodeMetricMap.get(datanodeDetails.getUuid()); - if (stat != null) { - aggregateStat.subtract(stat); - stat.getCapacity().add(size); - aggregateStat.add(stat); - nodeMetricMap.put(datanodeDetails.getUuid(), stat); - } - } - - /** - * Makes it easy to simulate a delete of a container. - * - * @param datanodeDetails datanode Details - * @param size number of bytes. - */ - public void delContainer(DatanodeDetails datanodeDetails, long size) { - SCMNodeStat stat = this.nodeMetricMap.get(datanodeDetails.getUuid()); - if (stat != null) { - aggregateStat.subtract(stat); - stat.getCapacity().subtract(size); - aggregateStat.add(stat); - nodeMetricMap.put(datanodeDetails.getUuid(), stat); - } - } - - @Override - public void onMessage(CommandForDatanode commandForDatanode, - EventPublisher publisher) { - addDatanodeCommand(commandForDatanode.getDatanodeId(), - commandForDatanode.getCommand()); - } - - /** - * Remove the node stats and update the storage stats - * in this Node Manager. - * - * @param dnUuid UUID of the datanode. - */ - @Override - public void processDeadNode(UUID dnUuid) { - SCMNodeStat stat = this.nodeMetricMap.get(dnUuid); - if (stat != null) { - aggregateStat.subtract(stat); - stat.set(0, 0, 0); - } - } - - /** - * A class to declare some values for the nodes so that our tests - * won't fail. - */ - private static class NodeData { - public static final long HEALTHY = 1; - public static final long STALE = 2; - public static final long DEAD = 3; - - private long capacity; - private long used; - - private long currentState; - - /** - * By default nodes are healthy. 
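 * (Editor's note: equivalent to NodeData(capacity, used, HEALTHY) via the
 * delegating constructor below.)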
- * @param capacity - * @param used - */ - NodeData(long capacity, long used) { - this(capacity, used, HEALTHY); - } - - /** - * Constructs a nodeDefinition. - * - * @param capacity capacity. - * @param used used. - * @param currentState - Healthy, Stale and DEAD nodes. - */ - NodeData(long capacity, long used, long currentState) { - this.capacity = capacity; - this.used = used; - this.currentState = currentState; - } - - public long getCapacity() { - return capacity; - } - - public void setCapacity(long capacity) { - this.capacity = capacity; - } - - public long getUsed() { - return used; - } - - public void setUsed(long used) { - this.used = used; - } - - public long getCurrentState() { - return currentState; - } - - public void setCurrentState(long currentState) { - this.currentState = currentState; - } - - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java deleted file mode 100644 index 38050c9d0371a..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java +++ /dev/null @@ -1,177 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.container; - -import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.container.common.helpers - .ContainerWithPipeline; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.container.common.SCMTestUtils; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.io.File; -import java.io.IOException; - -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent.CREATED; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE; -import static org.apache.hadoop.hdds.scm.events.SCMEvents.CLOSE_CONTAINER; -import static org.apache.hadoop.hdds.scm.events.SCMEvents.DATANODE_COMMAND; - -/** - * Tests the closeContainerEventHandler class. - */ -public class TestCloseContainerEventHandler { - - private static Configuration configuration; - private static MockNodeManager nodeManager; - private static ContainerMapping mapping; - private static long size; - private static File testDir; - private static EventQueue eventQueue; - - @BeforeClass - public static void setUp() throws Exception { - configuration = SCMTestUtils.getConf(); - size = (long)configuration.getStorageSize(OZONE_SCM_CONTAINER_SIZE, - OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES); - testDir = GenericTestUtils - .getTestDir(TestCloseContainerEventHandler.class.getSimpleName()); - configuration - .set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); - nodeManager = new MockNodeManager(true, 10); - mapping = new ContainerMapping(configuration, nodeManager, 128, - new EventQueue()); - eventQueue = new EventQueue(); - eventQueue.addHandler(CLOSE_CONTAINER, - new CloseContainerEventHandler(mapping)); - eventQueue.addHandler(DATANODE_COMMAND, nodeManager); - } - - @AfterClass - public static void tearDown() throws Exception { - if (mapping != null) { - mapping.close(); - } - FileUtil.fullyDelete(testDir); - } - - @Test - public void testIfCloseContainerEventHadnlerInvoked() { - GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer - .captureLogs(CloseContainerEventHandler.LOG); - eventQueue.fireEvent(CLOSE_CONTAINER, - new ContainerID(Math.abs(RandomUtils.nextInt()))); - eventQueue.processAll(1000); - Assert.assertTrue(logCapturer.getOutput() - .contains("Close container Event triggered for container")); - } - - @Test - public void testCloseContainerEventWithInvalidContainer() { - long id = Math.abs(RandomUtils.nextInt()); - GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer - .captureLogs(CloseContainerEventHandler.LOG); - eventQueue.fireEvent(CLOSE_CONTAINER, - new ContainerID(id)); - eventQueue.processAll(1000); - 
Assert.assertTrue(logCapturer.getOutput() - .contains("Failed to update the container state")); - } - - @Test - public void testCloseContainerEventWithValidContainers() throws IOException { - - GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer - .captureLogs(CloseContainerEventHandler.LOG); - ContainerWithPipeline containerWithPipeline = mapping - .allocateContainer(HddsProtos.ReplicationType.STAND_ALONE, - HddsProtos.ReplicationFactor.ONE, "ozone"); - ContainerID id = new ContainerID( - containerWithPipeline.getContainerInfo().getContainerID()); - DatanodeDetails datanode = containerWithPipeline.getPipeline().getLeader(); - int closeCount = nodeManager.getCommandCount(datanode); - eventQueue.fireEvent(CLOSE_CONTAINER, id); - eventQueue.processAll(1000); - // At this point of time, the allocated container is not in open - // state, so firing close container event should not queue CLOSE - // command in the Datanode - Assert.assertEquals(0, nodeManager.getCommandCount(datanode)); - //Execute these state transitions so that we can close the container. - mapping.updateContainerState(id.getId(), CREATED); - eventQueue.fireEvent(CLOSE_CONTAINER, - new ContainerID( - containerWithPipeline.getContainerInfo().getContainerID())); - eventQueue.processAll(1000); - Assert.assertEquals(closeCount + 1, - nodeManager.getCommandCount(datanode)); - Assert.assertEquals(HddsProtos.LifeCycleState.CLOSING, - mapping.getStateManager().getContainer(id).getState()); - } - - @Test - public void testCloseContainerEventWithRatis() throws IOException { - - GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer - .captureLogs(CloseContainerEventHandler.LOG); - ContainerWithPipeline containerWithPipeline = mapping - .allocateContainer(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, "ozone"); - ContainerID id = new ContainerID( - containerWithPipeline.getContainerInfo().getContainerID()); - int[] closeCount = new int[3]; - eventQueue.fireEvent(CLOSE_CONTAINER, id); - eventQueue.processAll(1000); - int i = 0; - for (DatanodeDetails details : containerWithPipeline.getPipeline() - .getMachines()) { - closeCount[i] = nodeManager.getCommandCount(details); - i++; - } - i = 0; - for (DatanodeDetails details : containerWithPipeline.getPipeline() - .getMachines()) { - Assert.assertEquals(closeCount[i], nodeManager.getCommandCount(details)); - i++; - } - //Execute these state transitions so that we can close the container. - mapping.updateContainerState(id.getId(), CREATED); - eventQueue.fireEvent(CLOSE_CONTAINER, id); - eventQueue.processAll(1000); - i = 0; - // Make sure close is queued for each datanode on the pipeline - for (DatanodeDetails details : containerWithPipeline.getPipeline() - .getMachines()) { - Assert.assertEquals(closeCount[i] + 1, - nodeManager.getCommandCount(details)); - Assert.assertEquals(HddsProtos.LifeCycleState.CLOSING, - mapping.getStateManager().getContainer(id).getState()); - i++; - } - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java deleted file mode 100644 index 0997e1f5bc9ca..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java +++ /dev/null @@ -1,68 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.container; - -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerActionsProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerAction; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ContainerActionsFromDatanode; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.junit.Test; -import org.mockito.Mockito; - -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; - -/** - * Tests ContainerActionsHandler. - */ -public class TestContainerActionsHandler { - - @Test - public void testCloseContainerAction() { - EventQueue queue = new EventQueue(); - ContainerActionsHandler actionsHandler = new ContainerActionsHandler(); - CloseContainerEventHandler closeContainerEventHandler = Mockito.mock( - CloseContainerEventHandler.class); - queue.addHandler(SCMEvents.CLOSE_CONTAINER, closeContainerEventHandler); - queue.addHandler(SCMEvents.CONTAINER_ACTIONS, actionsHandler); - - ContainerAction action = ContainerAction.newBuilder() - .setContainerID(1L) - .setAction(ContainerAction.Action.CLOSE) - .setReason(ContainerAction.Reason.CONTAINER_FULL) - .build(); - - ContainerActionsProto cap = ContainerActionsProto.newBuilder() - .addContainerActions(action) - .build(); - - ContainerActionsFromDatanode containerActions = - new ContainerActionsFromDatanode( - TestUtils.randomDatanodeDetails(), cap); - - queue.fireEvent(SCMEvents.CONTAINER_ACTIONS, containerActions); - - verify(closeContainerEventHandler, times(1)) - .onMessage(ContainerID.valueof(1L), queue); - - } - -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java deleted file mode 100644 index f9a881e27d863..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java +++ /dev/null @@ -1,380 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.container.common.SCMTestUtils; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.LambdaTestUtils; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.NavigableSet; -import java.util.Random; -import java.util.Set; -import java.util.TreeSet; -import java.util.UUID; -import java.util.concurrent.TimeUnit; - -/** - * Tests for Container Mapping. 
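 * (Editor's note: the allocation call at the core of these tests, using
 * the class fields defined below; a sketch, not new code:
 *   ContainerWithPipeline container = mapping.allocateContainer(
 *       xceiverClientManager.getType(),
 *       xceiverClientManager.getFactor(),
 *       containerOwner);
 * Each test then drives the returned container through lifecycle events.)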
- */ -public class TestContainerMapping { - private static ContainerMapping mapping; - private static MockNodeManager nodeManager; - private static File testDir; - private static XceiverClientManager xceiverClientManager; - private static String containerOwner = "OZONE"; - private static Random random; - - private static final long TIMEOUT = 10000; - - @Rule - public ExpectedException thrown = ExpectedException.none(); - @BeforeClass - public static void setUp() throws Exception { - Configuration conf = SCMTestUtils.getConf(); - - testDir = GenericTestUtils - .getTestDir(TestContainerMapping.class.getSimpleName()); - conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, - testDir.getAbsolutePath()); - conf.setTimeDuration( - ScmConfigKeys.OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT, - TIMEOUT, - TimeUnit.MILLISECONDS); - boolean folderExisted = testDir.exists() || testDir.mkdirs(); - if (!folderExisted) { - throw new IOException("Unable to create test directory path"); - } - nodeManager = new MockNodeManager(true, 10); - mapping = new ContainerMapping(conf, nodeManager, 128, - new EventQueue()); - xceiverClientManager = new XceiverClientManager(conf); - random = new Random(); - } - - @AfterClass - public static void cleanup() throws IOException { - if(mapping != null) { - mapping.close(); - } - FileUtil.fullyDelete(testDir); - } - - @Before - public void clearChillMode() { - nodeManager.setChillmode(false); - } - - @Test - public void testallocateContainer() throws Exception { - ContainerWithPipeline containerInfo = mapping.allocateContainer( - xceiverClientManager.getType(), - xceiverClientManager.getFactor(), - containerOwner); - Assert.assertNotNull(containerInfo); - } - - @Test - public void testallocateContainerDistributesAllocation() throws Exception { - /* This is a lame test, we should really be testing something like - z-score or make sure that we don't have 3sigma kind of events. Too lazy - to write all that code. This test very lamely tests if we have more than - 5 separate nodes from the list of 10 datanodes that got allocated a - container. - */ - Set pipelineList = new TreeSet<>(); - for (int x = 0; x < 30; x++) { - ContainerWithPipeline containerInfo = mapping.allocateContainer( - xceiverClientManager.getType(), - xceiverClientManager.getFactor(), - containerOwner); - - Assert.assertNotNull(containerInfo); - Assert.assertNotNull(containerInfo.getPipeline()); - pipelineList.add(containerInfo.getPipeline().getLeader() - .getUuid()); - } - Assert.assertTrue(pipelineList.size() > 5); - } - - @Test - public void testGetContainer() throws IOException { - ContainerWithPipeline containerInfo = mapping.allocateContainer( - xceiverClientManager.getType(), - xceiverClientManager.getFactor(), - containerOwner); - Pipeline pipeline = containerInfo.getPipeline(); - Assert.assertNotNull(pipeline); - Pipeline newPipeline = containerInfo.getPipeline(); - Assert.assertEquals(pipeline.getLeader().getUuid(), - newPipeline.getLeader().getUuid()); - } - - @Test - public void testGetContainerWithPipeline() throws Exception { - ContainerWithPipeline containerWithPipeline = mapping.allocateContainer( - xceiverClientManager.getType(), - xceiverClientManager.getFactor(), - containerOwner); - ContainerInfo contInfo = containerWithPipeline.getContainerInfo(); - // Add dummy replicas for container. 
- DatanodeDetails dn1 = DatanodeDetails.newBuilder() - .setHostName("host1") - .setIpAddress("1.1.1.1") - .setUuid(UUID.randomUUID().toString()).build(); - DatanodeDetails dn2 = DatanodeDetails.newBuilder() - .setHostName("host2") - .setIpAddress("2.2.2.2") - .setUuid(UUID.randomUUID().toString()).build(); - mapping - .updateContainerState(contInfo.getContainerID(), LifeCycleEvent.CREATE); - mapping.updateContainerState(contInfo.getContainerID(), - LifeCycleEvent.CREATED); - mapping.updateContainerState(contInfo.getContainerID(), - LifeCycleEvent.FINALIZE); - mapping - .updateContainerState(contInfo.getContainerID(), LifeCycleEvent.CLOSE); - ContainerInfo finalContInfo = contInfo; - LambdaTestUtils.intercept(SCMException.class, "No entry exist for " - + "containerId:", () -> mapping.getContainerWithPipeline( - finalContInfo.getContainerID())); - - mapping.getStateManager().getContainerStateMap() - .addContainerReplica(contInfo.containerID(), dn1, dn2); - - contInfo = mapping.getContainer(contInfo.getContainerID()); - Assert.assertEquals(contInfo.getState(), LifeCycleState.CLOSED); - Pipeline pipeline = containerWithPipeline.getPipeline(); - mapping.getPipelineSelector().finalizePipeline(pipeline); - - ContainerWithPipeline containerWithPipeline2 = mapping - .getContainerWithPipeline(contInfo.getContainerID()); - pipeline = containerWithPipeline2.getPipeline(); - Assert.assertNotEquals(containerWithPipeline, containerWithPipeline2); - Assert.assertNotNull("Pipeline should not be null", pipeline); - Assert.assertTrue(pipeline.getDatanodeHosts().contains(dn1.getHostName())); - Assert.assertTrue(pipeline.getDatanodeHosts().contains(dn2.getHostName())); - } - - @Test - public void testgetNoneExistentContainer() throws IOException { - thrown.expectMessage("Specified key does not exist."); - mapping.getContainer(random.nextLong()); - } - - @Test - public void testContainerCreationLeaseTimeout() throws IOException, - InterruptedException { - nodeManager.setChillmode(false); - ContainerWithPipeline containerInfo = mapping.allocateContainer( - xceiverClientManager.getType(), - xceiverClientManager.getFactor(), - containerOwner); - mapping.updateContainerState(containerInfo.getContainerInfo() - .getContainerID(), HddsProtos.LifeCycleEvent.CREATE); - Thread.sleep(TIMEOUT + 1000); - - NavigableSet deleteContainers = mapping.getStateManager() - .getMatchingContainerIDs( - "OZONE", - xceiverClientManager.getType(), - xceiverClientManager.getFactor(), - HddsProtos.LifeCycleState.DELETING); - Assert.assertTrue(deleteContainers - .contains(containerInfo.getContainerInfo().containerID())); - - thrown.expect(IOException.class); - thrown.expectMessage("Lease Exception"); - mapping - .updateContainerState(containerInfo.getContainerInfo().getContainerID(), - HddsProtos.LifeCycleEvent.CREATED); - } - - @Test - public void testFullContainerReport() throws Exception { - ContainerInfo info = createContainer(); - DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails(); - List reports = - new ArrayList<>(); - StorageContainerDatanodeProtocolProtos.ContainerInfo.Builder ciBuilder = - StorageContainerDatanodeProtocolProtos.ContainerInfo.newBuilder(); - ciBuilder.setFinalhash("e16cc9d6024365750ed8dbd194ea46d2") - .setSize(5368709120L) - .setUsed(2000000000L) - .setKeyCount(100000000L) - .setReadCount(100000000L) - .setWriteCount(100000000L) - .setReadBytes(2000000000L) - .setWriteBytes(2000000000L) - .setContainerID(info.getContainerID()) - .setDeleteTransactionId(0); - - reports.add(ciBuilder.build()); - - 
ContainerReportsProto.Builder crBuilder = ContainerReportsProto - .newBuilder(); - crBuilder.addAllReports(reports); - - mapping.processContainerReports(datanodeDetails, crBuilder.build(), false); - - ContainerInfo updatedContainer = - mapping.getContainer(info.getContainerID()); - Assert.assertEquals(100000000L, - updatedContainer.getNumberOfKeys()); - Assert.assertEquals(2000000000L, updatedContainer.getUsedBytes()); - - for (StorageContainerDatanodeProtocolProtos.ContainerInfo c : reports) { - LambdaTestUtils.intercept(SCMException.class, "No entry " - + "exist for containerId:", () -> mapping.getStateManager() - .getContainerReplicas(ContainerID.valueof(c.getContainerID()))); - } - - mapping.processContainerReports(TestUtils.randomDatanodeDetails(), - crBuilder.build(), true); - for (StorageContainerDatanodeProtocolProtos.ContainerInfo c : reports) { - Assert.assertTrue(mapping.getStateManager().getContainerReplicas( - ContainerID.valueof(c.getContainerID())).size() > 0); - } - } - - @Test - public void testListContainerAfterReport() throws Exception { - ContainerInfo info1 = createContainer(); - ContainerInfo info2 = createContainer(); - DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails(); - List reports = - new ArrayList<>(); - StorageContainerDatanodeProtocolProtos.ContainerInfo.Builder ciBuilder = - StorageContainerDatanodeProtocolProtos.ContainerInfo.newBuilder(); - long cID1 = info1.getContainerID(); - long cID2 = info2.getContainerID(); - ciBuilder.setFinalhash("e16cc9d6024365750ed8dbd194ea46d2") - .setSize(1000000000L) - .setUsed(987654321L) - .setKeyCount(100000000L) - .setReadBytes(1000000000L) - .setWriteBytes(1000000000L) - .setContainerID(cID1); - reports.add(ciBuilder.build()); - - ciBuilder.setFinalhash("e16cc9d6024365750ed8dbd194ea54a9") - .setSize(1000000000L) - .setUsed(123456789L) - .setKeyCount(200000000L) - .setReadBytes(3000000000L) - .setWriteBytes(4000000000L) - .setContainerID(cID2); - reports.add(ciBuilder.build()); - - ContainerReportsProto.Builder crBuilder = ContainerReportsProto - .newBuilder(); - crBuilder.addAllReports(reports); - - mapping.processContainerReports(datanodeDetails, crBuilder.build(), false); - - List list = mapping.listContainer(0, 50); - Assert.assertEquals(2, list.stream().filter( - x -> x.getContainerID() == cID1 || x.getContainerID() == cID2).count()); - Assert.assertEquals(300000000L, list.stream().filter( - x -> x.getContainerID() == cID1 || x.getContainerID() == cID2) - .mapToLong(x -> x.getNumberOfKeys()).sum()); - Assert.assertEquals(1111111110L, list.stream().filter( - x -> x.getContainerID() == cID1 || x.getContainerID() == cID2) - .mapToLong(x -> x.getUsedBytes()).sum()); - } - - @Test - public void testCloseContainer() throws IOException { - ContainerInfo info = createContainer(); - mapping.updateContainerState(info.getContainerID(), - HddsProtos.LifeCycleEvent.FINALIZE); - NavigableSet pendingCloseContainers = mapping.getStateManager() - .getMatchingContainerIDs( - containerOwner, - xceiverClientManager.getType(), - xceiverClientManager.getFactor(), - HddsProtos.LifeCycleState.CLOSING); - Assert.assertTrue(pendingCloseContainers.contains(info.containerID())); - mapping.updateContainerState(info.getContainerID(), - HddsProtos.LifeCycleEvent.CLOSE); - NavigableSet closeContainers = mapping.getStateManager() - .getMatchingContainerIDs( - containerOwner, - xceiverClientManager.getType(), - xceiverClientManager.getFactor(), - HddsProtos.LifeCycleState.CLOSED); - 
Assert.assertTrue(closeContainers.contains(info.containerID())); - } - - /** - * Creates a container with the given name in ContainerMapping. - * @throws IOException - */ - private ContainerInfo createContainer() - throws IOException { - nodeManager.setChillmode(false); - ContainerWithPipeline containerWithPipeline = mapping.allocateContainer( - xceiverClientManager.getType(), - xceiverClientManager.getFactor(), - containerOwner); - ContainerInfo containerInfo = containerWithPipeline.getContainerInfo(); - mapping.updateContainerState(containerInfo.getContainerID(), - HddsProtos.LifeCycleEvent.CREATE); - mapping.updateContainerState(containerInfo.getContainerID(), - HddsProtos.LifeCycleEvent.CREATED); - return containerInfo; - } - - @Test - public void testFlushAllContainers() throws IOException { - ContainerInfo info = createContainer(); - List containers = mapping.getStateManager() - .getAllContainers(); - Assert.assertTrue(containers.size() > 0); - mapping.flushContainerInfo(); - } - -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java deleted file mode 100644 index f79ae1e32d90d..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java +++ /dev/null @@ -1,239 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashSet; -import java.util.List; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo - .Builder; -import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID; -import org.apache.hadoop.hdds.scm.container.replication - .ReplicationActivityStatus; -import org.apache.hadoop.hdds.scm.container.replication.ReplicationRequest; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher - .ContainerReportFromDatanode; -import org.apache.hadoop.hdds.server.events.Event; -import org.apache.hadoop.hdds.server.events.EventPublisher; - -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import static org.mockito.Matchers.anyLong; -import org.mockito.Mockito; -import static org.mockito.Mockito.when; -import org.mockito.stubbing.Answer; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Test the behaviour of the ContainerReportHandler. 
- */ -public class TestContainerReportHandler implements EventPublisher { - - private List publishedEvents = new ArrayList<>(); - private final NodeManager nodeManager = new MockNodeManager(true, 1); - - private static final Logger LOG = - LoggerFactory.getLogger(TestContainerReportHandler.class); - - @Before - public void resetEventCollector() { - publishedEvents.clear(); - } - - @Test - public void test() throws IOException { - //GIVEN - OzoneConfiguration conf = new OzoneConfiguration(); - Mapping mapping = Mockito.mock(Mapping.class); - PipelineSelector selector = Mockito.mock(PipelineSelector.class); - - when(mapping.getContainer(anyLong())) - .thenAnswer( - (Answer) invocation -> - new Builder() - .setReplicationFactor(ReplicationFactor.THREE) - .setContainerID((Long) invocation.getArguments()[0]) - .setState(LifeCycleState.CLOSED) - .build() - ); - - ContainerStateManager containerStateManager = - new ContainerStateManager(conf, mapping, selector); - - when(mapping.getStateManager()).thenReturn(containerStateManager); - - ReplicationActivityStatus replicationActivityStatus = - new ReplicationActivityStatus(); - - ContainerReportHandler reportHandler = - new ContainerReportHandler(mapping, nodeManager, - replicationActivityStatus); - - DatanodeDetails dn1 = TestUtils.randomDatanodeDetails(); - DatanodeDetails dn2 = TestUtils.randomDatanodeDetails(); - DatanodeDetails dn3 = TestUtils.randomDatanodeDetails(); - DatanodeDetails dn4 = TestUtils.randomDatanodeDetails(); - nodeManager.addDatanodeInContainerMap(dn1.getUuid(), new HashSet<>()); - nodeManager.addDatanodeInContainerMap(dn2.getUuid(), new HashSet<>()); - nodeManager.addDatanodeInContainerMap(dn3.getUuid(), new HashSet<>()); - nodeManager.addDatanodeInContainerMap(dn4.getUuid(), new HashSet<>()); - PipelineSelector pipelineSelector = Mockito.mock(PipelineSelector.class); - - Pipeline pipeline = new Pipeline("leader", LifeCycleState.CLOSED, - ReplicationType.STAND_ALONE, ReplicationFactor.THREE, - PipelineID.randomId()); - - when(pipelineSelector.getReplicationPipeline(ReplicationType.STAND_ALONE, - ReplicationFactor.THREE)).thenReturn(pipeline); - - ContainerInfo cont1 = containerStateManager - .allocateContainer(pipelineSelector, ReplicationType.STAND_ALONE, - ReplicationFactor.THREE, "root").getContainerInfo(); - ContainerInfo cont2 = containerStateManager - .allocateContainer(pipelineSelector, ReplicationType.STAND_ALONE, - ReplicationFactor.THREE, "root").getContainerInfo(); - // Open Container - ContainerInfo cont3 = containerStateManager - .allocateContainer(pipelineSelector, ReplicationType.STAND_ALONE, - ReplicationFactor.THREE, "root").getContainerInfo(); - - long c1 = cont1.getContainerID(); - long c2 = cont2.getContainerID(); - long c3 = cont3.getContainerID(); - - // Close remaining containers - TestUtils.closeContainer(containerStateManager, cont1); - TestUtils.closeContainer(containerStateManager, cont2); - - //when - - //initial reports before replication is enabled. 2 containers w 3 replicas. 
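The rule exercised from this point on can be summarized in a minimal sketch, assuming a hypothetical checkReplication helper; the production logic lives in ContainerReportHandler, and only ReplicationRequest's constructor and getters are taken from this test. The initial reports are fired next.

    // Minimal sketch, not the patched code: checkReplication is a
    // hypothetical name. Once replication is enabled, a container whose
    // observed replica count differs from the expected factor yields a
    // ReplicationRequest; a fully replicated container yields nothing.
    static ReplicationRequest checkReplication(long containerId,
        short observedReplicas, short expectedReplicas) {
      if (observedReplicas == expectedReplicas) {
        return null; // fully replicated, no event is published
      }
      // Under- and over-replication both produce a request; consumers
      // compare getReplicationCount() with getExpecReplicationCount().
      return new ReplicationRequest(containerId, observedReplicas,
          System.currentTimeMillis(), expectedReplicas);
    }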
- reportHandler.onMessage( - new ContainerReportFromDatanode(dn1, - createContainerReport(new long[] {c1, c2, c3})), this); - - reportHandler.onMessage( - new ContainerReportFromDatanode(dn2, - createContainerReport(new long[] {c1, c2, c3})), this); - - reportHandler.onMessage( - new ContainerReportFromDatanode(dn3, - createContainerReport(new long[] {c1, c2})), this); - - reportHandler.onMessage( - new ContainerReportFromDatanode(dn4, - createContainerReport(new long[] {})), this); - - Assert.assertEquals(0, publishedEvents.size()); - - replicationActivityStatus.enableReplication(); - - //no problem here - reportHandler.onMessage( - new ContainerReportFromDatanode(dn1, - createContainerReport(new long[] {c1, c2})), this); - - Assert.assertEquals(0, publishedEvents.size()); - - //container is missing from d2 - reportHandler.onMessage( - new ContainerReportFromDatanode(dn2, - createContainerReport(new long[] {c1})), this); - - Assert.assertEquals(1, publishedEvents.size()); - ReplicationRequest replicationRequest = - (ReplicationRequest) publishedEvents.get(0); - - Assert.assertEquals(c2, replicationRequest.getContainerId()); - Assert.assertEquals(3, replicationRequest.getExpecReplicationCount()); - Assert.assertEquals(2, replicationRequest.getReplicationCount()); - - //container was replicated to dn4 - reportHandler.onMessage( - new ContainerReportFromDatanode(dn4, - createContainerReport(new long[] {c2})), this); - - //no more event, everything is perfect - Assert.assertEquals(1, publishedEvents.size()); - - //c2 was found at dn2 (it was missing before, magic) - reportHandler.onMessage( - new ContainerReportFromDatanode(dn2, - createContainerReport(new long[] {c1, c2})), this); - - //c2 is over replicated (dn1,dn2,dn3,dn4) - Assert.assertEquals(2, publishedEvents.size()); - - replicationRequest = - (ReplicationRequest) publishedEvents.get(1); - - Assert.assertEquals(c2, replicationRequest.getContainerId()); - Assert.assertEquals(3, replicationRequest.getExpecReplicationCount()); - Assert.assertEquals(4, replicationRequest.getReplicationCount()); - - } - - private ContainerReportsProto createContainerReport(long[] containerIds) { - - ContainerReportsProto.Builder crBuilder = - ContainerReportsProto.newBuilder(); - - for (long containerId : containerIds) { - org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerInfo.Builder - ciBuilder = org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerInfo.newBuilder(); - ciBuilder.setFinalhash("e16cc9d6024365750ed8dbd194ea46d2") - .setSize(5368709120L) - .setUsed(2000000000L) - .setKeyCount(100000000L) - .setReadCount(100000000L) - .setWriteCount(100000000L) - .setReadBytes(2000000000L) - .setWriteBytes(2000000000L) - .setContainerID(containerId) - .setDeleteTransactionId(0); - - crBuilder.addReports(ciBuilder.build()); - } - - return crBuilder.build(); - } - - @Override - public > void fireEvent( - EVENT_TYPE event, PAYLOAD payload) { - LOG.info("Event is published: {}", payload); - publishedEvents.add(payload); - } -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java deleted file mode 100644 index b857740a5fd64..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java +++ /dev/null @@ -1,98 +0,0 @@ -/** - * 
Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.container; - -import java.io.IOException; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.replication.ReplicationRequest; - -import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mockito; - -/** - * Testing ContainerStateManager. - */ -public class TestContainerStateManager { - - private ContainerStateManager containerStateManager; - - @Before - public void init() throws IOException { - OzoneConfiguration conf = new OzoneConfiguration(); - Mapping mapping = Mockito.mock(Mapping.class); - PipelineSelector selector = Mockito.mock(PipelineSelector.class); - containerStateManager = new ContainerStateManager(conf, mapping, selector); - - } - - @Test - public void checkReplicationStateOK() throws IOException { - //GIVEN - ContainerInfo c1 = TestUtils.allocateContainer(containerStateManager); - - DatanodeDetails d1 = TestUtils.randomDatanodeDetails(); - DatanodeDetails d2 = TestUtils.randomDatanodeDetails(); - DatanodeDetails d3 = TestUtils.randomDatanodeDetails(); - - addReplica(c1, d1); - addReplica(c1, d2); - addReplica(c1, d3); - - //WHEN - ReplicationRequest replicationRequest = containerStateManager - .checkReplicationState(new ContainerID(c1.getContainerID())); - - //THEN - Assert.assertNull(replicationRequest); - } - - @Test - public void checkReplicationStateMissingReplica() throws IOException { - //GIVEN - - ContainerInfo c1 = TestUtils.allocateContainer(containerStateManager); - - DatanodeDetails d1 = TestUtils.randomDatanodeDetails(); - DatanodeDetails d2 = TestUtils.randomDatanodeDetails(); - - addReplica(c1, d1); - addReplica(c1, d2); - - //WHEN - ReplicationRequest replicationRequest = containerStateManager - .checkReplicationState(new ContainerID(c1.getContainerID())); - - Assert - .assertEquals(c1.getContainerID(), replicationRequest.getContainerId()); - Assert.assertEquals(2, replicationRequest.getReplicationCount()); - Assert.assertEquals(3, replicationRequest.getExpecReplicationCount()); - } - - private void addReplica(ContainerInfo c1, DatanodeDetails d1) { - containerStateManager - .addContainerReplica(new ContainerID(c1.getContainerID()), d1); - } - -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/package-info.java deleted file mode 100644 index 2f35719816f45..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Make CheckStyle happy. - */ -package org.apache.hadoop.hdds.scm.container.closer; \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/package-info.java deleted file mode 100644 index f93aea66e7ba5..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Make CheckStyle Happy. - */ -package org.apache.hadoop.hdds.scm.container; \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java deleted file mode 100644 index 764daff77585c..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java +++ /dev/null @@ -1,106 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container.placement.algorithms; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.node.NodeManager; - -import org.junit.Assert; -import org.junit.Test; -import static org.mockito.Matchers.anyObject; -import org.mockito.Mockito; -import static org.mockito.Mockito.when; - -public class TestSCMContainerPlacementCapacity { - @Test - public void chooseDatanodes() throws SCMException { - //given - Configuration conf = new OzoneConfiguration(); - - List datanodes = new ArrayList<>(); - for (int i = 0; i < 7; i++) { - datanodes.add(TestUtils.randomDatanodeDetails()); - } - - NodeManager mockNodeManager = Mockito.mock(NodeManager.class); - when(mockNodeManager.getNodes(NodeState.HEALTHY)) - .thenReturn(new ArrayList<>(datanodes)); - - when(mockNodeManager.getNodeStat(anyObject())) - .thenReturn(new SCMNodeMetric(100L, 0L, 100L)); - when(mockNodeManager.getNodeStat(datanodes.get(2))) - .thenReturn(new SCMNodeMetric(100L, 90L, 10L)); - when(mockNodeManager.getNodeStat(datanodes.get(3))) - .thenReturn(new SCMNodeMetric(100L, 80L, 20L)); - when(mockNodeManager.getNodeStat(datanodes.get(4))) - .thenReturn(new SCMNodeMetric(100L, 70L, 30L)); - - SCMContainerPlacementCapacity scmContainerPlacementRandom = - new SCMContainerPlacementCapacity(mockNodeManager, conf); - - List existingNodes = new ArrayList<>(); - existingNodes.add(datanodes.get(0)); - existingNodes.add(datanodes.get(1)); - - Map selectedCount = new HashMap<>(); - for (DatanodeDetails datanode : datanodes) { - selectedCount.put(datanode, 0); - } - - for (int i = 0; i < 1000; i++) { - - //when - List datanodeDetails = - scmContainerPlacementRandom.chooseDatanodes(existingNodes, 1, 15); - - //then - Assert.assertEquals(1, datanodeDetails.size()); - DatanodeDetails datanode0Details = datanodeDetails.get(0); - - Assert.assertNotEquals( - "Datanode 0 should not been selected: excluded by parameter", - datanodes.get(0), datanode0Details); - Assert.assertNotEquals( - "Datanode 1 should not been selected: excluded by parameter", - datanodes.get(1), datanode0Details); - Assert.assertNotEquals( - "Datanode 2 should not been selected: not enough space there", - datanodes.get(2), datanode0Details); - - selectedCount - .put(datanode0Details, selectedCount.get(datanode0Details) + 1); - - } - - //datanode 4 has less space. Should be selected less times. 
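That bias can be illustrated with a minimal sketch, assuming a two-choice selection rule and plain longs in place of the real SCMNodeMetric type; nodes with little remaining space lose most pairwise comparisons, so over many rounds they accumulate fewer selections. The counting assertions follow.

    // Minimal sketch (hypothetical helper): sample two candidate
    // indexes and keep the one with more remaining space. Run a few
    // thousand rounds and nearly-full nodes are picked far less often.
    static int pickByRemainingSpace(long[] remaining, java.util.Random rnd) {
      int a = rnd.nextInt(remaining.length);
      int b = rnd.nextInt(remaining.length);
      return remaining[a] >= remaining[b] ? a : b;
    }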
- Assert.assertTrue(selectedCount.get(datanodes.get(3)) > selectedCount - .get(datanodes.get(6))); - Assert.assertTrue(selectedCount.get(datanodes.get(4)) > selectedCount - .get(datanodes.get(6))); - } -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java deleted file mode 100644 index b652b6b76b5f3..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java +++ /dev/null @@ -1,86 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container.placement.algorithms; - -import java.util.ArrayList; -import java.util.List; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.node.NodeManager; - -import org.junit.Assert; -import org.junit.Test; -import static org.mockito.Matchers.anyObject; -import org.mockito.Mockito; -import static org.mockito.Mockito.when; - -public class TestSCMContainerPlacementRandom { - - @Test - public void chooseDatanodes() throws SCMException { - //given - Configuration conf = new OzoneConfiguration(); - - List datanodes = new ArrayList<>(); - for (int i = 0; i < 5; i++) { - datanodes.add(TestUtils.randomDatanodeDetails()); - } - - NodeManager mockNodeManager = Mockito.mock(NodeManager.class); - when(mockNodeManager.getNodes(NodeState.HEALTHY)) - .thenReturn(new ArrayList<>(datanodes)); - - when(mockNodeManager.getNodeStat(anyObject())) - .thenReturn(new SCMNodeMetric(100L, 0L, 100L)); - when(mockNodeManager.getNodeStat(datanodes.get(2))) - .thenReturn(new SCMNodeMetric(100L, 90L, 10L)); - - SCMContainerPlacementRandom scmContainerPlacementRandom = - new SCMContainerPlacementRandom(mockNodeManager, conf); - - List existingNodes = new ArrayList<>(); - existingNodes.add(datanodes.get(0)); - existingNodes.add(datanodes.get(1)); - - for (int i = 0; i < 100; i++) { - //when - List datanodeDetails = - scmContainerPlacementRandom.chooseDatanodes(existingNodes, 1, 15); - - //then - Assert.assertEquals(1, datanodeDetails.size()); - DatanodeDetails datanode0Details = datanodeDetails.get(0); - - Assert.assertNotEquals( - "Datanode 0 should not been selected: excluded by parameter", - datanodes.get(0), datanode0Details); - Assert.assertNotEquals( - "Datanode 1 should not been selected: excluded by parameter", - datanodes.get(1), datanode0Details); - Assert.assertNotEquals( - "Datanode 2 should not been selected: not enough space there", - datanodes.get(2), datanode0Details); - - } - } -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationActivityStatus.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationActivityStatus.java deleted file mode 100644 index a4615fc1a7ba7..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationActivityStatus.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *     http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.container.replication; - -import static org.junit.Assert.*; - -import java.util.concurrent.TimeoutException; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.BeforeClass; -import org.junit.Test; - -/** - * Tests for ReplicationActivityStatus. - */ -public class TestReplicationActivityStatus { - - private static EventQueue eventQueue; - private static ReplicationActivityStatus replicationActivityStatus; - - @BeforeClass - public static void setup() { - eventQueue = new EventQueue(); - replicationActivityStatus = new ReplicationActivityStatus(); - eventQueue.addHandler(SCMEvents.START_REPLICATION, - replicationActivityStatus.getReplicationStatusListener()); - eventQueue.addHandler(SCMEvents.CHILL_MODE_STATUS, - replicationActivityStatus.getChillModeStatusListener()); - } - - @Test - public void testReplicationStatusForChillMode() - throws TimeoutException, InterruptedException { - assertFalse(replicationActivityStatus.isReplicationEnabled()); - // In chill mode, the replication process should be stopped. - eventQueue.fireEvent(SCMEvents.CHILL_MODE_STATUS, true); - assertFalse(replicationActivityStatus.isReplicationEnabled()); - - // Replication should be enabled when chill mode is off. - eventQueue.fireEvent(SCMEvents.CHILL_MODE_STATUS, false); - GenericTestUtils.waitFor(() -> { - return replicationActivityStatus.isReplicationEnabled(); - }, 10, 1000*5); - assertTrue(replicationActivityStatus.isReplicationEnabled()); - } -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java deleted file mode 100644 index 06beb7c174250..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java +++ /dev/null @@ -1,238 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container.replication; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Objects; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReplicateContainerCommandProto; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.ContainerStateManager; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID; -import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicy; -import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager.ReplicationRequestToRepeat; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.ozone.lease.LeaseManager; -import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; - -import com.google.common.base.Preconditions; -import static org.apache.hadoop.hdds.scm.events.SCMEvents.TRACK_REPLICATE_COMMAND; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import static org.mockito.Matchers.anyObject; -import org.mockito.Mockito; -import static org.mockito.Mockito.when; - -/** - * Test behaviour of the ReplicationManager. 
- */ -public class TestReplicationManager { - - private EventQueue queue; - - private List trackReplicationEvents; - - private List> copyEvents; - - private ContainerStateManager containerStateManager; - - private ContainerPlacementPolicy containerPlacementPolicy; - private List listOfDatanodeDetails; - private LeaseManager leaseManager; - private ReplicationManager replicationManager; - - @Before - public void initReplicationManager() throws IOException { - - listOfDatanodeDetails = new ArrayList<>(); - listOfDatanodeDetails.add(TestUtils.randomDatanodeDetails()); - listOfDatanodeDetails.add(TestUtils.randomDatanodeDetails()); - listOfDatanodeDetails.add(TestUtils.randomDatanodeDetails()); - listOfDatanodeDetails.add(TestUtils.randomDatanodeDetails()); - listOfDatanodeDetails.add(TestUtils.randomDatanodeDetails()); - - containerPlacementPolicy = - (excludedNodes, nodesRequired, sizeRequired) -> listOfDatanodeDetails - .subList(2, 2 + nodesRequired); - - containerStateManager = Mockito.mock(ContainerStateManager.class); - - ContainerInfo containerInfo = new ContainerInfo.Builder() - .setState(LifeCycleState.CLOSED) - .build(); - - when(containerStateManager.getContainer(anyObject())) - .thenReturn(containerInfo); - - when(containerStateManager.getContainerReplicas(new ContainerID(1L))) - .thenReturn(new HashSet<>(Arrays.asList( - listOfDatanodeDetails.get(0), - listOfDatanodeDetails.get(1) - ))); - - - when(containerStateManager.getContainerReplicas(new ContainerID(3L))) - .thenReturn(new HashSet<>()); - - queue = new EventQueue(); - - trackReplicationEvents = new ArrayList<>(); - queue.addHandler(TRACK_REPLICATE_COMMAND, - (event, publisher) -> trackReplicationEvents.add(event)); - - copyEvents = new ArrayList<>(); - queue.addHandler(SCMEvents.DATANODE_COMMAND, - (event, publisher) -> copyEvents.add(event)); - - leaseManager = new LeaseManager<>("Test", 100000L); - - replicationManager = new ReplicationManager(containerPlacementPolicy, - containerStateManager, queue, leaseManager); - - - - } - - /** - * Container should be replicated but no source replicas. 
- */ - @Test() - public void testNoExistingReplicas() throws InterruptedException { - try { - leaseManager.start(); - replicationManager.start(); - - //WHEN - queue.fireEvent(SCMEvents.REPLICATE_CONTAINER, - new ReplicationRequest(3L, (short) 2, System.currentTimeMillis(), - (short) 3)); - - Thread.sleep(500L); - queue.processAll(1000L); - - //THEN - Assert.assertEquals(0, trackReplicationEvents.size()); - Assert.assertEquals(0, copyEvents.size()); - - } finally { - if (leaseManager != null) { - leaseManager.shutdown(); - } - } - } - - @Test - public void testEventSending() throws InterruptedException, IOException { - - //GIVEN - try { - leaseManager.start(); - - replicationManager.start(); - - //WHEN - queue.fireEvent(SCMEvents.REPLICATE_CONTAINER, - new ReplicationRequest(1L, (short) 2, System.currentTimeMillis(), - (short) 3)); - - Thread.sleep(500L); - queue.processAll(1000L); - - //THEN - Assert.assertEquals(1, trackReplicationEvents.size()); - Assert.assertEquals(1, copyEvents.size()); - } finally { - if (leaseManager != null) { - leaseManager.shutdown(); - } - } - } - - @Test - public void testCommandWatcher() throws InterruptedException, IOException { - LeaseManager rapidLeaseManager = - new LeaseManager<>("Test", 1000L); - - replicationManager = new ReplicationManager(containerPlacementPolicy, - containerStateManager, queue, rapidLeaseManager); - - try { - rapidLeaseManager.start(); - replicationManager.start(); - - queue.fireEvent(SCMEvents.REPLICATE_CONTAINER, - new ReplicationRequest(1L, (short) 2, System.currentTimeMillis(), - (short) 3)); - - Thread.sleep(500L); - - queue.processAll(1000L); - - Assert.assertEquals(1, trackReplicationEvents.size()); - Assert.assertEquals(1, copyEvents.size()); - - Assert.assertEquals(trackReplicationEvents.get(0).getId(), - copyEvents.get(0).getCommand().getId()); - - //event is timed out - Thread.sleep(1500); - - queue.processAll(1000L); - - //original copy command + retry - Assert.assertEquals(2, trackReplicationEvents.size()); - Assert.assertEquals(2, copyEvents.size()); - - } finally { - if (rapidLeaseManager != null) { - rapidLeaseManager.shutdown(); - } - } - } - - public static Pipeline createPipeline(Iterable ids) - throws IOException { - Objects.requireNonNull(ids, "ids == null"); - final Iterator i = ids.iterator(); - Preconditions.checkArgument(i.hasNext()); - final DatanodeDetails leader = i.next(); - final Pipeline pipeline = - new Pipeline(leader.getUuidString(), LifeCycleState.OPEN, - ReplicationType.STAND_ALONE, ReplicationFactor.ONE, - PipelineID.randomId()); - pipeline.addMember(leader); - while (i.hasNext()) { - pipeline.addMember(i.next()); - } - return pipeline; - } - -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationQueue.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationQueue.java deleted file mode 100644 index 9dd4fe31c50fb..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationQueue.java +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.container.replication; - -import java.util.Random; -import java.util.UUID; -import org.apache.hadoop.util.Time; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -/** - * Test class for ReplicationQueue. - */ -public class TestReplicationQueue { - - private ReplicationQueue replicationQueue; - private Random random; - - @Before - public void setUp() { - replicationQueue = new ReplicationQueue(); - random = new Random(); - } - - @Test - public void testDuplicateAddOp() throws InterruptedException { - long contId = random.nextLong(); - String nodeId = UUID.randomUUID().toString(); - ReplicationRequest obj1, obj2, obj3; - long time = Time.monotonicNow(); - obj1 = new ReplicationRequest(contId, (short) 2, time, (short) 3); - obj2 = new ReplicationRequest(contId, (short) 2, time + 1, (short) 3); - obj3 = new ReplicationRequest(contId, (short) 1, time+2, (short) 3); - - replicationQueue.add(obj1); - replicationQueue.add(obj2); - replicationQueue.add(obj3); - Assert.assertEquals("Should add only 1 msg as second one is duplicate", - 1, replicationQueue.size()); - ReplicationRequest temp = replicationQueue.take(); - Assert.assertEquals(temp, obj3); - } - - @Test - public void testPollOp() throws InterruptedException { - long contId = random.nextLong(); - String nodeId = UUID.randomUUID().toString(); - ReplicationRequest msg1, msg2, msg3, msg4, msg5; - msg1 = new ReplicationRequest(contId, (short) 1, Time.monotonicNow(), - (short) 3); - long time = Time.monotonicNow(); - msg2 = new ReplicationRequest(contId + 1, (short) 4, time, (short) 3); - msg3 = new ReplicationRequest(contId + 2, (short) 0, time, (short) 3); - msg4 = new ReplicationRequest(contId, (short) 2, time, (short) 3); - // Replication message for same container but different nodeId - msg5 = new ReplicationRequest(contId + 1, (short) 2, time, (short) 3); - - replicationQueue.add(msg1); - replicationQueue.add(msg2); - replicationQueue.add(msg3); - replicationQueue.add(msg4); - replicationQueue.add(msg5); - Assert.assertEquals("Should have 3 objects", - 3, replicationQueue.size()); - - // Since Priority queue orders messages according to replication count, - // message with lowest replication should be first - ReplicationRequest temp; - temp = replicationQueue.take(); - Assert.assertEquals("Should have 2 objects", - 2, replicationQueue.size()); - Assert.assertEquals(temp, msg3); - - temp = replicationQueue.take(); - Assert.assertEquals("Should have 1 objects", - 1, replicationQueue.size()); - Assert.assertEquals(temp, msg5); - - // Message 2 should be ordered before message 5 as both have same - // replication number but message 2 has earlier timestamp. 
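That ordering amounts to the comparator below, shown as a minimal sketch; getTimestamp is a hypothetical accessor name, and the real ordering is defined by ReplicationRequest's own compareTo.

    // Minimal sketch (requires java.util.Comparator): fewer live
    // replicas sort first, and an earlier timestamp breaks the tie.
    // getTimestamp is assumed here; getReplicationCount is the getter
    // asserted elsewhere in these tests.
    Comparator<ReplicationRequest> urgencyOrder =
        Comparator.comparingInt(ReplicationRequest::getReplicationCount)
            .thenComparingLong(ReplicationRequest::getTimestamp);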
- temp = replicationQueue.take(); - Assert.assertEquals("Should have 0 objects", - replicationQueue.size(), 0); - Assert.assertEquals(temp, msg4); - } - - @Test - public void testRemoveOp() { - long contId = random.nextLong(); - String nodeId = UUID.randomUUID().toString(); - ReplicationRequest obj1, obj2, obj3; - obj1 = new ReplicationRequest(contId, (short) 1, Time.monotonicNow(), - (short) 3); - obj2 = new ReplicationRequest(contId + 1, (short) 2, Time.monotonicNow(), - (short) 3); - obj3 = new ReplicationRequest(contId + 2, (short) 3, Time.monotonicNow(), - (short) 3); - - replicationQueue.add(obj1); - replicationQueue.add(obj2); - replicationQueue.add(obj3); - Assert.assertEquals("Should have 3 objects", - 3, replicationQueue.size()); - - replicationQueue.remove(obj3); - Assert.assertEquals("Should have 2 objects", - 2, replicationQueue.size()); - - replicationQueue.remove(obj2); - Assert.assertEquals("Should have 1 objects", - 1, replicationQueue.size()); - - replicationQueue.remove(obj1); - Assert.assertEquals("Should have 0 objects", - 0, replicationQueue.size()); - } - -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java deleted file mode 100644 index 1423c99938158..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * SCM Testing and Mocking Utils. - */ -package org.apache.hadoop.hdds.scm.container.replication; -// Test classes for Replication functionality. \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerAttribute.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerAttribute.java deleted file mode 100644 index 63cc9bfd78938..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerAttribute.java +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - * - */ - -package org.apache.hadoop.hdds.scm.container.states; - -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.junit.Assert; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; - -import java.util.Arrays; -import java.util.List; - -/** - * Test ContainerAttribute management. - */ -public class TestContainerAttribute { - - @Rule - public ExpectedException thrown = ExpectedException.none(); - - @Test - public void testInsert() throws SCMException { - ContainerAttribute containerAttribute = new ContainerAttribute<>(); - ContainerID id = new ContainerID(42); - containerAttribute.insert(1, id); - Assert.assertEquals(1, - containerAttribute.getCollection(1).size()); - Assert.assertTrue(containerAttribute.getCollection(1).contains(id)); - - // Insert again and verify that it overwrites an existing value. - ContainerID newId = - new ContainerID(42); - containerAttribute.insert(1, newId); - Assert.assertEquals(1, - containerAttribute.getCollection(1).size()); - Assert.assertTrue(containerAttribute.getCollection(1).contains(newId)); - } - - @Test - public void testHasKey() throws SCMException { - ContainerAttribute containerAttribute = new ContainerAttribute<>(); - - for (int x = 1; x < 42; x++) { - containerAttribute.insert(1, new ContainerID(x)); - } - Assert.assertTrue(containerAttribute.hasKey(1)); - for (int x = 1; x < 42; x++) { - Assert.assertTrue(containerAttribute.hasContainerID(1, x)); - } - - Assert.assertFalse(containerAttribute.hasContainerID(1, - new ContainerID(42))); - } - - @Test - public void testClearSet() throws SCMException { - List keyslist = Arrays.asList("Key1", "Key2", "Key3"); - ContainerAttribute containerAttribute = new ContainerAttribute<>(); - for (String k : keyslist) { - for (int x = 1; x < 101; x++) { - containerAttribute.insert(k, new ContainerID(x)); - } - } - for (String k : keyslist) { - Assert.assertEquals(100, - containerAttribute.getCollection(k).size()); - } - containerAttribute.clearSet("Key1"); - Assert.assertEquals(0, - containerAttribute.getCollection("Key1").size()); - } - - @Test - public void testRemove() throws SCMException { - - List keyslist = Arrays.asList("Key1", "Key2", "Key3"); - ContainerAttribute containerAttribute = new ContainerAttribute<>(); - - for (String k : keyslist) { - for (int x = 1; x < 101; x++) { - containerAttribute.insert(k, new ContainerID(x)); - } - } - for (int x = 1; x < 101; x += 2) { - containerAttribute.remove("Key1", new ContainerID(x)); - } - - for (int x = 1; x < 101; x += 2) { - Assert.assertFalse(containerAttribute.hasContainerID("Key1", - new ContainerID(x))); - } - - Assert.assertEquals(100, - containerAttribute.getCollection("Key2").size()); - - Assert.assertEquals(100, - containerAttribute.getCollection("Key3").size()); - - Assert.assertEquals(50, - containerAttribute.getCollection("Key1").size()); - } - - @Test - public void tesUpdate() throws SCMException { - String key1 = "Key1"; - String key2 = "Key2"; - String key3 = "Key3"; - - ContainerAttribute containerAttribute = 
new ContainerAttribute<>(); - ContainerID id = new ContainerID(42); - - containerAttribute.insert(key1, id); - Assert.assertTrue(containerAttribute.hasContainerID(key1, id)); - Assert.assertFalse(containerAttribute.hasContainerID(key2, id)); - - // This should move the id from key1 bucket to key2 bucket. - containerAttribute.update(key1, key2, id); - Assert.assertFalse(containerAttribute.hasContainerID(key1, id)); - Assert.assertTrue(containerAttribute.hasContainerID(key2, id)); - - // This should fail since we cannot find this id in the key3 bucket. - thrown.expect(SCMException.class); - containerAttribute.update(key3, key1, id); - } -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/package-info.java deleted file mode 100644 index 795dfc1e1b9b2..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Make CheckStyle Happy. - */ -package org.apache.hadoop.hdds.scm.container.states; \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java deleted file mode 100644 index 6f8534d9be279..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java +++ /dev/null @@ -1,171 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hdds.scm.node; - -import org.apache.commons.io.IOUtils; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.container.ContainerMapping; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.hdds.scm.container.placement.algorithms - .ContainerPlacementPolicy; -import org.apache.hadoop.hdds.scm.container.placement.algorithms - .SCMContainerPlacementCapacity; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.test.PathUtils; -import org.junit.Ignore; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.mockito.Mockito; - -import java.io.File; -import java.io.IOException; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.TimeoutException; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_DB_CACHE_SIZE_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_DB_CACHE_SIZE_MB; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState - .HEALTHY; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -/** - * Test for different container placement policy. - */ -public class TestContainerPlacement { - @Rule - public ExpectedException thrown = ExpectedException.none(); - private static XceiverClientManager xceiverClientManager = - new XceiverClientManager(new OzoneConfiguration()); - - /** - * Returns a new copy of Configuration. - * - * @return Config - */ - OzoneConfiguration getConf() { - return new OzoneConfiguration(); - } - - /** - * Creates a NodeManager. - * - * @param config - Config for the node manager. - * @return SCNNodeManager - * @throws IOException - */ - - SCMNodeManager createNodeManager(OzoneConfiguration config) - throws IOException { - EventQueue eventQueue = new EventQueue(); - eventQueue.addHandler(SCMEvents.NEW_NODE, - Mockito.mock(NewNodeHandler.class)); - eventQueue.addHandler(SCMEvents.STALE_NODE, - Mockito.mock(StaleNodeHandler.class)); - eventQueue.addHandler(SCMEvents.DEAD_NODE, - Mockito.mock(DeadNodeHandler.class)); - SCMNodeManager nodeManager = new SCMNodeManager(config, - UUID.randomUUID().toString(), null, eventQueue); - assertFalse("Node manager should be in chill mode", - nodeManager.isOutOfChillMode()); - return nodeManager; - } - - ContainerMapping createContainerManager(Configuration config, - NodeManager scmNodeManager) throws IOException { - EventQueue eventQueue = new EventQueue(); - final int cacheSize = config.getInt(OZONE_SCM_DB_CACHE_SIZE_MB, - OZONE_SCM_DB_CACHE_SIZE_DEFAULT); - return new ContainerMapping(config, scmNodeManager, cacheSize, eventQueue); - - } - - /** - * Test capacity based container placement policy with node reports. 
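For reference, the placement policy under test is selected purely through configuration; a minimal sketch, reusing only the key and classes exercised by the test below:

    OzoneConfiguration conf = new OzoneConfiguration();
    // Ask SCM to place new containers on the datanodes with the most
    // free capacity.
    conf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
        SCMContainerPlacementCapacity.class, ContainerPlacementPolicy.class);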
- * - * @throws IOException - * @throws InterruptedException - * @throws TimeoutException - */ - @Test - @Ignore - public void testContainerPlacementCapacity() throws IOException, - InterruptedException, TimeoutException { - OzoneConfiguration conf = getConf(); - final int nodeCount = 4; - final long capacity = 10L * OzoneConsts.GB; - final long used = 2L * OzoneConsts.GB; - final long remaining = capacity - used; - - final File testDir = PathUtils.getTestDir( - TestContainerPlacement.class); - conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, - testDir.getAbsolutePath()); - conf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, - SCMContainerPlacementCapacity.class, ContainerPlacementPolicy.class); - - SCMNodeManager nodeManager = createNodeManager(conf); - ContainerMapping containerManager = - createContainerManager(conf, nodeManager); - List datanodes = - TestUtils.getListOfRegisteredDatanodeDetails(nodeManager, nodeCount); - try { - for (DatanodeDetails datanodeDetails : datanodes) { - nodeManager.processHeartbeat(datanodeDetails); - } - - //TODO: wait for heartbeat to be processed - Thread.sleep(4 * 1000); - assertEquals(nodeCount, nodeManager.getNodeCount(HEALTHY)); - assertEquals(capacity * nodeCount, - (long) nodeManager.getStats().getCapacity().get()); - assertEquals(used * nodeCount, - (long) nodeManager.getStats().getScmUsed().get()); - assertEquals(remaining * nodeCount, - (long) nodeManager.getStats().getRemaining().get()); - - assertTrue(nodeManager.isOutOfChillMode()); - - ContainerWithPipeline containerWithPipeline = containerManager - .allocateContainer( - xceiverClientManager.getType(), - xceiverClientManager.getFactor(), "OZONE"); - assertEquals(xceiverClientManager.getFactor().getNumber(), - containerWithPipeline.getPipeline().getMachines().size()); - } finally { - IOUtils.closeQuietly(containerManager); - IOUtils.closeQuietly(nodeManager); - FileUtil.fullyDelete(testDir); - } - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java deleted file mode 100644 index 7bba032145d2b..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java +++ /dev/null @@ -1,236 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.node; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.Set; -import java.util.stream.Collectors; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.ContainerStateManager; -import org.apache.hadoop.hdds.scm.container.Mapping; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; -import org.apache.hadoop.hdds.scm.container.replication.ReplicationRequest; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.NodeReportFromDatanode; -import org.apache.hadoop.hdds.server.events.EventPublisher; - -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.mockito.ArgumentCaptor; -import static org.mockito.Matchers.eq; -import org.mockito.Mockito; - -/** - * Test DeadNodeHandler. 
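A condensed sketch of the event wiring these tests set up, assuming the fields created in setup(); deadDatanode is a placeholder for any registered DatanodeDetails:

    EventQueue eventQueue = new EventQueue();
    DeadNodeHandler deadNodeHandler =
        new DeadNodeHandler(nodeManager, containerStateManager);
    eventQueue.addHandler(SCMEvents.DEAD_NODE, deadNodeHandler);

    // Dispatch is asynchronous, so the tests either sleep briefly after
    // firing or call deadNodeHandler.onMessage(..) directly.
    eventQueue.fireEvent(SCMEvents.DEAD_NODE, deadDatanode);
    Thread.sleep(100);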
- */ -public class TestDeadNodeHandler { - - private List sentEvents = new ArrayList<>(); - private SCMNodeManager nodeManager; - private ContainerStateManager containerStateManager; - private NodeReportHandler nodeReportHandler; - private DeadNodeHandler deadNodeHandler; - private EventPublisher publisher; - private EventQueue eventQueue; - - @Before - public void setup() throws IOException { - OzoneConfiguration conf = new OzoneConfiguration(); - containerStateManager = new ContainerStateManager(conf, - Mockito.mock(Mapping.class), - Mockito.mock(PipelineSelector.class)); - eventQueue = new EventQueue(); - nodeManager = new SCMNodeManager(conf, "cluster1", null, eventQueue); - deadNodeHandler = new DeadNodeHandler(nodeManager, - containerStateManager); - eventQueue.addHandler(SCMEvents.DEAD_NODE, deadNodeHandler); - publisher = Mockito.mock(EventPublisher.class); - nodeReportHandler = new NodeReportHandler(nodeManager); - } - - @Test - public void testOnMessage() throws IOException { - //GIVEN - DatanodeDetails datanode1 = TestUtils.randomDatanodeDetails(); - DatanodeDetails datanode2 = TestUtils.randomDatanodeDetails(); - - ContainerInfo container1 = - TestUtils.allocateContainer(containerStateManager); - ContainerInfo container2 = - TestUtils.allocateContainer(containerStateManager); - ContainerInfo container3 = - TestUtils.allocateContainer(containerStateManager); - - registerReplicas(datanode1, container1, container2); - registerReplicas(datanode2, container1, container3); - - registerReplicas(containerStateManager, container1, datanode1, datanode2); - registerReplicas(containerStateManager, container2, datanode1); - registerReplicas(containerStateManager, container3, datanode2); - - TestUtils.closeContainer(containerStateManager, container1); - - deadNodeHandler.onMessage(datanode1, publisher); - - Set container1Replicas = - containerStateManager.getContainerStateMap() - .getContainerReplicas(new ContainerID(container1.getContainerID())); - Assert.assertEquals(1, container1Replicas.size()); - Assert.assertEquals(datanode2, container1Replicas.iterator().next()); - - Set container2Replicas = - containerStateManager.getContainerStateMap() - .getContainerReplicas(new ContainerID(container2.getContainerID())); - Assert.assertEquals(0, container2Replicas.size()); - - Set container3Replicas = - containerStateManager.getContainerStateMap() - .getContainerReplicas(new ContainerID(container3.getContainerID())); - Assert.assertEquals(1, container3Replicas.size()); - Assert.assertEquals(datanode2, container3Replicas.iterator().next()); - - ArgumentCaptor replicationRequestParameter = - ArgumentCaptor.forClass(ReplicationRequest.class); - - Mockito.verify(publisher) - .fireEvent(eq(SCMEvents.REPLICATE_CONTAINER), - replicationRequestParameter.capture()); - - Assert - .assertEquals(container1.getContainerID(), - replicationRequestParameter.getValue().getContainerId()); - Assert - .assertEquals(1, - replicationRequestParameter.getValue().getReplicationCount()); - Assert - .assertEquals(3, - replicationRequestParameter.getValue().getExpecReplicationCount()); - } - - @Test - public void testStatisticsUpdate() throws Exception { - //GIVEN - DatanodeDetails datanode1 = TestUtils.randomDatanodeDetails(); - DatanodeDetails datanode2 = TestUtils.randomDatanodeDetails(); - String storagePath1 = GenericTestUtils.getRandomizedTempPath() - .concat("/" + datanode1.getUuidString()); - String storagePath2 = GenericTestUtils.getRandomizedTempPath() - .concat("/" + datanode2.getUuidString()); - - 
StorageReportProto storageOne = TestUtils.createStorageReport( - datanode1.getUuid(), storagePath1, 100, 10, 90, null); - StorageReportProto storageTwo = TestUtils.createStorageReport( - datanode2.getUuid(), storagePath2, 200, 20, 180, null); - nodeReportHandler.onMessage(getNodeReport(datanode1, storageOne), - Mockito.mock(EventPublisher.class)); - nodeReportHandler.onMessage(getNodeReport(datanode2, storageTwo), - Mockito.mock(EventPublisher.class)); - - ContainerInfo container1 = - TestUtils.allocateContainer(containerStateManager); - registerReplicas(datanode1, container1); - - SCMNodeStat stat = nodeManager.getStats(); - Assert.assertTrue(stat.getCapacity().get() == 300); - Assert.assertTrue(stat.getRemaining().get() == 270); - Assert.assertTrue(stat.getScmUsed().get() == 30); - - SCMNodeMetric nodeStat = nodeManager.getNodeStat(datanode1); - Assert.assertTrue(nodeStat.get().getCapacity().get() == 100); - Assert.assertTrue(nodeStat.get().getRemaining().get() == 90); - Assert.assertTrue(nodeStat.get().getScmUsed().get() == 10); - - //WHEN datanode1 is dead. - eventQueue.fireEvent(SCMEvents.DEAD_NODE, datanode1); - Thread.sleep(100); - - //THEN statistics in SCM should changed. - stat = nodeManager.getStats(); - Assert.assertTrue(stat.getCapacity().get() == 200); - Assert.assertTrue(stat.getRemaining().get() == 180); - Assert.assertTrue(stat.getScmUsed().get() == 20); - - nodeStat = nodeManager.getNodeStat(datanode1); - Assert.assertTrue(nodeStat.get().getCapacity().get() == 0); - Assert.assertTrue(nodeStat.get().getRemaining().get() == 0); - Assert.assertTrue(nodeStat.get().getScmUsed().get() == 0); - } - - @Test - public void testOnMessageReplicaFailure() throws Exception { - DatanodeDetails dn1 = TestUtils.randomDatanodeDetails(); - GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer - .captureLogs(DeadNodeHandler.getLogger()); - String storagePath1 = GenericTestUtils.getRandomizedTempPath() - .concat("/" + dn1.getUuidString()); - - StorageReportProto storageOne = TestUtils.createStorageReport( - dn1.getUuid(), storagePath1, 100, 10, 90, null); - nodeReportHandler.onMessage(getNodeReport(dn1, storageOne), - Mockito.mock(EventPublisher.class)); - - ContainerInfo container1 = - TestUtils.allocateContainer(containerStateManager); - registerReplicas(dn1, container1); - - deadNodeHandler.onMessage(dn1, eventQueue); - Assert.assertTrue(logCapturer.getOutput().contains( - "DataNode " + dn1.getUuid() + " doesn't have replica for container " - + container1.getContainerID())); - } - - private void registerReplicas(ContainerStateManager csm, - ContainerInfo container, DatanodeDetails... datanodes) { - csm.getContainerStateMap() - .addContainerReplica(new ContainerID(container.getContainerID()), - datanodes); - } - - private void registerReplicas(DatanodeDetails datanode, - ContainerInfo... containers) - throws SCMException { - nodeManager - .addDatanodeInContainerMap(datanode.getUuid(), - Arrays.stream(containers) - .map(container -> new ContainerID(container.getContainerID())) - .collect(Collectors.toSet())); - } - - private NodeReportFromDatanode getNodeReport(DatanodeDetails dn, - StorageReportProto... 
reports) { - NodeReportProto nodeReportProto = TestUtils.createNodeReport(reports); - return new NodeReportFromDatanode(dn, nodeReportProto); - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java deleted file mode 100644 index cbe96eee8434a..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java +++ /dev/null @@ -1,1144 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.node; - -import com.google.common.base.Supplier; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.StorageReportProto; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; -import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.PathUtils; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Ignore; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.mockito.Mockito; - -import java.io.File; -import java.io.IOException; -import java.util.LinkedList; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -import static java.util.concurrent.TimeUnit.MILLISECONDS; -import static java.util.concurrent.TimeUnit.SECONDS; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_DEADNODE_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_STALENODE_INTERVAL; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState - .HEALTHY; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE; -import static org.apache.hadoop.hdds.scm.events.SCMEvents.DATANODE_COMMAND; -import static org.hamcrest.CoreMatchers.containsString; -import static org.hamcrest.core.StringStartsWith.startsWith; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -/** - * Test the Node Manager class. 
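The chill-mode contract most of these tests probe, condensed into one sketch (this mirrors the first test case below, using the helpers defined in this class, and adds nothing new): the manager reports out of chill mode only once the configured minimum number of distinct datanodes has heartbeated.

    try (SCMNodeManager nodeManager = createNodeManager(getConf())) {
      for (int x = 0; x < nodeManager.getMinimumChillModeNodes(); x++) {
        nodeManager.processHeartbeat(
            TestUtils.createRandomDatanodeAndRegister(nodeManager));
      }
      Thread.sleep(4 * 1000); // let the heartbeat thread catch up
      assertTrue(nodeManager.isOutOfChillMode());
    }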
- */ -public class TestNodeManager { - - private File testDir; - - @Rule - public ExpectedException thrown = ExpectedException.none(); - - @BeforeClass - public static void init() throws IOException { - } - - @Before - public void setup() { - testDir = PathUtils.getTestDir( - TestNodeManager.class); - } - - @After - public void cleanup() { - FileUtil.fullyDelete(testDir); - } - - /** - * Returns a new copy of Configuration. - * - * @return Config - */ - OzoneConfiguration getConf() { - OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, - testDir.getAbsolutePath()); - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100, - TimeUnit.MILLISECONDS); - return conf; - } - - /** - * Creates a NodeManager. - * - * @param config - Config for the node manager. - * @return SCNNodeManager - * @throws IOException - */ - - SCMNodeManager createNodeManager(OzoneConfiguration config) - throws IOException { - EventQueue eventQueue = new EventQueue(); - eventQueue.addHandler(SCMEvents.NEW_NODE, - Mockito.mock(NewNodeHandler.class)); - eventQueue.addHandler(SCMEvents.STALE_NODE, - Mockito.mock(StaleNodeHandler.class)); - eventQueue.addHandler(SCMEvents.DEAD_NODE, - Mockito.mock(DeadNodeHandler.class)); - SCMNodeManager nodeManager = new SCMNodeManager(config, - UUID.randomUUID().toString(), null, eventQueue); - assertFalse("Node manager should be in chill mode", - nodeManager.isOutOfChillMode()); - return nodeManager; - } - - /** - * Tests that Node manager handles heartbeats correctly, and comes out of - * chill Mode. - * - * @throws IOException - * @throws InterruptedException - * @throws TimeoutException - */ - @Test - public void testScmHeartbeat() throws IOException, - InterruptedException, TimeoutException { - - try (SCMNodeManager nodeManager = createNodeManager(getConf())) { - // Send some heartbeats from different nodes. - for (int x = 0; x < nodeManager.getMinimumChillModeNodes(); x++) { - DatanodeDetails datanodeDetails = TestUtils - .createRandomDatanodeAndRegister(nodeManager); - nodeManager.processHeartbeat(datanodeDetails); - } - - //TODO: wait for heartbeat to be processed - Thread.sleep(4 * 1000); - assertTrue("Heartbeat thread should have picked up the" + - "scheduled heartbeats and transitioned out of chill mode.", - nodeManager.isOutOfChillMode()); - } - } - - /** - * asserts that if we send no heartbeats node manager stays in chillmode. - * - * @throws IOException - * @throws InterruptedException - * @throws TimeoutException - */ - @Test - public void testScmNoHeartbeats() throws IOException, - InterruptedException, TimeoutException { - - try (SCMNodeManager nodeManager = createNodeManager(getConf())) { - //TODO: wait for heartbeat to be processed - Thread.sleep(4 * 1000); - assertFalse("No heartbeats, Node manager should have been in" + - " chill mode.", nodeManager.isOutOfChillMode()); - } - } - - /** - * Asserts that if we don't get enough unique nodes we stay in chillmode. - * - * @throws IOException - * @throws InterruptedException - * @throws TimeoutException - */ - @Test - public void testScmNotEnoughHeartbeats() throws IOException, - InterruptedException, TimeoutException { - try (SCMNodeManager nodeManager = createNodeManager(getConf())) { - - // Need 100 nodes to come out of chill mode, only one node is sending HB. 
- nodeManager.setMinimumChillModeNodes(100); - nodeManager.processHeartbeat(TestUtils - .createRandomDatanodeAndRegister(nodeManager)); - //TODO: wait for heartbeat to be processed - Thread.sleep(4 * 1000); - assertFalse("Not enough heartbeat, Node manager should have" + - "been in chillmode.", nodeManager.isOutOfChillMode()); - } - } - - /** - * Asserts that many heartbeat from the same node is counted as a single - * node. - * - * @throws IOException - * @throws InterruptedException - * @throws TimeoutException - */ - @Test - public void testScmSameNodeHeartbeats() throws IOException, - InterruptedException, TimeoutException { - - try (SCMNodeManager nodeManager = createNodeManager(getConf())) { - nodeManager.setMinimumChillModeNodes(3); - DatanodeDetails datanodeDetails = TestUtils - .createRandomDatanodeAndRegister(nodeManager); - - // Send 10 heartbeat from same node, and assert we never leave chill mode. - for (int x = 0; x < 10; x++) { - nodeManager.processHeartbeat(datanodeDetails); - } - - //TODO: wait for heartbeat to be processed - Thread.sleep(4 * 1000); - assertFalse("Not enough nodes have send heartbeat to node" + - "manager.", nodeManager.isOutOfChillMode()); - } - } - - /** - * Asserts that adding heartbeats after shutdown does not work. This implies - * that heartbeat thread has been shutdown safely by closing the node - * manager. - * - * @throws IOException - * @throws InterruptedException - * @throws TimeoutException - */ - @Test - public void testScmShutdown() throws IOException, InterruptedException, - TimeoutException { - OzoneConfiguration conf = getConf(); - conf.getTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, - 100, TimeUnit.MILLISECONDS); - SCMNodeManager nodeManager = createNodeManager(conf); - DatanodeDetails datanodeDetails = TestUtils - .createRandomDatanodeAndRegister(nodeManager); - nodeManager.close(); - - // These should never be processed. - nodeManager.processHeartbeat(datanodeDetails); - - // Let us just wait for 2 seconds to prove that HBs are not processed. - Thread.sleep(2 * 1000); - - //TODO: add assertion - } - - /** - * Asserts scm informs datanodes to re-register with the nodemanager - * on a restart. - * - * @throws Exception - */ - @Test - public void testScmHeartbeatAfterRestart() throws Exception { - OzoneConfiguration conf = getConf(); - conf.getTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, - 100, TimeUnit.MILLISECONDS); - DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails(); - UUID dnId = datanodeDetails.getUuid(); - String storagePath = testDir.getAbsolutePath() + "/" + dnId; - StorageReportProto report = - TestUtils.createStorageReport(dnId, storagePath, 100, 10, 90, null); - try (SCMNodeManager nodemanager = createNodeManager(conf)) { - nodemanager.register(datanodeDetails, - TestUtils.createNodeReport(report), - TestUtils.getRandomPipelineReports()); - List command = nodemanager.processHeartbeat(datanodeDetails); - Assert.assertTrue(nodemanager.getAllNodes().contains(datanodeDetails)); - Assert.assertTrue("On regular HB calls, SCM responses a " - + "datanode with an empty command list", command.isEmpty()); - } - - // Sends heartbeat without registering to SCM. - // This happens when SCM restarts. - try (SCMNodeManager nodemanager = createNodeManager(conf)) { - Assert.assertFalse(nodemanager - .getAllNodes().contains(datanodeDetails)); - try { - // SCM handles heartbeat asynchronously. - // It may need more than one heartbeat processing to - // send the notification. 
- GenericTestUtils.waitFor(new Supplier() { - @Override public Boolean get() { - List command = - nodemanager.processHeartbeat(datanodeDetails); - return command.size() == 1 && command.get(0).getType() - .equals(SCMCommandProto.Type.reregisterCommand); - } - }, 100, 3 * 1000); - } catch (TimeoutException e) { - Assert.fail("Times out to verify that scm informs " - + "datanode to re-register itself."); - } - } - } - - /** - * Asserts that we detect as many healthy nodes as we have generated heartbeat - * for. - * - * @throws IOException - * @throws InterruptedException - * @throws TimeoutException - */ - @Test - public void testScmHealthyNodeCount() throws IOException, - InterruptedException, TimeoutException { - OzoneConfiguration conf = getConf(); - final int count = 10; - - try (SCMNodeManager nodeManager = createNodeManager(conf)) { - - for (int x = 0; x < count; x++) { - DatanodeDetails datanodeDetails = TestUtils - .createRandomDatanodeAndRegister(nodeManager); - nodeManager.processHeartbeat(datanodeDetails); - } - //TODO: wait for heartbeat to be processed - Thread.sleep(4 * 1000); - assertEquals(count, nodeManager.getNodeCount(HEALTHY)); - } - } - - /** - * Asserts that if user provides a value less than 5 times the heartbeat - * interval as the StaleNode Value, we throw since that is a QoS that we - * cannot maintain. - * - * @throws IOException - * @throws InterruptedException - * @throws TimeoutException - */ - - @Test - public void testScmSanityOfUserConfig1() throws IOException, - InterruptedException, TimeoutException { - OzoneConfiguration conf = getConf(); - final int interval = 100; - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, interval, - MILLISECONDS); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS); - - // This should be 5 times more than OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL - // and 3 times more than OZONE_SCM_HEARTBEAT_INTERVAL - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, interval, MILLISECONDS); - - thrown.expect(IllegalArgumentException.class); - - // This string is a multiple of the interval value - thrown.expectMessage( - startsWith("100 is not within min = 500 or max = 100000")); - createNodeManager(conf); - } - - /** - * Asserts that if Stale Interval value is more than 5 times the value of HB - * processing thread it is a sane value. - * - * @throws IOException - * @throws InterruptedException - * @throws TimeoutException - */ - @Test - public void testScmSanityOfUserConfig2() throws IOException, - InterruptedException, TimeoutException { - OzoneConfiguration conf = getConf(); - final int interval = 100; - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, interval, - TimeUnit.MILLISECONDS); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, TimeUnit.SECONDS); - - // This should be 5 times more than OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL - // and 3 times more than OZONE_SCM_HEARTBEAT_INTERVAL - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3 * 1000, MILLISECONDS); - createNodeManager(conf).close(); - } - - /** - * Asserts that a single node moves from Healthy to stale node, then from - * stale node to dead node if it misses enough heartbeats. 
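The timing contract that drives this healthy-to-stale-to-dead transition is set entirely through configuration; these are the values the test uses (heartbeats processed every 100 ms, a node stale after 3 s of silence, dead after 6 s):

    OzoneConfiguration conf = getConf();
    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100,
        MILLISECONDS);
    conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS);
    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS);
    conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);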
- * - * @throws IOException - * @throws InterruptedException - * @throws TimeoutException - */ - @Test - public void testScmDetectStaleAndDeadNode() throws IOException, - InterruptedException, TimeoutException { - final int interval = 100; - final int nodeCount = 10; - - OzoneConfiguration conf = getConf(); - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, interval, - MILLISECONDS); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS); - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS); - conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS); - - - try (SCMNodeManager nodeManager = createNodeManager(conf)) { - List nodeList = createNodeSet(nodeManager, nodeCount); - - - DatanodeDetails staleNode = TestUtils.createRandomDatanodeAndRegister( - nodeManager); - - // Heartbeat once - nodeManager.processHeartbeat(staleNode); - - // Heartbeat all other nodes. - for (DatanodeDetails dn : nodeList) { - nodeManager.processHeartbeat(dn); - } - - // Wait for 2 seconds .. and heartbeat good nodes again. - Thread.sleep(2 * 1000); - - for (DatanodeDetails dn : nodeList) { - nodeManager.processHeartbeat(dn); - } - - // Wait for 2 seconds, wait a total of 4 seconds to make sure that the - // node moves into stale state. - Thread.sleep(2 * 1000); - List staleNodeList = nodeManager.getNodes(STALE); - assertEquals("Expected to find 1 stale node", - 1, nodeManager.getNodeCount(STALE)); - assertEquals("Expected to find 1 stale node", - 1, staleNodeList.size()); - assertEquals("Stale node is not the expected ID", staleNode - .getUuid(), staleNodeList.get(0).getUuid()); - Thread.sleep(1000); - - // heartbeat good nodes again. - for (DatanodeDetails dn : nodeList) { - nodeManager.processHeartbeat(dn); - } - - // 6 seconds is the dead window for this test , so we wait a total of - // 7 seconds to make sure that the node moves into dead state. - Thread.sleep(2 * 1000); - - // the stale node has been removed - staleNodeList = nodeManager.getNodes(STALE); - assertEquals("Expected to find 1 stale node", - 0, nodeManager.getNodeCount(STALE)); - assertEquals("Expected to find 1 stale node", - 0, staleNodeList.size()); - - // Check for the dead node now. - List deadNodeList = nodeManager.getNodes(DEAD); - assertEquals("Expected to find 1 dead node", 1, - nodeManager.getNodeCount(DEAD)); - assertEquals("Expected to find 1 dead node", - 1, deadNodeList.size()); - assertEquals("Dead node is not the expected ID", staleNode - .getUuid(), deadNodeList.get(0).getUuid()); - } - } - - /** - * Check for NPE when datanodeDetails is passed null for sendHeartbeat. - * - * @throws IOException - * @throws InterruptedException - * @throws TimeoutException - */ - @Test - public void testScmCheckForErrorOnNullDatanodeDetails() throws IOException, - InterruptedException, TimeoutException { - try (SCMNodeManager nodeManager = createNodeManager(getConf())) { - nodeManager.processHeartbeat(null); - } catch (NullPointerException npe) { - GenericTestUtils.assertExceptionContains("Heartbeat is missing " + - "DatanodeDetails.", npe); - } - } - - /** - * Asserts that a dead node, stale node and healthy nodes co-exist. The counts - * , lists and node ID match the expected node state. - *
- * This test is pretty complicated because it explores all states of the
- * node manager in a single test. Please read through the comments to get
- * an idea of the current state of the node manager.
- *
- * This test is written like a state machine to avoid threads and concurrency - * issues. This test is replicated below with the use of threads. Avoiding - * threads make it easy to debug the state machine. - * - * @throws IOException - * @throws InterruptedException - * @throws TimeoutException - */ - /** - * These values are very important. Here is what it means so you don't - * have to look it up while reading this code. - * - * OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL - This the frequency of the - * HB processing thread that is running in the SCM. This thread must run - * for the SCM to process the Heartbeats. - * - * OZONE_SCM_HEARTBEAT_INTERVAL - This is the frequency at which - * datanodes will send heartbeats to SCM. Please note: This is the only - * config value for node manager that is specified in seconds. We don't - * want SCM heartbeat resolution to be more than in seconds. - * In this test it is not used, but we are forced to set it because we - * have validation code that checks Stale Node interval and Dead Node - * interval is larger than the value of - * OZONE_SCM_HEARTBEAT_INTERVAL. - * - * OZONE_SCM_STALENODE_INTERVAL - This is the time that must elapse - * from the last heartbeat for us to mark a node as stale. In this test - * we set that to 3. That is if a node has not heartbeat SCM for last 3 - * seconds we will mark it as stale. - * - * OZONE_SCM_DEADNODE_INTERVAL - This is the time that must elapse - * from the last heartbeat for a node to be marked dead. We have an - * additional constraint that this must be at least 2 times bigger than - * Stale node Interval. - * - * With these we are trying to explore the state of this cluster with - * various timeouts. Each section is commented so that you can keep - * track of the state of the cluster nodes. - * - */ - - @Test - public void testScmClusterIsInExpectedState1() throws IOException, - InterruptedException, TimeoutException { - OzoneConfiguration conf = getConf(); - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100, - MILLISECONDS); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS); - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS); - conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS); - - - /** - * Cluster state: Healthy: All nodes are heartbeat-ing like normal. - */ - try (SCMNodeManager nodeManager = createNodeManager(conf)) { - DatanodeDetails healthyNode = - TestUtils.createRandomDatanodeAndRegister(nodeManager); - DatanodeDetails staleNode = - TestUtils.createRandomDatanodeAndRegister(nodeManager); - DatanodeDetails deadNode = - TestUtils.createRandomDatanodeAndRegister(nodeManager); - nodeManager.processHeartbeat(healthyNode); - nodeManager.processHeartbeat(staleNode); - nodeManager.processHeartbeat(deadNode); - - // Sleep so that heartbeat processing thread gets to run. - Thread.sleep(500); - - //Assert all nodes are healthy. - assertEquals(3, nodeManager.getAllNodes().size()); - assertEquals(3, nodeManager.getNodeCount(HEALTHY)); - - /** - * Cluster state: Quiesced: We are going to sleep for 3 seconds. Which - * means that no node is heartbeating. All nodes should move to Stale. - */ - Thread.sleep(3 * 1000); - assertEquals(3, nodeManager.getAllNodes().size()); - assertEquals(3, nodeManager.getNodeCount(STALE)); - - - /** - * Cluster State : Move healthy node back to healthy state, move other 2 - * nodes to Stale State. - * - * We heartbeat healthy node after 1 second and let other 2 nodes elapse - * the 3 second windows. 
- */ - - nodeManager.processHeartbeat(healthyNode); - nodeManager.processHeartbeat(staleNode); - nodeManager.processHeartbeat(deadNode); - - Thread.sleep(1500); - nodeManager.processHeartbeat(healthyNode); - Thread.sleep(2 * 1000); - assertEquals(1, nodeManager.getNodeCount(HEALTHY)); - - - // 3.5 seconds from last heartbeat for the stale and deadNode. So those - // 2 nodes must move to Stale state and the healthy node must - // remain in the healthy State. - List healthyList = nodeManager.getNodes(HEALTHY); - assertEquals("Expected one healthy node", 1, healthyList.size()); - assertEquals("Healthy node is not the expected ID", healthyNode - .getUuid(), healthyList.get(0).getUuid()); - - assertEquals(2, nodeManager.getNodeCount(STALE)); - - /** - * Cluster State: Allow healthyNode to remain in healthy state and - * staleNode to move to stale state and deadNode to move to dead state. - */ - - nodeManager.processHeartbeat(healthyNode); - nodeManager.processHeartbeat(staleNode); - Thread.sleep(1500); - nodeManager.processHeartbeat(healthyNode); - Thread.sleep(2 * 1000); - - // 3.5 seconds have elapsed for stale node, so it moves into Stale. - // 7 seconds have elapsed for dead node, so it moves into dead. - // 2 Seconds have elapsed for healthy node, so it stays in healhty state. - healthyList = nodeManager.getNodes(HEALTHY); - List staleList = nodeManager.getNodes(STALE); - List deadList = nodeManager.getNodes(DEAD); - - assertEquals(3, nodeManager.getAllNodes().size()); - assertEquals(1, nodeManager.getNodeCount(HEALTHY)); - assertEquals(1, nodeManager.getNodeCount(STALE)); - assertEquals(1, nodeManager.getNodeCount(DEAD)); - - assertEquals("Expected one healthy node", - 1, healthyList.size()); - assertEquals("Healthy node is not the expected ID", healthyNode - .getUuid(), healthyList.get(0).getUuid()); - - assertEquals("Expected one stale node", - 1, staleList.size()); - assertEquals("Stale node is not the expected ID", staleNode - .getUuid(), staleList.get(0).getUuid()); - - assertEquals("Expected one dead node", - 1, deadList.size()); - assertEquals("Dead node is not the expected ID", deadNode - .getUuid(), deadList.get(0).getUuid()); - /** - * Cluster State : let us heartbeat all the nodes and verify that we get - * back all the nodes in healthy state. - */ - nodeManager.processHeartbeat(healthyNode); - nodeManager.processHeartbeat(staleNode); - nodeManager.processHeartbeat(deadNode); - Thread.sleep(500); - //Assert all nodes are healthy. - assertEquals(3, nodeManager.getAllNodes().size()); - assertEquals(3, nodeManager.getNodeCount(HEALTHY)); - } - } - - /** - * Heartbeat a given set of nodes at a specified frequency. - * - * @param manager - Node Manager - * @param list - List of datanodeIDs - * @param sleepDuration - Duration to sleep between heartbeats. - * @throws InterruptedException - */ - private void heartbeatNodeSet(SCMNodeManager manager, - List list, - int sleepDuration) throws InterruptedException { - while (!Thread.currentThread().isInterrupted()) { - for (DatanodeDetails dn : list) { - manager.processHeartbeat(dn); - } - Thread.sleep(sleepDuration); - } - } - - /** - * Create a set of Nodes with a given prefix. - * - * @param count - number of nodes. - * @return List of Nodes. 
- */ - private List createNodeSet(SCMNodeManager nodeManager, int - count) { - List list = new LinkedList<>(); - for (int x = 0; x < count; x++) { - DatanodeDetails datanodeDetails = TestUtils - .createRandomDatanodeAndRegister(nodeManager); - list.add(datanodeDetails); - } - return list; - } - - /** - * Function that tells us if we found the right number of stale nodes. - * - * @param nodeManager - node manager - * @param count - number of stale nodes to look for. - * @return true if we found the expected number. - */ - private boolean findNodes(NodeManager nodeManager, int count, - HddsProtos.NodeState state) { - return count == nodeManager.getNodeCount(state); - } - - /** - * Asserts that we can create a set of nodes that send its heartbeats from - * different threads and NodeManager behaves as expected. - * - * @throws IOException - * @throws InterruptedException - */ - @Test - public void testScmClusterIsInExpectedState2() throws IOException, - InterruptedException, TimeoutException { - final int healthyCount = 5000; - final int staleCount = 100; - final int deadCount = 10; - - OzoneConfiguration conf = getConf(); - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100, - MILLISECONDS); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS); - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS); - conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS); - - - try (SCMNodeManager nodeManager = createNodeManager(conf)) { - List healthyNodeList = createNodeSet(nodeManager, - healthyCount); - List staleNodeList = createNodeSet(nodeManager, - staleCount); - List deadNodeList = createNodeSet(nodeManager, - deadCount); - - Runnable healthyNodeTask = () -> { - try { - // 2 second heartbeat makes these nodes stay healthy. - heartbeatNodeSet(nodeManager, healthyNodeList, 2 * 1000); - } catch (InterruptedException ignored) { - } - }; - - Runnable staleNodeTask = () -> { - try { - // 4 second heartbeat makes these nodes go to stale and back to - // healthy again. - heartbeatNodeSet(nodeManager, staleNodeList, 4 * 1000); - } catch (InterruptedException ignored) { - } - }; - - - // No Thread just one time HBs the node manager, so that these will be - // marked as dead nodes eventually. - for (DatanodeDetails dn : deadNodeList) { - nodeManager.processHeartbeat(dn); - } - - - Thread thread1 = new Thread(healthyNodeTask); - thread1.setDaemon(true); - thread1.start(); - - - Thread thread2 = new Thread(staleNodeTask); - thread2.setDaemon(true); - thread2.start(); - - Thread.sleep(10 * 1000); - - // Assert all healthy nodes are healthy now, this has to be a greater - // than check since Stale nodes can be healthy when we check the state. - - assertTrue(nodeManager.getNodeCount(HEALTHY) >= healthyCount); - - assertEquals(deadCount, nodeManager.getNodeCount(DEAD)); - - List deadList = nodeManager.getNodes(DEAD); - - for (DatanodeDetails node : deadList) { - assertTrue(deadNodeList.contains(node)); - } - - - - // Checking stale nodes is tricky since they have to move between - // healthy and stale to avoid becoming dead nodes. So we search for - // that state for a while, if we don't find that state waitfor will - // throw. - GenericTestUtils.waitFor(() -> findNodes(nodeManager, staleCount, STALE), - 500, 4 * 1000); - - thread1.interrupt(); - thread2.interrupt(); - } - } - - /** - * Asserts that we can handle 6000+ nodes heartbeating SCM. 
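Both concurrent tests rely on the same polling idiom rather than fixed sleeps when asserting on node counts; a sketch of the call as used here, with findNodes being the helper defined just above:

    // Poll every 500 ms until the expected number of nodes is STALE,
    // failing the test if that never happens within 20 s.
    GenericTestUtils.waitFor(
        () -> findNodes(nodeManager, staleCount, STALE),
        500, 20 * 1000);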
- * - * @throws IOException - * @throws InterruptedException - * @throws TimeoutException - */ - @Test - public void testScmCanHandleScale() throws IOException, - InterruptedException, TimeoutException { - final int healthyCount = 3000; - final int staleCount = 3000; - OzoneConfiguration conf = getConf(); - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100, - MILLISECONDS); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, - SECONDS); - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3 * 1000, - MILLISECONDS); - conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6 * 1000, - MILLISECONDS); - - try (SCMNodeManager nodeManager = createNodeManager(conf)) { - List healthyList = createNodeSet(nodeManager, - healthyCount); - List staleList = createNodeSet(nodeManager, - staleCount); - - Runnable healthyNodeTask = () -> { - try { - heartbeatNodeSet(nodeManager, healthyList, 2 * 1000); - } catch (InterruptedException ignored) { - - } - }; - - Runnable staleNodeTask = () -> { - try { - heartbeatNodeSet(nodeManager, staleList, 4 * 1000); - } catch (InterruptedException ignored) { - } - }; - - Thread thread1 = new Thread(healthyNodeTask); - thread1.setDaemon(true); - thread1.start(); - - Thread thread2 = new Thread(staleNodeTask); - thread2.setDaemon(true); - thread2.start(); - Thread.sleep(3 * 1000); - - GenericTestUtils.waitFor(() -> findNodes(nodeManager, staleCount, STALE), - 500, 20 * 1000); - assertEquals("Node count mismatch", - healthyCount + staleCount, nodeManager.getAllNodes().size()); - - thread1.interrupt(); - thread2.interrupt(); - } - } - - - @Test - public void testScmEnterAndExitChillMode() throws IOException, - InterruptedException { - OzoneConfiguration conf = getConf(); - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100, - MILLISECONDS); - - try (SCMNodeManager nodeManager = createNodeManager(conf)) { - nodeManager.setMinimumChillModeNodes(10); - DatanodeDetails datanodeDetails = TestUtils - .createRandomDatanodeAndRegister(nodeManager); - nodeManager.processHeartbeat(datanodeDetails); - String status = nodeManager.getChillModeStatus(); - Assert.assertThat(status, containsString("Still in chill " + - "mode, waiting on nodes to report in.")); - - // Should not exit chill mode since 10 nodes have not heartbeat yet. - assertFalse(nodeManager.isOutOfChillMode()); - - // Force exit chill mode. - nodeManager.forceExitChillMode(); - assertTrue(nodeManager.isOutOfChillMode()); - status = nodeManager.getChillModeStatus(); - Assert.assertThat(status, - containsString("Out of chill mode.")); - - - // Enter back to into chill mode. - nodeManager.enterChillMode(); - assertFalse(nodeManager.isOutOfChillMode()); - status = nodeManager.getChillModeStatus(); - Assert.assertThat(status, - containsString("Out of startup chill mode," + - " but in manual chill mode.")); - - // Assert that node manager force enter cannot be overridden by nodes HBs. - for (int x = 0; x < 20; x++) { - DatanodeDetails datanode = TestUtils - .createRandomDatanodeAndRegister(nodeManager); - nodeManager.processHeartbeat(datanode); - } - - Thread.sleep(500); - assertFalse(nodeManager.isOutOfChillMode()); - - // Make sure that once we exit out of manual chill mode, we fall back - // to the number of nodes to get out chill mode. 
- nodeManager.exitChillMode(); - assertTrue(nodeManager.isOutOfChillMode()); - status = nodeManager.getChillModeStatus(); - Assert.assertThat(status, - containsString("Out of chill mode.")); - } - } - - /** - * Test multiple nodes sending initial heartbeat with their node report. - * - * @throws IOException - * @throws InterruptedException - * @throws TimeoutException - */ - @Test - @Ignore - // TODO: Enable this after we implement NodeReportEvent handler. - public void testScmStatsFromNodeReport() throws IOException, - InterruptedException, TimeoutException { - OzoneConfiguration conf = getConf(); - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000, - MILLISECONDS); - final int nodeCount = 10; - final long capacity = 2000; - final long used = 100; - final long remaining = capacity - used; - - try (SCMNodeManager nodeManager = createNodeManager(conf)) { - for (int x = 0; x < nodeCount; x++) { - DatanodeDetails datanodeDetails = TestUtils - .createRandomDatanodeAndRegister(nodeManager); - UUID dnId = datanodeDetails.getUuid(); - long free = capacity - used; - String storagePath = testDir.getAbsolutePath() + "/" + dnId; - StorageReportProto report = TestUtils - .createStorageReport(dnId, storagePath, capacity, used, free, null); - nodeManager.processHeartbeat(datanodeDetails); - } - //TODO: wait for heartbeat to be processed - Thread.sleep(4 * 1000); - assertEquals(nodeCount, nodeManager.getNodeCount(HEALTHY)); - assertEquals(capacity * nodeCount, (long) nodeManager.getStats() - .getCapacity().get()); - assertEquals(used * nodeCount, (long) nodeManager.getStats() - .getScmUsed().get()); - assertEquals(remaining * nodeCount, (long) nodeManager.getStats() - .getRemaining().get()); - } - } - - /** - * Test single node stat update based on nodereport from different heartbeat - * status (healthy, stale and dead). - * @throws IOException - * @throws InterruptedException - * @throws TimeoutException - */ - @Test - @Ignore - // TODO: Enable this after we implement NodeReportEvent handler. 
- public void testScmNodeReportUpdate() throws IOException, - InterruptedException, TimeoutException { - OzoneConfiguration conf = getConf(); - final int heartbeatCount = 5; - final int nodeCount = 1; - final int interval = 100; - - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, interval, - MILLISECONDS); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS); - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS); - conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS); - - try (SCMNodeManager nodeManager = createNodeManager(conf)) { - DatanodeDetails datanodeDetails = - TestUtils.createRandomDatanodeAndRegister(nodeManager); - final long capacity = 2000; - final long usedPerHeartbeat = 100; - UUID dnId = datanodeDetails.getUuid(); - for (int x = 0; x < heartbeatCount; x++) { - long scmUsed = x * usedPerHeartbeat; - long remaining = capacity - scmUsed; - String storagePath = testDir.getAbsolutePath() + "/" + dnId; - StorageReportProto report = TestUtils - .createStorageReport(dnId, storagePath, capacity, scmUsed, - remaining, null); - - nodeManager.processHeartbeat(datanodeDetails); - Thread.sleep(100); - } - - final long expectedScmUsed = usedPerHeartbeat * (heartbeatCount - 1); - final long expectedRemaining = capacity - expectedScmUsed; - - GenericTestUtils.waitFor( - () -> nodeManager.getStats().getScmUsed().get() == expectedScmUsed, - 100, 4 * 1000); - - long foundCapacity = nodeManager.getStats().getCapacity().get(); - assertEquals(capacity, foundCapacity); - - long foundScmUsed = nodeManager.getStats().getScmUsed().get(); - assertEquals(expectedScmUsed, foundScmUsed); - - long foundRemaining = nodeManager.getStats().getRemaining().get(); - assertEquals(expectedRemaining, foundRemaining); - - // Test NodeManager#getNodeStats - assertEquals(nodeCount, nodeManager.getNodeStats().size()); - long nodeCapacity = nodeManager.getNodeStat(datanodeDetails).get() - .getCapacity().get(); - assertEquals(capacity, nodeCapacity); - - foundScmUsed = nodeManager.getNodeStat(datanodeDetails).get().getScmUsed() - .get(); - assertEquals(expectedScmUsed, foundScmUsed); - - foundRemaining = nodeManager.getNodeStat(datanodeDetails).get() - .getRemaining().get(); - assertEquals(expectedRemaining, foundRemaining); - - // Compare the result from - // NodeManager#getNodeStats and NodeManager#getNodeStat - SCMNodeStat stat1 = nodeManager.getNodeStats(). - get(datanodeDetails.getUuid()); - SCMNodeStat stat2 = nodeManager.getNodeStat(datanodeDetails).get(); - assertEquals(stat1, stat2); - - // Wait up to 4s so that the node becomes stale - // Verify the usage info should be unchanged. - GenericTestUtils.waitFor( - () -> nodeManager.getNodeCount(STALE) == 1, 100, - 4 * 1000); - assertEquals(nodeCount, nodeManager.getNodeStats().size()); - - foundCapacity = nodeManager.getNodeStat(datanodeDetails).get() - .getCapacity().get(); - assertEquals(capacity, foundCapacity); - foundScmUsed = nodeManager.getNodeStat(datanodeDetails).get() - .getScmUsed().get(); - assertEquals(expectedScmUsed, foundScmUsed); - - foundRemaining = nodeManager.getNodeStat(datanodeDetails).get(). - getRemaining().get(); - assertEquals(expectedRemaining, foundRemaining); - - // Wait up to 4 more seconds so the node becomes dead - // Verify usage info should be updated. 
- GenericTestUtils.waitFor( - () -> nodeManager.getNodeCount(DEAD) == 1, 100, - 4 * 1000); - - assertEquals(0, nodeManager.getNodeStats().size()); - foundCapacity = nodeManager.getStats().getCapacity().get(); - assertEquals(0, foundCapacity); - - foundScmUsed = nodeManager.getStats().getScmUsed().get(); - assertEquals(0, foundScmUsed); - - foundRemaining = nodeManager.getStats().getRemaining().get(); - assertEquals(0, foundRemaining); - - nodeManager.processHeartbeat(datanodeDetails); - - // Wait up to 5 seconds so that the dead node becomes healthy - // Verify usage info should be updated. - GenericTestUtils.waitFor( - () -> nodeManager.getNodeCount(HEALTHY) == 1, - 100, 5 * 1000); - GenericTestUtils.waitFor( - () -> nodeManager.getStats().getScmUsed().get() == expectedScmUsed, - 100, 4 * 1000); - assertEquals(nodeCount, nodeManager.getNodeStats().size()); - foundCapacity = nodeManager.getNodeStat(datanodeDetails).get() - .getCapacity().get(); - assertEquals(capacity, foundCapacity); - foundScmUsed = nodeManager.getNodeStat(datanodeDetails).get().getScmUsed() - .get(); - assertEquals(expectedScmUsed, foundScmUsed); - foundRemaining = nodeManager.getNodeStat(datanodeDetails).get() - .getRemaining().get(); - assertEquals(expectedRemaining, foundRemaining); - } - } - - @Test - public void testHandlingSCMCommandEvent() { - OzoneConfiguration conf = getConf(); - conf.getTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, - 100, TimeUnit.MILLISECONDS); - - DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails(); - UUID dnId = datanodeDetails.getUuid(); - String storagePath = testDir.getAbsolutePath() + "/" + dnId; - StorageReportProto report = - TestUtils.createStorageReport(dnId, storagePath, 100, 10, 90, null); - - EventQueue eq = new EventQueue(); - try (SCMNodeManager nodemanager = createNodeManager(conf)) { - eq.addHandler(DATANODE_COMMAND, nodemanager); - - nodemanager - .register(datanodeDetails, TestUtils.createNodeReport(report), - TestUtils.getRandomPipelineReports()); - eq.fireEvent(DATANODE_COMMAND, - new CommandForDatanode<>(datanodeDetails.getUuid(), - new CloseContainerCommand(1L, ReplicationType.STAND_ALONE, - PipelineID.randomId()))); - - eq.processAll(1000L); - List command = - nodemanager.processHeartbeat(datanodeDetails); - Assert.assertEquals(1, command.size()); - Assert - .assertEquals(command.get(0).getClass(), CloseContainerCommand.class); - } catch (IOException e) { - e.printStackTrace(); - } - } - -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java deleted file mode 100644 index f9b139294f78c..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java +++ /dev/null @@ -1,94 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.node; - -import java.io.IOException; -import java.util.UUID; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.NodeReportFromDatanode; -import org.apache.hadoop.hdds.server.events.Event; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TestNodeReportHandler implements EventPublisher { - - private static final Logger LOG = LoggerFactory - .getLogger(TestNodeReportHandler.class); - private NodeReportHandler nodeReportHandler; - private SCMNodeManager nodeManager; - private String storagePath = GenericTestUtils.getRandomizedTempPath() - .concat("/" + UUID.randomUUID().toString()); - - @Before - public void resetEventCollector() throws IOException { - OzoneConfiguration conf = new OzoneConfiguration(); - nodeManager = new SCMNodeManager(conf, "cluster1", null, new EventQueue()); - nodeReportHandler = new NodeReportHandler(nodeManager); - } - - @Test - public void testNodeReport() throws IOException { - DatanodeDetails dn = TestUtils.randomDatanodeDetails(); - StorageReportProto storageOne = TestUtils - .createStorageReport(dn.getUuid(), storagePath, 100, 10, 90, null); - - SCMNodeMetric nodeMetric = nodeManager.getNodeStat(dn); - Assert.assertNull(nodeMetric); - - nodeReportHandler.onMessage( - getNodeReport(dn, storageOne), this); - nodeMetric = nodeManager.getNodeStat(dn); - - Assert.assertTrue(nodeMetric.get().getCapacity().get() == 100); - Assert.assertTrue(nodeMetric.get().getRemaining().get() == 90); - Assert.assertTrue(nodeMetric.get().getScmUsed().get() == 10); - - StorageReportProto storageTwo = TestUtils - .createStorageReport(dn.getUuid(), storagePath, 100, 10, 90, null); - nodeReportHandler.onMessage( - getNodeReport(dn, storageOne, storageTwo), this); - nodeMetric = nodeManager.getNodeStat(dn); - - Assert.assertTrue(nodeMetric.get().getCapacity().get() == 200); - Assert.assertTrue(nodeMetric.get().getRemaining().get() == 180); - Assert.assertTrue(nodeMetric.get().getScmUsed().get() == 20); - - } - - private NodeReportFromDatanode getNodeReport(DatanodeDetails dn, - StorageReportProto... 
reports) { - NodeReportProto nodeReportProto = TestUtils.createNodeReport(reports); - return new NodeReportFromDatanode(dn, nodeReportProto); - } - - @Override - public > void fireEvent( - EVENT_TYPE event, PAYLOAD payload) { - LOG.info("Event is published: {}", payload); - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java deleted file mode 100644 index 623fc16a92462..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java +++ /dev/null @@ -1,262 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.node; - -import org.apache.hadoop.fs.StorageType; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.StorageReportProto; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.*; -import org.junit.Rule; -import org.junit.rules.ExpectedException; - -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.Set; -import java.util.ArrayList; -import java.util.HashSet; -import java.io.IOException; -import java.util.concurrent.ConcurrentHashMap; - -/** - * Test Node Storage Map. 
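The deleted testNodeReport above pinned down the aggregation rule for node reports: every storage report carried by a node report contributes its capacity, used, and remaining figures to the node's totals, which is why two (100, 10, 90) reports yield (200, 20, 180). A minimal, self-contained sketch of that arithmetic, using plain JDK types rather than the real SCMNodeMetric and protobuf builders (all names here are illustrative):

```java
import java.util.List;

// Simplified stand-in for a node's aggregated storage metric; this mirrors
// the arithmetic the deleted testNodeReport verified, not the real API.
final class NodeStatSketch {
  long capacity;
  long used;
  long remaining;

  // Each storage report adds its numbers to the node-level totals, so two
  // (100, 10, 90) reports aggregate to (200, 20, 180).
  static NodeStatSketch aggregate(List<long[]> reports) {
    NodeStatSketch stat = new NodeStatSketch();
    for (long[] r : reports) { // r = {capacity, used, remaining}
      stat.capacity += r[0];
      stat.used += r[1];
      stat.remaining += r[2];
    }
    return stat;
  }

  public static void main(String[] args) {
    NodeStatSketch s = NodeStatSketch.aggregate(List.of(
        new long[] {100, 10, 90},
        new long[] {100, 10, 90}));
    System.out.println(s.capacity + " " + s.used + " " + s.remaining); // 200 20 180
  }
}
```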
- */ -public class TestSCMNodeStorageStatMap { - private final static int DATANODE_COUNT = 100; - private final long capacity = 10L * OzoneConsts.GB; - private final long used = 2L * OzoneConsts.GB; - private final long remaining = capacity - used; - private static OzoneConfiguration conf = new OzoneConfiguration(); - private final Map> testData = - new ConcurrentHashMap<>(); - - @Rule - public ExpectedException thrown = ExpectedException.none(); - - private void generateData() { - for (int dnIndex = 1; dnIndex <= DATANODE_COUNT; dnIndex++) { - UUID dnId = UUID.randomUUID(); - Set reportSet = new HashSet<>(); - String path = GenericTestUtils.getTempPath( - TestSCMNodeStorageStatMap.class.getSimpleName() + "-" + - Integer.toString(dnIndex)); - StorageLocationReport.Builder builder = - StorageLocationReport.newBuilder(); - builder.setStorageType(StorageType.DISK).setId(dnId.toString()) - .setStorageLocation(path).setScmUsed(used).setRemaining(remaining) - .setCapacity(capacity).setFailed(false); - reportSet.add(builder.build()); - testData.put(UUID.randomUUID(), reportSet); - } - } - - private UUID getFirstKey() { - return testData.keySet().iterator().next(); - } - - @Before - public void setUp() throws Exception { - generateData(); - } - - @After - public void tearDown() throws Exception { - } - - @Test - public void testIsKnownDatanode() throws SCMException { - SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf); - UUID knownNode = getFirstKey(); - UUID unknownNode = UUID.randomUUID(); - Set report = testData.get(knownNode); - map.insertNewDatanode(knownNode, report); - Assert.assertTrue("Not able to detect a known node", - map.isKnownDatanode(knownNode)); - Assert.assertFalse("Unknown node detected", - map.isKnownDatanode(unknownNode)); - } - - @Test - public void testInsertNewDatanode() throws SCMException { - SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf); - UUID knownNode = getFirstKey(); - Set report = testData.get(knownNode); - map.insertNewDatanode(knownNode, report); - Assert.assertEquals(map.getStorageVolumes(knownNode), - testData.get(knownNode)); - thrown.expect(SCMException.class); - thrown.expectMessage("already exists"); - map.insertNewDatanode(knownNode, report); - } - - @Test - public void testUpdateUnknownDatanode() throws SCMException { - SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf); - UUID unknownNode = UUID.randomUUID(); - String path = GenericTestUtils.getTempPath( - TestSCMNodeStorageStatMap.class.getSimpleName() + "-" + unknownNode - .toString()); - Set reportSet = new HashSet<>(); - StorageLocationReport.Builder builder = StorageLocationReport.newBuilder(); - builder.setStorageType(StorageType.DISK).setId(unknownNode.toString()) - .setStorageLocation(path).setScmUsed(used).setRemaining(remaining) - .setCapacity(capacity).setFailed(false); - reportSet.add(builder.build()); - thrown.expect(SCMException.class); - thrown.expectMessage("No such datanode"); - map.updateDatanodeMap(unknownNode, reportSet); - } - - @Test - public void testProcessNodeReportCheckOneNode() throws IOException { - UUID key = getFirstKey(); - List reportList = new ArrayList<>(); - Set reportSet = testData.get(key); - SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf); - map.insertNewDatanode(key, reportSet); - Assert.assertTrue(map.isKnownDatanode(key)); - UUID storageId = UUID.randomUUID(); - String path = - GenericTestUtils.getRandomizedTempPath().concat("/" + storageId); - StorageLocationReport report = reportSet.iterator().next(); - long 
reportCapacity = report.getCapacity(); - long reportScmUsed = report.getScmUsed(); - long reportRemaining = report.getRemaining(); - StorageReportProto storageReport = TestUtils.createStorageReport(storageId, - path, reportCapacity, reportScmUsed, reportRemaining, null); - StorageReportResult result = - map.processNodeReport(key, TestUtils.createNodeReport(storageReport)); - Assert.assertEquals(result.getStatus(), - SCMNodeStorageStatMap.ReportStatus.ALL_IS_WELL); - StorageContainerDatanodeProtocolProtos.NodeReportProto.Builder nrb = - NodeReportProto.newBuilder(); - StorageReportProto srb = reportSet.iterator().next().getProtoBufMessage(); - reportList.add(srb); - result = map.processNodeReport(key, TestUtils.createNodeReport(reportList)); - Assert.assertEquals(result.getStatus(), - SCMNodeStorageStatMap.ReportStatus.ALL_IS_WELL); - - reportList.add(TestUtils - .createStorageReport(UUID.randomUUID(), path, reportCapacity, - reportCapacity, 0, null)); - result = map.processNodeReport(key, TestUtils.createNodeReport(reportList)); - Assert.assertEquals(result.getStatus(), - SCMNodeStorageStatMap.ReportStatus.STORAGE_OUT_OF_SPACE); - // Mark a disk failed - StorageReportProto srb2 = StorageReportProto.newBuilder() - .setStorageUuid(UUID.randomUUID().toString()) - .setStorageLocation(srb.getStorageLocation()).setScmUsed(reportCapacity) - .setCapacity(reportCapacity).setRemaining(0).setFailed(true).build(); - reportList.add(srb2); - nrb.addAllStorageReport(reportList); - result = map.processNodeReport(key, nrb.addStorageReport(srb).build()); - Assert.assertEquals(result.getStatus(), - SCMNodeStorageStatMap.ReportStatus.FAILED_AND_OUT_OF_SPACE_STORAGE); - - } - - @Test - public void testProcessMultipleNodeReports() throws SCMException { - SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf); - int counter = 1; - // Insert all testData into the SCMNodeStorageStatMap Map. 
- for (Map.Entry> keyEntry : testData - .entrySet()) { - map.insertNewDatanode(keyEntry.getKey(), keyEntry.getValue()); - } - Assert.assertEquals(DATANODE_COUNT * capacity, map.getTotalCapacity()); - Assert.assertEquals(DATANODE_COUNT * remaining, map.getTotalFreeSpace()); - Assert.assertEquals(DATANODE_COUNT * used, map.getTotalSpaceUsed()); - - // update 1/4th of the datanodes to be full - for (Map.Entry> keyEntry : testData - .entrySet()) { - Set reportSet = new HashSet<>(); - String path = GenericTestUtils.getTempPath( - TestSCMNodeStorageStatMap.class.getSimpleName() + "-" + keyEntry - .getKey().toString()); - StorageLocationReport.Builder builder = - StorageLocationReport.newBuilder(); - builder.setStorageType(StorageType.DISK) - .setId(keyEntry.getKey().toString()).setStorageLocation(path) - .setScmUsed(capacity).setRemaining(0).setCapacity(capacity) - .setFailed(false); - reportSet.add(builder.build()); - - map.updateDatanodeMap(keyEntry.getKey(), reportSet); - counter++; - if (counter > DATANODE_COUNT / 4) { - break; - } - } - Assert.assertEquals(DATANODE_COUNT / 4, - map.getDatanodeList(SCMNodeStorageStatMap.UtilizationThreshold.CRITICAL) - .size()); - Assert.assertEquals(0, - map.getDatanodeList(SCMNodeStorageStatMap.UtilizationThreshold.WARN) - .size()); - Assert.assertEquals(0.75 * DATANODE_COUNT, - map.getDatanodeList(SCMNodeStorageStatMap.UtilizationThreshold.NORMAL) - .size(), 0); - - Assert.assertEquals(DATANODE_COUNT * capacity, map.getTotalCapacity(), 0); - Assert.assertEquals(0.75 * DATANODE_COUNT * remaining, - map.getTotalFreeSpace(), 0); - Assert.assertEquals( - 0.75 * DATANODE_COUNT * used + (0.25 * DATANODE_COUNT * capacity), - map.getTotalSpaceUsed(), 0); - counter = 1; - // Remove 1/4 of the DataNodes from the Map - for (Map.Entry> keyEntry : testData - .entrySet()) { - map.removeDatanode(keyEntry.getKey()); - counter++; - if (counter > DATANODE_COUNT / 4) { - break; - } - } - - Assert.assertEquals(0, - map.getDatanodeList(SCMNodeStorageStatMap.UtilizationThreshold.CRITICAL) - .size()); - Assert.assertEquals(0, - map.getDatanodeList(SCMNodeStorageStatMap.UtilizationThreshold.WARN) - .size()); - Assert.assertEquals(0.75 * DATANODE_COUNT, - map.getDatanodeList(SCMNodeStorageStatMap.UtilizationThreshold.NORMAL) - .size(), 0); - - Assert - .assertEquals(0.75 * DATANODE_COUNT * capacity, map.getTotalCapacity(), - 0); - Assert.assertEquals(0.75 * DATANODE_COUNT * remaining, - map.getTotalFreeSpace(), 0); - Assert - .assertEquals(0.75 * DATANODE_COUNT * used, map.getTotalSpaceUsed(), 0); - - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/package-info.java deleted file mode 100644 index dfd8397771230..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Make CheckStyle Happy. - */ -package org.apache.hadoop.hdds.scm.node; \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java deleted file mode 100644 index ec1d5279d0917..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java +++ /dev/null @@ -1,327 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.scm.node.states; - -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; - -import java.util.Map; -import java.util.Random; -import java.util.Set; -import java.util.TreeSet; -import java.util.UUID; -import java.util.concurrent.ConcurrentHashMap; - -/** - * Test classes for Node2ContainerMap. 
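The tests that follow exercise a small contract: a datanode may be registered in the map exactly once, and lookups distinguish known from unknown nodes. A simplified sketch of that contract under assumed names, using only JDK types (this is not the real Node2ContainerMap API):

```java
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;

// Simplified sketch of the node-to-container-set map contract the deleted
// tests pin down; names and exception types are stand-ins.
final class NodeToContainersSketch {
  private final Map<UUID, Set<Long>> map = new ConcurrentHashMap<>();

  // A datanode may be inserted only once; a second insert is an error,
  // which is the "already exists" behaviour the deleted test expects.
  void insertNewDatanode(UUID datanode, Set<Long> containers) {
    if (map.putIfAbsent(datanode, new TreeSet<>(containers)) != null) {
      throw new IllegalStateException("Datanode " + datanode + " already exists");
    }
  }

  boolean isKnownDatanode(UUID datanode) {
    return map.containsKey(datanode);
  }

  Set<Long> getContainers(UUID datanode) {
    return map.get(datanode);
  }

  void removeDatanode(UUID datanode) {
    map.remove(datanode); // after removal the node may be inserted again
  }
}
```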
- */ -public class TestNode2ContainerMap { - private final static int DATANODE_COUNT = 300; - private final static int CONTAINER_COUNT = 1000; - private final Map> testData = new - ConcurrentHashMap<>(); - - @Rule - public ExpectedException thrown = ExpectedException.none(); - - private void generateData() { - for (int dnIndex = 1; dnIndex <= DATANODE_COUNT; dnIndex++) { - TreeSet currentSet = new TreeSet<>(); - for (int cnIndex = 1; cnIndex <= CONTAINER_COUNT; cnIndex++) { - long currentCnIndex = (dnIndex * CONTAINER_COUNT) + cnIndex; - currentSet.add(new ContainerID(currentCnIndex)); - } - testData.put(UUID.randomUUID(), currentSet); - } - } - - private UUID getFirstKey() { - return testData.keySet().iterator().next(); - } - - @Before - public void setUp() throws Exception { - generateData(); - } - - @After - public void tearDown() throws Exception { - } - - @Test - public void testIsKnownDatanode() throws SCMException { - Node2ContainerMap map = new Node2ContainerMap(); - UUID knownNode = getFirstKey(); - UUID unknownNode = UUID.randomUUID(); - Set containerIDs = testData.get(knownNode); - map.insertNewDatanode(knownNode, containerIDs); - Assert.assertTrue("Not able to detect a known node", - map.isKnownDatanode(knownNode)); - Assert.assertFalse("Unknown node detected", - map.isKnownDatanode(unknownNode)); - } - - @Test - public void testInsertNewDatanode() throws SCMException { - Node2ContainerMap map = new Node2ContainerMap(); - UUID knownNode = getFirstKey(); - Set containerIDs = testData.get(knownNode); - map.insertNewDatanode(knownNode, containerIDs); - Set readSet = map.getContainers(knownNode); - - // Assert that all elements are present in the set that we read back from - // node map. - Set newSet = new TreeSet((readSet)); - Assert.assertTrue(newSet.removeAll(containerIDs)); - Assert.assertTrue(newSet.size() == 0); - - thrown.expect(SCMException.class); - thrown.expectMessage("already exists"); - map.insertNewDatanode(knownNode, containerIDs); - - map.removeDatanode(knownNode); - map.insertNewDatanode(knownNode, containerIDs); - - } - - @Test - public void testProcessReportCheckOneNode() throws SCMException { - UUID key = getFirstKey(); - Set values = testData.get(key); - Node2ContainerMap map = new Node2ContainerMap(); - map.insertNewDatanode(key, values); - Assert.assertTrue(map.isKnownDatanode(key)); - ReportResult result = map.processReport(key, values); - Assert.assertEquals(result.getStatus(), - ReportResult.ReportStatus.ALL_IS_WELL); - } - - @Test - public void testUpdateDatanodeMap() throws SCMException { - UUID datanodeId = getFirstKey(); - Set values = testData.get(datanodeId); - Node2ContainerMap map = new Node2ContainerMap(); - map.insertNewDatanode(datanodeId, values); - Assert.assertTrue(map.isKnownDatanode(datanodeId)); - Assert.assertEquals(CONTAINER_COUNT, map.getContainers(datanodeId).size()); - - //remove one container - values.remove(values.iterator().next()); - Assert.assertEquals(CONTAINER_COUNT - 1, values.size()); - Assert.assertEquals(CONTAINER_COUNT, map.getContainers(datanodeId).size()); - - map.setContainersForDatanode(datanodeId, values); - - Assert.assertEquals(values.size(), map.getContainers(datanodeId).size()); - Assert.assertEquals(values, map.getContainers(datanodeId)); - } - - @Test - public void testProcessReportInsertAll() throws SCMException { - Node2ContainerMap map = new Node2ContainerMap(); - - for (Map.Entry> keyEntry : testData.entrySet()) { - map.insertNewDatanode(keyEntry.getKey(), keyEntry.getValue()); - } - // Assert all Keys are 
known datanodes. - for (UUID key : testData.keySet()) { - Assert.assertTrue(map.isKnownDatanode(key)); - } - } - - /* - For ProcessReport we have to test the following scenarios. - - 1. New Datanode - A new datanode appears and we have to add that to the - SCM's Node2Container Map. - - 2. New Container - A Datanode exists, but a new container is added to that - DN. We need to detect that and return a list of added containers. - - 3. Missing Container - A Datanode exists, but one of the expected containers - on that datanode is missing. We need to detect that. - - 4. We get a container report that has both the missing and new containers. - We need to return separate lists for these. - - A compact sketch of the set arithmetic behind these cases follows - testProcessReportDetectNewContainers below. - */ - - /** - * Assert that we are able to detect the addition of a new datanode. - * - * @throws SCMException - */ - @Test - public void testProcessReportDetectNewDataNode() throws SCMException { - Node2ContainerMap map = new Node2ContainerMap(); - // If we attempt to process a node that is not present in the map, - // we get a result back that says, NEW_NODE_FOUND. - UUID key = getFirstKey(); - TreeSet values = testData.get(key); - ReportResult result = map.processReport(key, values); - Assert.assertEquals(ReportResult.ReportStatus.NEW_DATANODE_FOUND, - result.getStatus()); - Assert.assertEquals(result.getNewEntries().size(), values.size()); - } - - /** - * This test asserts that processReport is able to detect new containers - * when they are added to a datanode. For that we populate the DN with a list - * of containerIDs and then add a few more containers and make sure that we - * are able to detect them. - * - * @throws SCMException - */ - @Test - public void testProcessReportDetectNewContainers() throws SCMException { - Node2ContainerMap map = new Node2ContainerMap(); - UUID key = getFirstKey(); - TreeSet values = testData.get(key); - map.insertNewDatanode(key, values); - - final int newCount = 100; - ContainerID last = values.last(); - TreeSet addedContainers = new TreeSet<>(); - for (int x = 1; x <= newCount; x++) { - long cTemp = last.getId() + x; - addedContainers.add(new ContainerID(cTemp)); - } - - // This set is the super set of existing containers and new containers. - TreeSet newContainersSet = new TreeSet<>(values); - newContainersSet.addAll(addedContainers); - - ReportResult result = map.processReport(key, newContainersSet); - - // Assert that the number of new entries matches addedContainers. - Assert.assertEquals(ReportResult.ReportStatus.NEW_ENTRIES_FOUND, - result.getStatus()); - - Assert.assertEquals(addedContainers.size(), - result.getNewEntries().size()); - - // Assert that the Container IDs are the same as we added new. - Assert.assertTrue("All objects are not removed.", - result.getNewEntries().removeAll(addedContainers)); - }
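As referenced in the scenario comment above, all four processReport cases reduce to two set differences between the containers a datanode reports and the set SCM last recorded for it. A compact, self-contained sketch of that classification (ReportDiffSketch and the status strings are hypothetical stand-ins for the real ReportResult):

```java
import java.util.Set;
import java.util.TreeSet;

// Classifies a reported container set against the last known set:
// newEntries = reported \ known, missingEntries = known \ reported.
// Sketch only; the deleted tests verify the same logic via ReportResult.
final class ReportDiffSketch {
  final Set<Long> newEntries;
  final Set<Long> missingEntries;

  ReportDiffSketch(Set<Long> known, Set<Long> reported) {
    newEntries = new TreeSet<>(reported);
    newEntries.removeAll(known);        // containers seen for the first time
    missingEntries = new TreeSet<>(known);
    missingEntries.removeAll(reported); // containers that disappeared
  }

  String status() {
    if (!newEntries.isEmpty() && !missingEntries.isEmpty()) {
      return "MISSING_AND_NEW_ENTRIES_FOUND";
    } else if (!newEntries.isEmpty()) {
      return "NEW_ENTRIES_FOUND";
    } else if (!missingEntries.isEmpty()) {
      return "MISSING_ENTRIES";
    }
    return "ALL_IS_WELL";
  }
}
```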
- - /** - * This test asserts that processReport is able to detect missing containers - * if they are missing from a list. - * - * @throws SCMException - */ - @Test - public void testProcessReportDetectMissingContainers() throws SCMException { - Node2ContainerMap map = new Node2ContainerMap(); - UUID key = getFirstKey(); - TreeSet values = testData.get(key); - map.insertNewDatanode(key, values); - - final int removeCount = 100; - Random r = new Random(); - - ContainerID first = values.first(); - TreeSet removedContainers = new TreeSet<>(); - - // Pick random containers to remove; it is ok to collide, no issues. - for (int x = 0; x < removeCount; x++) { - int startBase = (int) first.getId(); - long cTemp = r.nextInt(values.size()); - removedContainers.add(new ContainerID(cTemp + startBase)); - } - - // This set is a new set with some containers removed. - TreeSet newContainersSet = new TreeSet<>(values); - newContainersSet.removeAll(removedContainers); - - ReportResult result = map.processReport(key, newContainersSet); - - - // Assert that the number of missing entries matches removedContainers. - Assert.assertEquals(ReportResult.ReportStatus.MISSING_ENTRIES, - result.getStatus()); - Assert.assertEquals(removedContainers.size(), - result.getMissingEntries().size()); - - // Assert that the missing Container IDs are the ones we removed. - Assert.assertTrue("All missing containers not found.", - result.getMissingEntries().removeAll(removedContainers)); - } - - @Test - public void testProcessReportDetectNewAndMissingContainers() throws - SCMException { - Node2ContainerMap map = new Node2ContainerMap(); - UUID key = getFirstKey(); - TreeSet values = testData.get(key); - map.insertNewDatanode(key, values); - - Set insertedSet = new TreeSet<>(); - // Insert containers from 1..30 - for (int x = 1; x <= 30; x++) { - insertedSet.add(new ContainerID(x)); - } - - - final int removeCount = 100; - Random r = new Random(); - - ContainerID first = values.first(); - TreeSet removedContainers = new TreeSet<>(); - - // Pick random containers to remove; it is ok to collide, no issues. - for (int x = 0; x < removeCount; x++) { - int startBase = (int) first.getId(); - long cTemp = r.nextInt(values.size()); - removedContainers.add(new ContainerID(cTemp + startBase)); - } - - Set newSet = new TreeSet<>(values); - newSet.addAll(insertedSet); - newSet.removeAll(removedContainers); - - ReportResult result = map.processReport(key, newSet); - - - Assert.assertEquals( - ReportResult.ReportStatus.MISSING_AND_NEW_ENTRIES_FOUND, - result.getStatus()); - Assert.assertEquals(removedContainers.size(), - result.getMissingEntries().size()); - - - // Assert that the missing Container IDs are the ones we removed. - Assert.assertTrue("All missing containers not found.", - result.getMissingEntries().removeAll(removedContainers)); - - Assert.assertEquals(insertedSet.size(), - result.getNewEntries().size()); - - // Assert that the new Container IDs are the ones we inserted. - Assert.assertTrue("All inserted containers are not found.", - result.getNewEntries().removeAll(insertedSet)); - } -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/package-info.java deleted file mode 100644 index 6610fcd710610..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/** - * Test Node2Container Map. - */ -package org.apache.hadoop.hdds.scm.node.states; \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/package-info.java deleted file mode 100644 index da05c59acfcd3..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm; -/** - * SCM tests - */ diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMChillModeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMChillModeManager.java deleted file mode 100644 index 486c604cd063b..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMChillModeManager.java +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
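The class deleted below, TestSCMChillModeManager, drove the chill-mode exit rule: SCM stays in chill mode until the fraction of known containers reported back by datanodes crosses a cutoff, which is why the test steps through thresholds of 0.25, 0.50, 0.75, and 1.0. A self-contained sketch of that bookkeeping, assuming an illustrative cutoff value (the real cutoff comes from configuration, not from this sketch):

```java
import java.util.HashSet;
import java.util.Set;

// Tracks which of the known containers have been reported and computes
// the fraction used by the chill-mode exit rule. Simplified sketch only.
final class ChillModeRuleSketch {
  private final Set<Long> known;
  private final Set<Long> reported = new HashSet<>();
  private final double cutoff; // illustrative, e.g. 0.99; configured in SCM

  ChillModeRuleSketch(Set<Long> knownContainers, double cutoff) {
    this.known = knownContainers;
    this.cutoff = cutoff;
  }

  void onContainerReport(Set<Long> ids) {
    for (long id : ids) {
      if (known.contains(id)) { // only containers SCM already knows count
        reported.add(id);
      }
    }
  }

  double currentThreshold() {
    return known.isEmpty() ? 1.0 : (double) reported.size() / known.size();
  }

  boolean inChillMode() {
    return currentThreshold() < cutoff;
  }
}
```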
- */ -package org.apache.hadoop.hdds.scm.server; - -import java.util.ArrayList; -import java.util.List; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.HddsTestUtils; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; - -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.assertFalse; - -/** Test class for SCMChillModeManager. - */ -public class TestSCMChillModeManager { - - private static EventQueue queue; - private SCMChillModeManager scmChillModeManager; - private static Configuration config; - private List containers; - - @Rule - public Timeout timeout = new Timeout(1000 * 20); - - @BeforeClass - public static void setUp() { - queue = new EventQueue(); - config = new OzoneConfiguration(); - } - - @Test - public void testChillModeState() throws Exception { - // Test 1: test for 0 containers - testChillMode(0); - - // Test 2: test for 20 containers - testChillMode(20); - } - - @Test - public void testChillModeStateWithNullContainers() { - new SCMChillModeManager(config, null, queue); - } - - private void testChillMode(int numContainers) throws Exception { - containers = new ArrayList<>(); - containers.addAll(HddsTestUtils.getContainerInfo(numContainers)); - scmChillModeManager = new SCMChillModeManager(config, containers, queue); - queue.addHandler(SCMEvents.NODE_REGISTRATION_CONT_REPORT, - scmChillModeManager); - assertTrue(scmChillModeManager.getInChillMode()); - queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT, - HddsTestUtils.createNodeRegistrationContainerReport(containers)); - GenericTestUtils.waitFor(() -> { - return !scmChillModeManager.getInChillMode(); - }, 100, 1000 * 5); - } - - @Test - public void testChillModeExitRule() throws Exception { - containers = new ArrayList<>(); - containers.addAll(HddsTestUtils.getContainerInfo(25 * 4)); - scmChillModeManager = new SCMChillModeManager(config, containers, queue); - queue.addHandler(SCMEvents.NODE_REGISTRATION_CONT_REPORT, - scmChillModeManager); - assertTrue(scmChillModeManager.getInChillMode()); - - testContainerThreshold(containers.subList(0, 25), 0.25); - assertTrue(scmChillModeManager.getInChillMode()); - testContainerThreshold(containers.subList(25, 50), 0.50); - assertTrue(scmChillModeManager.getInChillMode()); - testContainerThreshold(containers.subList(50, 75), 0.75); - assertTrue(scmChillModeManager.getInChillMode()); - testContainerThreshold(containers.subList(75, 100), 1.0); - - GenericTestUtils.waitFor(() -> { - return !scmChillModeManager.getInChillMode(); - }, 100, 1000 * 5); - } - - @Test - public void testDisableChillMode() { - OzoneConfiguration conf = new OzoneConfiguration(config); - conf.setBoolean(HddsConfigKeys.HDDS_SCM_CHILLMODE_ENABLED, false); - scmChillModeManager = new SCMChillModeManager(conf, containers, queue); - assertFalse(scmChillModeManager.getInChillMode()); - } - - private void testContainerThreshold(List dnContainers, - double expectedThreshold) - throws Exception { - queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT, - HddsTestUtils.createNodeRegistrationContainerReport(dnContainers)); - GenericTestUtils.waitFor(() -> { - double 
threshold = scmChillModeManager.getCurrentContainerThreshold(); - return threshold == expectedThreshold; - }, 100, 2000 * 9); - } - -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java deleted file mode 100644 index 4b2001848b19e..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.server; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.test.LambdaTestUtils; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -/** - * Test class for @{@link SCMClientProtocolServer}. - * */ -public class TestSCMClientProtocolServer { - private SCMClientProtocolServer scmClientProtocolServer; - private OzoneConfiguration config; - private EventQueue eventQueue; - - @Before - public void setUp() throws Exception { - config = new OzoneConfiguration(); - eventQueue = new EventQueue(); - scmClientProtocolServer = new SCMClientProtocolServer(config, null); - eventQueue.addHandler(SCMEvents.CHILL_MODE_STATUS, scmClientProtocolServer); - } - - @After - public void tearDown() throws Exception { - } - - @Test - public void testAllocateContainerFailureInChillMode() throws Exception { - LambdaTestUtils.intercept(SCMException.class, - "hillModePrecheck failed for allocateContainer", () -> { - scmClientProtocolServer.allocateContainer( - ReplicationType.STAND_ALONE, ReplicationFactor.ONE, ""); - }); - } -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java deleted file mode 100644 index f3cd4eaabad75..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java +++ /dev/null @@ -1,138 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.server; - -import java.io.IOException; -import java.util.concurrent.atomic.AtomicInteger; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto; -import org.apache.hadoop.hdds.scm.server. - SCMDatanodeHeartbeatDispatcher.CommandStatusReportFromDatanode; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher - .ContainerReportFromDatanode; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher - .NodeReportFromDatanode; -import org.apache.hadoop.hdds.server.events.Event; -import org.apache.hadoop.hdds.server.events.EventPublisher; - -import org.junit.Assert; -import org.junit.Test; -import org.mockito.Mockito; - -import static org.apache.hadoop.hdds.scm.events.SCMEvents.CONTAINER_REPORT; -import static org.apache.hadoop.hdds.scm.events.SCMEvents.NODE_REPORT; -import static org.apache.hadoop.hdds.scm.events.SCMEvents.CMD_STATUS_REPORT; - -/** - * This class tests the behavior of SCMDatanodeHeartbeatDispatcher. 
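The test class that follows verifies a simple fan-out: one heartbeat message may carry several optional sub-reports, and each report that is present gets re-published as its own typed event, so a heartbeat with a container report and a command-status report produces two events. A minimal sketch of that dispatch shape, with hypothetical types and event names standing in for the protobuf messages and SCMEvents constants:

```java
import java.util.function.BiConsumer;

// Sketch of the heartbeat fan-out the deleted test verifies; every field
// and event name here is a stand-in, not the real dispatcher API.
final class HeartbeatDispatchSketch {
  static final class Heartbeat {
    Object nodeReport;          // null when the heartbeat carries none
    Object containerReport;
    Object commandStatusReport;
  }

  private final BiConsumer<String, Object> publisher; // (event, payload)

  HeartbeatDispatchSketch(BiConsumer<String, Object> publisher) {
    this.publisher = publisher;
  }

  void dispatch(Heartbeat hb) {
    if (hb.nodeReport != null) {
      publisher.accept("NODE_REPORT", hb.nodeReport);
    }
    if (hb.containerReport != null) {
      publisher.accept("CONTAINER_REPORT", hb.containerReport);
    }
    if (hb.commandStatusReport != null) {
      publisher.accept("CMD_STATUS_REPORT", hb.commandStatusReport);
    }
  }
}
```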
- */ -public class TestSCMDatanodeHeartbeatDispatcher { - - - @Test - public void testNodeReportDispatcher() throws IOException { - - AtomicInteger eventReceived = new AtomicInteger(); - - NodeReportProto nodeReport = NodeReportProto.getDefaultInstance(); - - SCMDatanodeHeartbeatDispatcher dispatcher = - new SCMDatanodeHeartbeatDispatcher(Mockito.mock(NodeManager.class), - new EventPublisher() { - @Override - public > void fireEvent( - EVENT_TYPE event, PAYLOAD payload) { - Assert.assertEquals(event, NODE_REPORT); - eventReceived.incrementAndGet(); - Assert.assertEquals(nodeReport, - ((NodeReportFromDatanode)payload).getReport()); - - } - }); - - DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails(); - - SCMHeartbeatRequestProto heartbeat = - SCMHeartbeatRequestProto.newBuilder() - .setDatanodeDetails(datanodeDetails.getProtoBufMessage()) - .setNodeReport(nodeReport) - .build(); - dispatcher.dispatch(heartbeat); - Assert.assertEquals(1, eventReceived.get()); - - - } - - @Test - public void testContainerReportDispatcher() throws IOException { - - - AtomicInteger eventReceived = new AtomicInteger(); - - ContainerReportsProto containerReport = - ContainerReportsProto.getDefaultInstance(); - CommandStatusReportsProto commandStatusReport = - CommandStatusReportsProto.getDefaultInstance(); - - SCMDatanodeHeartbeatDispatcher dispatcher = - new SCMDatanodeHeartbeatDispatcher(Mockito.mock(NodeManager.class), - new EventPublisher() { - @Override - public > void fireEvent( - EVENT_TYPE event, PAYLOAD payload) { - Assert.assertTrue( - event.equals(CONTAINER_REPORT) - || event.equals(CMD_STATUS_REPORT)); - - if (payload instanceof ContainerReportFromDatanode) { - Assert.assertEquals(containerReport, - ((ContainerReportFromDatanode) payload).getReport()); - } - if (payload instanceof CommandStatusReportFromDatanode) { - Assert.assertEquals(commandStatusReport, - ((CommandStatusReportFromDatanode) payload).getReport()); - } - eventReceived.incrementAndGet(); - } - }); - - DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails(); - - SCMHeartbeatRequestProto heartbeat = - SCMHeartbeatRequestProto.newBuilder() - .setDatanodeDetails(datanodeDetails.getProtoBufMessage()) - .setContainerReport(containerReport) - .addCommandStatusReports(commandStatusReport) - .build(); - dispatcher.dispatch(heartbeat); - Assert.assertEquals(2, eventReceived.get()); - - - } - -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/TestCloseContainerWatcher.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/TestCloseContainerWatcher.java deleted file mode 100644 index 56c3830c9b8bc..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/TestCloseContainerWatcher.java +++ /dev/null @@ -1,287 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container; - -import org.apache.hadoop.hdds.HddsIdFactory; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.CommandStatus; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type; -import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler - .CloseContainerStatus; -import org.apache.hadoop.hdds.scm.container.CloseContainerEventHandler - .CloseContainerRetryableReq; -import org.apache.hadoop.hdds.scm.container.CloseContainerWatcher; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.ContainerMapping; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.server.events.EventHandler; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.hdds.server.events.EventWatcher; -import org.apache.hadoop.ozone.lease.LeaseManager; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.After; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; -import org.mockito.Mockito; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.slf4j.event.Level; - -import java.io.IOException; - -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.mockito.Mockito.when; - -/** - * Test class for {@link CloseContainerWatcher}. - * */ -public class TestCloseContainerWatcher implements EventHandler { - - private static final Logger LOG = LoggerFactory - .getLogger(TestCloseContainerWatcher.class); - private static EventWatcher - watcher; - private static LeaseManager leaseManager; - private static ContainerMapping containerMapping = Mockito - .mock(ContainerMapping.class); - private static EventQueue queue; - @Rule - public Timeout timeout = new Timeout(1000*15); - - @After - public void stop() { - leaseManager.shutdown(); - queue.close(); - } - - /* - * This test verifies watcher handling of a FAILED status event. - * */
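For orientation, the behaviour these deleted tests pin down is: a retryable close request stays tracked by the watcher until a terminal command status arrives; a FAILED status triggers a retry of the close command, while a PENDING status leaves the request open. A simplified sketch of that shape under assumed names (the real implementation sits on EventWatcher and LeaseManager, neither of which is used here):

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.LongConsumer;

// Minimal sketch of the watcher behaviour the deleted tests assert:
// FAILED statuses trigger a retry, PENDING ones leave the request open,
// EXECUTED ones simply complete it. Hypothetical, simplified API.
final class CloseWatcherSketch {
  enum Status { PENDING, FAILED, EXECUTED }

  private final Map<Long, Boolean> pending = new ConcurrentHashMap<>();
  private final LongConsumer retryHandler; // re-fires the close command

  CloseWatcherSketch(LongConsumer retryHandler) {
    this.retryHandler = retryHandler;
  }

  void onRetryableRequest(long containerId) {
    pending.put(containerId, Boolean.TRUE);
  }

  void onStatus(long containerId, Status status) {
    if (status == Status.PENDING) {
      return;                    // keep waiting; nothing is re-sent
    }
    pending.remove(containerId); // terminal status completes the watch
    if (status == Status.FAILED) {
      retryHandler.accept(containerId);
    }
  }
}
```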
- * */ - @Test - public void testWatcherForFailureStatusEvent() throws - InterruptedException, IOException { - setupWatcher(90000L); - long id1 = HddsIdFactory.getLongId(); - long id2 = HddsIdFactory.getLongId(); - queue.addHandler(SCMEvents.CLOSE_CONTAINER, this); - setupMock(id1, id2, true); - GenericTestUtils.LogCapturer testLogger = GenericTestUtils.LogCapturer - .captureLogs(LOG); - GenericTestUtils.LogCapturer watcherLogger = GenericTestUtils.LogCapturer - .captureLogs(CloseContainerWatcher.LOG); - GenericTestUtils.setLogLevel(CloseContainerWatcher.LOG, Level.TRACE); - testLogger.clearOutput(); - watcherLogger.clearOutput(); - - CommandStatus cmdStatus1 = CommandStatus.newBuilder() - .setCmdId(id1) - .setStatus(CommandStatus.Status.FAILED) - .setType(Type.closeContainerCommand).build(); - CommandStatus cmdStatus2 = CommandStatus.newBuilder() - .setCmdId(id2) - .setStatus(CommandStatus.Status.FAILED) - .setType(Type.closeContainerCommand).build(); - - // File events to watcher - queue.fireEvent(SCMEvents.CLOSE_CONTAINER_RETRYABLE_REQ, - new CloseContainerRetryableReq(ContainerID.valueof(id1))); - queue.fireEvent(SCMEvents.CLOSE_CONTAINER_RETRYABLE_REQ, - new CloseContainerRetryableReq(ContainerID.valueof(id2))); - Thread.sleep(10L); - queue.fireEvent(SCMEvents.CLOSE_CONTAINER_STATUS, new - CloseContainerStatus(cmdStatus1)); - queue.fireEvent(SCMEvents.CLOSE_CONTAINER_STATUS, new - CloseContainerStatus(cmdStatus2)); - - Thread.sleep(1000*4L); - // validation - assertTrue(watcherLogger.getOutput().contains("CloseContainerCommand for " + - "containerId: " + id1 + " executed")); - assertTrue(watcherLogger.getOutput().contains("CloseContainerCommand for " + - "containerId: " + id2 + " executed")); - assertTrue( - testLogger.getOutput().contains("Handling closeContainerEvent " + - "for containerId: id=" + id1)); - assertTrue(testLogger.getOutput().contains("Handling closeContainerEvent " + - "for containerId: id=" + id2)); - - } - - @Test - public void testWatcherForPendingStatusEvent() throws - InterruptedException, IOException { - setupWatcher(90000L); - long id1 = HddsIdFactory.getLongId(); - long id2 = HddsIdFactory.getLongId(); - queue.addHandler(SCMEvents.CLOSE_CONTAINER, this); - setupMock(id1, id2, true); - GenericTestUtils.LogCapturer testLogger = GenericTestUtils.LogCapturer - .captureLogs(LOG); - GenericTestUtils.LogCapturer watcherLogger = GenericTestUtils.LogCapturer - .captureLogs(CloseContainerWatcher.LOG); - GenericTestUtils.setLogLevel(CloseContainerWatcher.LOG, Level.TRACE); - testLogger.clearOutput(); - watcherLogger.clearOutput(); - - CommandStatus cmdStatus1 = CommandStatus.newBuilder() - .setCmdId(id1) - .setStatus(CommandStatus.Status.PENDING) - .setType(Type.closeContainerCommand).build(); - CommandStatus cmdStatus2 = CommandStatus.newBuilder() - .setCmdId(id2) - .setStatus(CommandStatus.Status.PENDING) - .setType(Type.closeContainerCommand).build(); - - // File events to watcher - queue.fireEvent(SCMEvents.CLOSE_CONTAINER_RETRYABLE_REQ, - new CloseContainerRetryableReq(ContainerID.valueof(id1))); - queue.fireEvent(SCMEvents.CLOSE_CONTAINER_RETRYABLE_REQ, - new CloseContainerRetryableReq(ContainerID.valueof(id2))); - Thread.sleep(10L); - queue.fireEvent(SCMEvents.CLOSE_CONTAINER_STATUS, new - CloseContainerStatus(cmdStatus1)); - queue.fireEvent(SCMEvents.CLOSE_CONTAINER_STATUS, new - CloseContainerStatus(cmdStatus2)); - - Thread.sleep(1000*2L); - // validation - assertFalse(watcherLogger.getOutput().contains("CloseContainerCommand " - + "for containerId: " + id1 + " 
executed")); - assertFalse(watcherLogger.getOutput().contains("CloseContainerCommand " - + "for containerId: " + id2 + " executed")); - assertFalse(testLogger.getOutput().contains("Handling " - + "closeContainerEvent for containerId: id=" + id1)); - assertFalse(testLogger.getOutput().contains("Handling " - + "closeContainerEvent for containerId: id=" + id2)); - - } - - @Test - public void testWatcherForExecutedStatusEvent() - throws IOException, InterruptedException { - setupWatcher(90000L); - long id1 = HddsIdFactory.getLongId(); - long id2 = HddsIdFactory.getLongId(); - queue.addHandler(SCMEvents.CLOSE_CONTAINER, this); - setupMock(id1, id2, true); - GenericTestUtils.LogCapturer testLogger = GenericTestUtils.LogCapturer - .captureLogs(LOG); - GenericTestUtils.LogCapturer watcherLogger = GenericTestUtils.LogCapturer - .captureLogs(CloseContainerWatcher.LOG); - GenericTestUtils.setLogLevel(CloseContainerWatcher.LOG, Level.TRACE); - testLogger.clearOutput(); - watcherLogger.clearOutput(); - - // When both of the pending events are executed successfully by the DataNode - CommandStatus cmdStatus1 = CommandStatus.newBuilder() - .setCmdId(id1) - .setStatus(CommandStatus.Status.EXECUTED) - .setType(Type.closeContainerCommand).build(); - CommandStatus cmdStatus2 = CommandStatus.newBuilder() - .setCmdId(id2) - .setStatus(CommandStatus.Status.EXECUTED) - .setType(Type.closeContainerCommand).build(); - // Fire events to watcher - testLogger.clearOutput(); - watcherLogger.clearOutput(); - queue.fireEvent(SCMEvents.CLOSE_CONTAINER_RETRYABLE_REQ, - new CloseContainerRetryableReq(ContainerID.valueof(id1))); - queue.fireEvent(SCMEvents.CLOSE_CONTAINER_RETRYABLE_REQ, - new CloseContainerRetryableReq(ContainerID.valueof(id2))); - Thread.sleep(10L); - queue.fireEvent(SCMEvents.CLOSE_CONTAINER_STATUS, - new CloseContainerStatus(cmdStatus1)); - queue.fireEvent(SCMEvents.CLOSE_CONTAINER_STATUS, - new CloseContainerStatus(cmdStatus2)); - - Thread.sleep(1000*3L); - // validation - assertTrue(watcherLogger.getOutput().contains("CloseContainerCommand " - + "for containerId: " + id1 + " executed")); - assertTrue(watcherLogger.getOutput().contains("CloseContainerCommand " - + "for containerId: " + id2 + " executed")); - assertFalse(testLogger.getOutput().contains("Handling " - + "closeContainerEvent for containerId: id=" + id1)); - assertFalse(testLogger.getOutput().contains("Handling " - + "closeContainerEvent for containerId: id=" + id2)); - } - - private void setupWatcher(long time) { - leaseManager = new LeaseManager<>("TestCloseContainerWatcher#LeaseManager", - time); - leaseManager.start(); - watcher = new CloseContainerWatcher(SCMEvents.CLOSE_CONTAINER_RETRYABLE_REQ, - SCMEvents.CLOSE_CONTAINER_STATUS, leaseManager, containerMapping); - queue = new EventQueue(); - watcher.start(queue); - } - - /* - * This test will fire two retryable closeContainer events. Both will time out. - * The first event's container will be open at the time of handling, so it - * should be sent back to the appropriate handler. The second event's - * container will be closed, so it should not be retried. - * */
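A sketch of the timeout path just described: on lease timeout the watcher consults the container state, and only a still-open container is sent back for another close attempt. Names here are hypothetical; the real code asks a ContainerMapping for the container and re-fires the SCM close event:

```java
import java.util.function.LongConsumer;
import java.util.function.LongPredicate;

// Sketch of the retryable-timeout handling exercised below: still-open
// containers are retried, already-closed ones are dropped silently.
final class CloseTimeoutSketch {
  private final LongPredicate isContainerOpen;   // stand-in for the mapping
  private final LongConsumer closeEventHandler;  // stand-in for the handler

  CloseTimeoutSketch(LongPredicate isContainerOpen,
                     LongConsumer closeEventHandler) {
    this.isContainerOpen = isContainerOpen;
    this.closeEventHandler = closeEventHandler;
  }

  void onTimeout(long containerId) {
    if (isContainerOpen.test(containerId)) {
      closeEventHandler.accept(containerId); // still open: retry the close
    }                                         // already closed: do nothing
  }
}
```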
- * */ - @Test - public void testWatcherRetryableTimeoutHandling() throws InterruptedException, - IOException { - - long id1 = HddsIdFactory.getLongId(); - long id2 = HddsIdFactory.getLongId(); - setupWatcher(1000L); - queue.addHandler(SCMEvents.CLOSE_CONTAINER, this); - setupMock(id1, id2, false); - GenericTestUtils.LogCapturer testLogger = GenericTestUtils.LogCapturer - .captureLogs(LOG); - testLogger.clearOutput(); - - // File events to watcher - queue.fireEvent(SCMEvents.CLOSE_CONTAINER_RETRYABLE_REQ, - new CloseContainerRetryableReq(ContainerID.valueof(id1))); - queue.fireEvent(SCMEvents.CLOSE_CONTAINER_RETRYABLE_REQ, - new CloseContainerRetryableReq(ContainerID.valueof(id2))); - - Thread.sleep(1000L + 10); - - // validation - assertTrue(testLogger.getOutput().contains("Handling " - + "closeContainerEvent for containerId: id=" + id1)); - assertFalse(testLogger.getOutput().contains("Handling " - + "closeContainerEvent for containerId: id=" + id2)); - } - - - private void setupMock(long id1, long id2, boolean isOpen) - throws IOException { - ContainerInfo containerInfo = Mockito.mock(ContainerInfo.class); - ContainerInfo containerInfo2 = Mockito.mock(ContainerInfo.class); - when(containerMapping.getContainer(id1)).thenReturn(containerInfo); - when(containerMapping.getContainer(id2)).thenReturn(containerInfo2); - when(containerInfo.isContainerOpen()).thenReturn(true); - when(containerInfo2.isContainerOpen()).thenReturn(isOpen); - } - - @Override - public void onMessage(ContainerID containerID, EventPublisher publisher) { - LOG.info("Handling closeContainerEvent for containerId: {}", containerID); - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java deleted file mode 100644 index 390746f4dce38..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java +++ /dev/null @@ -1,530 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common; - -import java.util.List; -import java.util.Map; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.CloseContainerCommandProto; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.CommandStatus.Status; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.DeleteBlocksCommandProto; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.ReplicateContainerCommandProto; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.VersionInfo; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.StorageReportProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto; -import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.container.common.statemachine - .DatanodeStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine - .EndpointStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.common.states.endpoint - .HeartbeatEndpointTask; -import org.apache.hadoop.ozone.container.common.states.endpoint - .RegisterEndpointTask; -import org.apache.hadoop.ozone.container.common.states.endpoint - .VersionEndpointTask; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.apache.hadoop.ozone.protocol.commands.CommandStatus; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.PathUtils; -import org.apache.hadoop.util.Time; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; -import static org.mockito.Mockito.mock; - -import java.io.File; -import java.net.InetSocketAddress; -import java.util.UUID; - -import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_DIRS; -import static org.apache.hadoop.ozone.container.common.ContainerTestUtils - .createEndpoint; -import static org.hamcrest.Matchers.lessThanOrEqualTo; -import static org.mockito.Mockito.when; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; - -/** - * Tests the endpoints. - */ -public class TestEndPoint { - private static InetSocketAddress serverAddress; - private static RPC.Server scmServer; - private static ScmTestMock scmServerImpl; - private static File testDir; - private static Configuration config; - - @AfterClass - public static void tearDown() throws Exception { - if (scmServer != null) { - scmServer.stop(); - } - FileUtil.fullyDelete(testDir); - } - - @BeforeClass - public static void setUp() throws Exception { - serverAddress = SCMTestUtils.getReuseableAddress(); - scmServerImpl = new ScmTestMock(); - scmServer = SCMTestUtils.startScmRpcServer(SCMTestUtils.getConf(), - scmServerImpl, serverAddress, 10); - testDir = PathUtils.getTestDir(TestEndPoint.class); - config = SCMTestUtils.getConf(); - config.set(DFS_DATANODE_DATA_DIR_KEY, testDir.getAbsolutePath()); - config.set(OZONE_METADATA_DIRS, testDir.getAbsolutePath()); - config - .setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); - config.set(HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL, "1s"); - } - - @Test - /** - * This test asserts that we are able to make a version call to SCM server - * and gets back the expected values. - */ - public void testGetVersion() throws Exception { - try (EndpointStateMachine rpcEndPoint = - createEndpoint(SCMTestUtils.getConf(), - serverAddress, 1000)) { - SCMVersionResponseProto responseProto = rpcEndPoint.getEndPoint() - .getVersion(null); - Assert.assertNotNull(responseProto); - Assert.assertEquals(VersionInfo.DESCRIPTION_KEY, - responseProto.getKeys(0).getKey()); - Assert.assertEquals(VersionInfo.getLatestVersion().getDescription(), - responseProto.getKeys(0).getValue()); - } - } - - @Test - /** - * We make getVersion RPC call, but via the VersionEndpointTask which is - * how the state machine would make the call. - */ - public void testGetVersionTask() throws Exception { - OzoneConfiguration conf = SCMTestUtils.getConf(); - try (EndpointStateMachine rpcEndPoint = createEndpoint(conf, - serverAddress, 1000)) { - OzoneContainer ozoneContainer = new OzoneContainer( - TestUtils.randomDatanodeDetails(), conf, null); - rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION); - VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint, - conf, ozoneContainer); - EndpointStateMachine.EndPointStates newState = versionTask.call(); - - // if version call worked the endpoint should automatically move to the - // next state. 
- Assert.assertEquals(EndpointStateMachine.EndPointStates.REGISTER, - newState); - - // Now rpcEndpoint should remember the version it got from SCM - Assert.assertNotNull(rpcEndPoint.getVersion()); - } - } - - @Test - public void testCheckVersionResponse() throws Exception { - OzoneConfiguration conf = SCMTestUtils.getConf(); - try (EndpointStateMachine rpcEndPoint = createEndpoint(conf, - serverAddress, 1000)) { - GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer - .captureLogs(VersionEndpointTask.LOG); - OzoneContainer ozoneContainer = new OzoneContainer(TestUtils - .randomDatanodeDetails(), conf, null); - rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION); - VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint, - conf, ozoneContainer); - EndpointStateMachine.EndPointStates newState = versionTask.call(); - - // if version call worked the endpoint should automatically move to the - // next state. - Assert.assertEquals(EndpointStateMachine.EndPointStates.REGISTER, - newState); - - // Now rpcEndpoint should remember the version it got from SCM - Assert.assertNotNull(rpcEndPoint.getVersion()); - - // Now change server scmId, so datanode scmId will be - // different from SCM server response scmId - String newScmId = UUID.randomUUID().toString(); - scmServerImpl.setScmId(newScmId); - newState = versionTask.call(); - Assert.assertEquals(EndpointStateMachine.EndPointStates.SHUTDOWN, - newState); - List volumesList = ozoneContainer.getVolumeSet() - .getFailedVolumesList(); - Assert.assertTrue(volumesList.size() == 1); - File expectedScmDir = new File(volumesList.get(0).getHddsRootDir(), - scmServerImpl.getScmId()); - Assert.assertTrue(logCapturer.getOutput().contains("expected scm " + - "directory " + expectedScmDir.getAbsolutePath() + " does not " + - "exist")); - Assert.assertTrue(ozoneContainer.getVolumeSet().getVolumesList().size() - == 0); - Assert.assertTrue(ozoneContainer.getVolumeSet().getFailedVolumesList() - .size() == 1); - - } - } - - - - @Test - /** - * This test makes a call to end point where there is no SCM server. We - * expect that versionTask should be able to handle it. - */ - public void testGetVersionToInvalidEndpoint() throws Exception { - OzoneConfiguration conf = SCMTestUtils.getConf(); - InetSocketAddress nonExistentServerAddress = SCMTestUtils - .getReuseableAddress(); - try (EndpointStateMachine rpcEndPoint = createEndpoint(conf, - nonExistentServerAddress, 1000)) { - rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION); - OzoneContainer ozoneContainer = new OzoneContainer( - TestUtils.randomDatanodeDetails(), conf, null); - VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint, - conf, ozoneContainer); - EndpointStateMachine.EndPointStates newState = versionTask.call(); - - // This version call did NOT work, so endpoint should remain in the same - // state. - Assert.assertEquals(EndpointStateMachine.EndPointStates.GETVERSION, - newState); - } - } - - @Test - /** - * This test makes a getVersionRPC call, but the DummyStorageServer is - * going to respond little slowly. We will assert that we are still in the - * GETVERSION state after the timeout. 
- */ - public void testGetVersionAssertRpcTimeOut() throws Exception { - final long rpcTimeout = 1000; - final long tolerance = 100; - OzoneConfiguration conf = SCMTestUtils.getConf(); - - try (EndpointStateMachine rpcEndPoint = createEndpoint(conf, - serverAddress, (int) rpcTimeout)) { - rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION); - OzoneContainer ozoneContainer = new OzoneContainer( - TestUtils.randomDatanodeDetails(), conf, null); - VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint, - conf, ozoneContainer); - - scmServerImpl.setRpcResponseDelay(1500); - long start = Time.monotonicNow(); - EndpointStateMachine.EndPointStates newState = versionTask.call(); - long end = Time.monotonicNow(); - scmServerImpl.setRpcResponseDelay(0); - Assert.assertThat(end - start, lessThanOrEqualTo(rpcTimeout + tolerance)); - Assert.assertEquals(EndpointStateMachine.EndPointStates.GETVERSION, - newState); - } - } - - @Test - public void testRegister() throws Exception { - DatanodeDetails nodeToRegister = TestUtils.randomDatanodeDetails(); - try (EndpointStateMachine rpcEndPoint = createEndpoint( - SCMTestUtils.getConf(), serverAddress, 1000)) { - SCMRegisteredResponseProto responseProto = rpcEndPoint.getEndPoint() - .register(nodeToRegister.getProtoBufMessage(), TestUtils - .createNodeReport( - getStorageReports(nodeToRegister.getUuid())), - TestUtils.getRandomContainerReports(10), - TestUtils.getRandomPipelineReports()); - Assert.assertNotNull(responseProto); - Assert.assertEquals(nodeToRegister.getUuidString(), - responseProto.getDatanodeUUID()); - Assert.assertNotNull(responseProto.getClusterID()); - Assert.assertEquals(10, scmServerImpl. - getContainerCountsForDatanode(nodeToRegister)); - Assert.assertEquals(1, scmServerImpl.getNodeReportsCount(nodeToRegister)); - } - } - - private StorageReportProto getStorageReports(UUID id) { - String storagePath = testDir.getAbsolutePath() + "/" + id; - return TestUtils.createStorageReport(id, storagePath, 100, 10, 90, null); - } - - private EndpointStateMachine registerTaskHelper(InetSocketAddress scmAddress, - int rpcTimeout, boolean clearDatanodeDetails) throws Exception { - Configuration conf = SCMTestUtils.getConf(); - EndpointStateMachine rpcEndPoint = - createEndpoint(conf, - scmAddress, rpcTimeout); - rpcEndPoint.setState(EndpointStateMachine.EndPointStates.REGISTER); - OzoneContainer ozoneContainer = mock(OzoneContainer.class); - when(ozoneContainer.getNodeReport()).thenReturn(TestUtils - .createNodeReport(getStorageReports(UUID.randomUUID()))); - when(ozoneContainer.getContainerReport()).thenReturn( - TestUtils.getRandomContainerReports(10)); - when(ozoneContainer.getPipelineReport()).thenReturn( - TestUtils.getRandomPipelineReports()); - RegisterEndpointTask endpointTask = - new RegisterEndpointTask(rpcEndPoint, conf, ozoneContainer, - mock(StateContext.class)); - if (!clearDatanodeDetails) { - DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails(); - endpointTask.setDatanodeDetails(datanodeDetails); - } - endpointTask.call(); - return rpcEndPoint; - } - - @Test - public void testRegisterTask() throws Exception { - try (EndpointStateMachine rpcEndpoint = - registerTaskHelper(serverAddress, 1000, false)) { - // Successful register should move us to Heartbeat state. 
- Assert.assertEquals(EndpointStateMachine.EndPointStates.HEARTBEAT, - rpcEndpoint.getState()); - } - } - - @Test - public void testRegisterToInvalidEndpoint() throws Exception { - InetSocketAddress address = SCMTestUtils.getReuseableAddress(); - try (EndpointStateMachine rpcEndpoint = - registerTaskHelper(address, 1000, false)) { - Assert.assertEquals(EndpointStateMachine.EndPointStates.REGISTER, - rpcEndpoint.getState()); - } - } - - @Test - public void testRegisterNoContainerID() throws Exception { - InetSocketAddress address = SCMTestUtils.getReuseableAddress(); - try (EndpointStateMachine rpcEndpoint = - registerTaskHelper(address, 1000, true)) { - // No Container ID, therefore we tell the datanode that we would like to - // shutdown. - Assert.assertEquals(EndpointStateMachine.EndPointStates.SHUTDOWN, - rpcEndpoint.getState()); - } - } - - @Test - public void testRegisterRpcTimeout() throws Exception { - final long rpcTimeout = 1000; - final long tolerance = 200; - scmServerImpl.setRpcResponseDelay(1500); - long start = Time.monotonicNow(); - registerTaskHelper(serverAddress, 1000, false).close(); - long end = Time.monotonicNow(); - scmServerImpl.setRpcResponseDelay(0); - Assert.assertThat(end - start, lessThanOrEqualTo(rpcTimeout + tolerance)); - } - - @Test - public void testHeartbeat() throws Exception { - DatanodeDetails dataNode = TestUtils.randomDatanodeDetails(); - try (EndpointStateMachine rpcEndPoint = - createEndpoint(SCMTestUtils.getConf(), - serverAddress, 1000)) { - SCMHeartbeatRequestProto request = SCMHeartbeatRequestProto.newBuilder() - .setDatanodeDetails(dataNode.getProtoBufMessage()) - .setNodeReport(TestUtils.createNodeReport( - getStorageReports(UUID.randomUUID()))) - .build(); - - SCMHeartbeatResponseProto responseProto = rpcEndPoint.getEndPoint() - .sendHeartbeat(request); - Assert.assertNotNull(responseProto); - Assert.assertEquals(0, responseProto.getCommandsCount()); - } - } - - @Test - public void testHeartbeatWithCommandStatusReport() throws Exception { - DatanodeDetails dataNode = TestUtils.randomDatanodeDetails(); - try (EndpointStateMachine rpcEndPoint = - createEndpoint(SCMTestUtils.getConf(), - serverAddress, 1000)) { - // Add some scmCommands for heartbeat response - addScmCommands(); - - - SCMHeartbeatRequestProto request = SCMHeartbeatRequestProto.newBuilder() - .setDatanodeDetails(dataNode.getProtoBufMessage()) - .setNodeReport(TestUtils.createNodeReport( - getStorageReports(UUID.randomUUID()))) - .build(); - - SCMHeartbeatResponseProto responseProto = rpcEndPoint.getEndPoint() - .sendHeartbeat(request); - assertNotNull(responseProto); - assertEquals(3, responseProto.getCommandsCount()); - assertEquals(0, scmServerImpl.getCommandStatusReportCount()); - - // Send heartbeat again from heartbeat endpoint task - final StateContext stateContext = heartbeatTaskHelper( - serverAddress, 3000); - Map map = stateContext.getCommandStatusMap(); - assertNotNull(map); - assertEquals("Should have 3 objects", 3, map.size()); - assertTrue(map.containsKey(Long.valueOf(1))); - assertTrue(map.containsKey(Long.valueOf(2))); - assertTrue(map.containsKey(Long.valueOf(3))); - assertTrue(map.get(Long.valueOf(1)).getType() - .equals(Type.closeContainerCommand)); - assertTrue(map.get(Long.valueOf(2)).getType() - .equals(Type.replicateContainerCommand)); - assertTrue( - map.get(Long.valueOf(3)).getType().equals(Type.deleteBlocksCommand)); - assertTrue(map.get(Long.valueOf(1)).getStatus().equals(Status.PENDING)); - 
assertTrue(map.get(Long.valueOf(2)).getStatus().equals(Status.PENDING)); - assertTrue(map.get(Long.valueOf(3)).getStatus().equals(Status.PENDING)); - - scmServerImpl.clearScmCommandRequests(); - } - } - - private void addScmCommands() { - SCMCommandProto closeCommand = SCMCommandProto.newBuilder() - .setCloseContainerCommandProto( - CloseContainerCommandProto.newBuilder().setCmdId(1) - .setContainerID(1) - .setReplicationType(ReplicationType.RATIS) - .setPipelineID(PipelineID.randomId().getProtobuf()) - .build()) - .setCommandType(Type.closeContainerCommand) - .build(); - SCMCommandProto replicationCommand = SCMCommandProto.newBuilder() - .setReplicateContainerCommandProto( - ReplicateContainerCommandProto.newBuilder() - .setCmdId(2) - .setContainerID(2) - .build()) - .setCommandType(Type.replicateContainerCommand) - .build(); - SCMCommandProto deleteBlockCommand = SCMCommandProto.newBuilder() - .setDeleteBlocksCommandProto( - DeleteBlocksCommandProto.newBuilder() - .setCmdId(3) - .addDeletedBlocksTransactions( - DeletedBlocksTransaction.newBuilder() - .setContainerID(45) - .setCount(1) - .setTxID(23) - .build()) - .build()) - .setCommandType(Type.deleteBlocksCommand) - .build(); - scmServerImpl.addScmCommandRequest(closeCommand); - scmServerImpl.addScmCommandRequest(deleteBlockCommand); - scmServerImpl.addScmCommandRequest(replicationCommand); - } - - private StateContext heartbeatTaskHelper(InetSocketAddress scmAddress, - int rpcTimeout) throws Exception { - Configuration conf = SCMTestUtils.getConf(); - conf.set(DFS_DATANODE_DATA_DIR_KEY, testDir.getAbsolutePath()); - conf.set(OZONE_METADATA_DIRS, testDir.getAbsolutePath()); - // Mini Ozone cluster will not come up if the port is not true, since - // Ratis will exit if the server port cannot be bound. We can remove this - // hard coding once we fix the Ratis default behaviour. 
- conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); - - - // Create a datanode state machine for stateConext used by endpoint task - try (DatanodeStateMachine stateMachine = new DatanodeStateMachine( - TestUtils.randomDatanodeDetails(), conf); - EndpointStateMachine rpcEndPoint = - createEndpoint(conf, scmAddress, rpcTimeout)) { - HddsProtos.DatanodeDetailsProto datanodeDetailsProto = - TestUtils.randomDatanodeDetails().getProtoBufMessage(); - rpcEndPoint.setState(EndpointStateMachine.EndPointStates.HEARTBEAT); - - final StateContext stateContext = - new StateContext(conf, DatanodeStateMachine.DatanodeStates.RUNNING, - stateMachine); - - HeartbeatEndpointTask endpointTask = - new HeartbeatEndpointTask(rpcEndPoint, conf, stateContext); - endpointTask.setDatanodeDetailsProto(datanodeDetailsProto); - endpointTask.call(); - Assert.assertNotNull(endpointTask.getDatanodeDetailsProto()); - - Assert.assertEquals(EndpointStateMachine.EndPointStates.HEARTBEAT, - rpcEndPoint.getState()); - return stateContext; - } - } - - @Test - public void testHeartbeatTask() throws Exception { - heartbeatTaskHelper(serverAddress, 1000); - } - - @Test - public void testHeartbeatTaskToInvalidNode() throws Exception { - InetSocketAddress invalidAddress = SCMTestUtils.getReuseableAddress(); - heartbeatTaskHelper(invalidAddress, 1000); - } - - @Test - public void testHeartbeatTaskRpcTimeOut() throws Exception { - final long rpcTimeout = 1000; - final long tolerance = 200; - scmServerImpl.setRpcResponseDelay(1500); - long start = Time.monotonicNow(); - InetSocketAddress invalidAddress = SCMTestUtils.getReuseableAddress(); - heartbeatTaskHelper(invalidAddress, 1000); - long end = Time.monotonicNow(); - scmServerImpl.setRpcResponseDelay(0); - Assert.assertThat(end - start, - lessThanOrEqualTo(rpcTimeout + tolerance)); - } - -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java deleted file mode 100644 index da2ae843e11ad..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Make CheckStyle Happy. 
- */ -package org.apache.hadoop.ozone.container.common; \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java deleted file mode 100644 index 1c80880d01d9f..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java +++ /dev/null @@ -1,137 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
<p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.placement; - -import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.scm.container.MockNodeManager; -import org.apache.hadoop.hdds.scm.container.placement.algorithms - .SCMContainerPlacementCapacity; -import org.apache.hadoop.hdds.scm.container.placement.algorithms - .SCMContainerPlacementRandom; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.ozone.OzoneConsts; -import org.junit.Assert; -import org.junit.Test; - -import java.util.ArrayList; -import java.util.List; -import java.util.Random; - -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState - .HEALTHY; -import static org.junit.Assert.assertEquals; - -/** - * Asserts that allocation strategy works as expected. - */ -public class TestContainerPlacement { - - private DescriptiveStatistics computeStatistics(NodeManager nodeManager) { - DescriptiveStatistics descriptiveStatistics = new DescriptiveStatistics(); - for (DatanodeDetails dd : nodeManager.getNodes(HEALTHY)) { - float weightedValue = - nodeManager.getNodeStat(dd).get().getScmUsed().get() / (float) - nodeManager.getNodeStat(dd).get().getCapacity().get(); - descriptiveStatistics.addValue(weightedValue); - } - return descriptiveStatistics; - } - - /** - * This test simulates lots of Cluster I/O and updates the metadata in SCM. - * We simulate adding and removing containers from the cluster. It asserts - * that our placement algorithm has taken the capacity of nodes into - * consideration by asserting that standard deviation of used space on these - * has improved. - */ - @Test - public void testCapacityPlacementYieldsBetterDataDistribution() throws - SCMException { - final int opsCount = 200 * 1000; - final int nodesRequired = 3; - Random random = new Random(); - - // The nature of init code in MockNodeManager yields similar clusters. - MockNodeManager nodeManagerCapacity = new MockNodeManager(true, 100); - MockNodeManager nodeManagerRandom = new MockNodeManager(true, 100); - DescriptiveStatistics beforeCapacity = - computeStatistics(nodeManagerCapacity); - DescriptiveStatistics beforeRandom = computeStatistics(nodeManagerRandom); - - //Assert that our initial layout of clusters are similar. 
- assertEquals(beforeCapacity.getStandardDeviation(), beforeRandom - .getStandardDeviation(), 0.001); - - SCMContainerPlacementCapacity capacityPlacer = new - SCMContainerPlacementCapacity(nodeManagerCapacity, new Configuration()); - SCMContainerPlacementRandom randomPlacer = new - SCMContainerPlacementRandom(nodeManagerRandom, new Configuration()); - - for (int x = 0; x < opsCount; x++) { - long containerSize = random.nextInt(100) * OzoneConsts.GB; - List nodesCapacity = - capacityPlacer.chooseDatanodes(new ArrayList<>(), nodesRequired, - containerSize); - assertEquals(nodesRequired, nodesCapacity.size()); - - List nodesRandom = - randomPlacer.chooseDatanodes(nodesCapacity, nodesRequired, - containerSize); - - // One fifth of all calls are delete - if (x % 5 == 0) { - deleteContainer(nodeManagerCapacity, nodesCapacity, containerSize); - deleteContainer(nodeManagerRandom, nodesRandom, containerSize); - } else { - createContainer(nodeManagerCapacity, nodesCapacity, containerSize); - createContainer(nodeManagerRandom, nodesRandom, containerSize); - } - } - DescriptiveStatistics postCapacity = computeStatistics(nodeManagerCapacity); - DescriptiveStatistics postRandom = computeStatistics(nodeManagerRandom); - - // This is a very bold claim, and needs large number of I/O operations. - // The claim in this assertion is that we improved the data distribution - // of this cluster in relation to the start state of the cluster. - Assert.assertTrue(beforeCapacity.getStandardDeviation() > - postCapacity.getStandardDeviation()); - - // This asserts that Capacity placement yields a better placement - // algorithm than random placement, since both cluster started at an - // identical state. - - Assert.assertTrue(postRandom.getStandardDeviation() > - postCapacity.getStandardDeviation()); - } - - private void deleteContainer(MockNodeManager nodeManager, - List nodes, long containerSize) { - for (DatanodeDetails dd : nodes) { - nodeManager.delContainer(dd, containerSize); - } - } - - private void createContainer(MockNodeManager nodeManager, - List nodes, long containerSize) { - for (DatanodeDetails dd : nodes) { - nodeManager.addContainer(dd, containerSize); - } - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java deleted file mode 100644 index 7150d1b94f4f4..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
<p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.placement; - -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - -/** - * Tests that test Metrics that support placement. - */ -public class TestDatanodeMetrics { - @Rule - public ExpectedException exception = ExpectedException.none(); - @Test - public void testSCMNodeMetric() { - SCMNodeStat stat = new SCMNodeStat(100L, 10L, 90L); - assertEquals((long) stat.getCapacity().get(), 100L); - assertEquals((long) stat.getScmUsed().get(), 10L); - assertEquals((long) stat.getRemaining().get(), 90L); - SCMNodeMetric metric = new SCMNodeMetric(stat); - - SCMNodeStat newStat = new SCMNodeStat(100L, 10L, 90L); - assertEquals((long) stat.getCapacity().get(), 100L); - assertEquals((long) stat.getScmUsed().get(), 10L); - assertEquals((long) stat.getRemaining().get(), 90L); - - SCMNodeMetric newMetric = new SCMNodeMetric(newStat); - assertTrue(metric.isEqual(newMetric.get())); - - newMetric.add(stat); - assertTrue(newMetric.isGreater(metric.get())); - - SCMNodeMetric zeroMetric = new SCMNodeMetric(new SCMNodeStat()); - // Assert we can handle zero capacity. - assertTrue(metric.isGreater(zeroMetric.get())); - - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/package-info.java deleted file mode 100644 index ddd751c3795af..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Make CheckStyle Happy. 
- */ -package org.apache.hadoop.ozone.container.placement; \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java deleted file mode 100644 index 318c54d9585a4..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
<p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.replication; -// Test classes for replication. \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java deleted file mode 100644 index 74c3932eba008..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java +++ /dev/null @@ -1,402 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
<p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.testutils; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.node.CommandQueue; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto; -import org.apache.hadoop.hdds.scm.node.states.ReportResult; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.ozone.protocol.VersionResponse; -import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; -import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; - -import java.io.IOException; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; - -/** - * A Node Manager to test replication. - */ -public class ReplicationNodeManagerMock implements NodeManager { - private final Map nodeStateMap; - private final CommandQueue commandQueue; - - /** - * A list of Datanodes and current states. - * @param nodeState A node state map. - */ - public ReplicationNodeManagerMock(Map nodeState, - CommandQueue commandQueue) { - Preconditions.checkNotNull(nodeState); - this.nodeStateMap = nodeState; - this.commandQueue = commandQueue; - } - - /** - * Get the minimum number of nodes to get out of chill mode. - * - * @return int - */ - @Override - public int getMinimumChillModeNodes() { - return 0; - } - - /** - * Returns a chill mode status string. - * - * @return String - */ - @Override - public String getChillModeStatus() { - return null; - } - - /** - * Get the number of data nodes that in all states. - * - * @return A state to number of nodes that in this state mapping - */ - @Override - public Map getNodeCount() { - return null; - } - - /** - * Removes a data node from the management of this Node Manager. - * - * @param node - DataNode. - * @throws NodeNotFoundException - */ - @Override - public void removeNode(DatanodeDetails node) - throws NodeNotFoundException { - nodeStateMap.remove(node); - - } - - /** - * Gets all Live Datanodes that is currently communicating with SCM. - * - * @param nodestate - State of the node - * @return List of Datanodes that are Heartbeating SCM. 
- */ - @Override - public List getNodes(NodeState nodestate) { - return null; - } - - /** - * Returns the Number of Datanodes that are communicating with SCM. - * - * @param nodestate - State of the node - * @return int -- count - */ - @Override - public int getNodeCount(NodeState nodestate) { - return 0; - } - - /** - * Get all datanodes known to SCM. - * - * @return List of DatanodeDetails known to SCM. - */ - @Override - public List getAllNodes() { - return null; - } - - /** - * Chill mode is the period when node manager waits for a minimum - * configured number of datanodes to report in. This is called chill mode - * to indicate the period before node manager gets into action. - *
<p>
- * Forcefully exits the chill mode, even if we have not met the minimum - * criteria of the nodes reporting in. - */ - @Override - public void forceExitChillMode() { - - } - - /** - * Puts the node manager into manual chill mode. - */ - @Override - public void enterChillMode() { - - } - - /** - * Brings node manager out of manual chill mode. - */ - @Override - public void exitChillMode() { - - } - - /** - * Returns true if node manager is out of chill mode, else false. - * @return true if out of chill mode, else false - */ - @Override - public boolean isOutOfChillMode() { - return !nodeStateMap.isEmpty(); - } - - /** - * Returns the aggregated node stats. - * - * @return the aggregated node stats. - */ - @Override - public SCMNodeStat getStats() { - return null; - } - - /** - * Return a map of node stats. - * - * @return a map of individual node stats (live/stale but not dead). - */ - @Override - public Map getNodeStats() { - return null; - } - - /** - * Return the node stat of the specified datanode. - * - * @param dd - datanode details. - * @return node stat if it is live/stale, null if it is decommissioned or - * doesn't exist. - */ - @Override - public SCMNodeMetric getNodeStat(DatanodeDetails dd) { - return null; - } - - - /** - * Returns the node state of a specific node. - * - * @param dd - DatanodeDetails - * @return Healthy/Stale/Dead. - */ - @Override - public NodeState getNodeState(DatanodeDetails dd) { - return nodeStateMap.get(dd); - } - - /** - * Get set of pipelines a datanode is part of. - * @param dnId - datanodeID - * @return Set of PipelineID - */ - @Override - public Set getPipelineByDnID(UUID dnId) { - throw new UnsupportedOperationException("Not yet implemented"); - } - - /** - * Add pipeline information in the NodeManager. - * @param pipeline - Pipeline to be added - */ - @Override - public void addPipeline(Pipeline pipeline) { - throw new UnsupportedOperationException("Not yet implemented"); - } - - /** - * Remove a pipeline information from the NodeManager. - * @param pipeline - Pipeline to be removed - */ - @Override - public void removePipeline(Pipeline pipeline) { - throw new UnsupportedOperationException("Not yet implemented"); - } - - /** - * Update set of containers available on a datanode. - * @param uuid - DatanodeID - * @param containerIds - Set of containerIDs - * @throws SCMException - if datanode is not known. For new datanode use - * addDatanodeInContainerMap call. - */ - @Override - public void setContainersForDatanode(UUID uuid, Set containerIds) - throws SCMException { - throw new UnsupportedOperationException("Not yet implemented"); - } - - /** - * Process containerReport received from datanode. - * @param uuid - DataonodeID - * @param containerIds - Set of containerIDs - * @return The result after processing containerReport - */ - @Override - public ReportResult processContainerReport(UUID uuid, - Set containerIds) { - throw new UnsupportedOperationException("Not yet implemented"); - } - - /** - * Return set of containerIDs available on a datanode. - * @param uuid - DatanodeID - * @return - set of containerIDs - */ - @Override - public Set getContainers(UUID uuid) { - throw new UnsupportedOperationException("Not yet implemented"); - } - - /** - * Insert a new datanode with set of containerIDs for containers available - * on it. 
- * @param uuid - DatanodeID - * @param containerIDs - Set of ContainerIDs - * @throws SCMException - if datanode already exists - */ - @Override - public void addDatanodeInContainerMap(UUID uuid, - Set containerIDs) throws SCMException { - throw new UnsupportedOperationException("Not yet implemented"); - } - - /** - * Closes this stream and releases any system resources associated - * with it. If the stream is already closed then invoking this - * method has no effect. - *
- * <p>
As noted in {@link AutoCloseable#close()}, cases where the - * close may fail require careful attention. It is strongly advised - * to relinquish the underlying resources and to internally - * mark the {@code Closeable} as closed, prior to throwing - * the {@code IOException}. - * - * @throws IOException if an I/O error occurs - */ - @Override - public void close() throws IOException { - - } - - /** - * Gets the version info from SCM. - * - * @param versionRequest - version Request. - * @return - returns SCM version info and other required information needed by - * datanode. - */ - @Override - public VersionResponse getVersion(SCMVersionRequestProto versionRequest) { - return null; - } - - /** - * Register the node if the node finds that it is not registered with any SCM. - * - * @param dd DatanodeDetailsProto - * @param nodeReport NodeReportProto - * @return SCMHeartbeatResponseProto - */ - @Override - public RegisteredCommand register(DatanodeDetails dd, - NodeReportProto nodeReport, - PipelineReportsProto pipelineReportsProto) { - return null; - } - - /** - * Send heartbeat to indicate the datanode is alive and doing well. - * - * @param dd - Datanode Details. - * @return SCMheartbeat response list - */ - @Override - public List processHeartbeat(DatanodeDetails dd) { - return null; - } - - /** - * Clears all nodes from the node Manager. - */ - public void clearMap() { - this.nodeStateMap.clear(); - } - - /** - * Adds a node to the existing Node manager. This is used only for test - * purposes. - * @param id DatanodeDetails - * @param state State you want to put that node to. - */ - public void addNode(DatanodeDetails id, NodeState state) { - nodeStateMap.put(id, state); - } - - @Override - public void addDatanodeCommand(UUID dnId, SCMCommand command) { - this.commandQueue.addCommand(dnId, command); - } - - /** - * Empty implementation for processNodeReport. - * @param dnUuid - * @param nodeReport - */ - @Override - public void processNodeReport(UUID dnUuid, NodeReportProto nodeReport) { - // do nothing. - } - - @Override - public void onMessage(CommandForDatanode commandForDatanode, - EventPublisher publisher) { - // do nothing. - } - - /** - * Empty implementation for processDeadNode. - * @param dnUuid - */ - @Override - public void processDeadNode(UUID dnUuid) { - // do nothing. - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java deleted file mode 100644 index 4e8a90bf1d42c..0000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
<p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.testutils; -// Helper classes for ozone and container tests. \ No newline at end of file diff --git a/hadoop-hdds/tools/pom.xml b/hadoop-hdds/tools/pom.xml deleted file mode 100644 index 37c7d9d74ee1d..0000000000000 --- a/hadoop-hdds/tools/pom.xml +++ /dev/null @@ -1,57 +0,0 @@ - - - - 4.0.0 - - org.apache.hadoop - hadoop-hdds - 0.3.0-SNAPSHOT - - - hadoop-hdds-tools - 0.3.0-SNAPSHOT - Apache Hadoop Distributed Data Store Tools - Apache Hadoop HDDS Tools - jar - - - - org.apache.hadoop - hadoop-hdds-common - - - org.apache.hadoop - hadoop-hdds-client - - - org.apache.hadoop - hadoop-common - - - commons-cli - commons-cli - - - org.xerial - sqlite-jdbc - 3.8.7 - - - - - diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java deleted file mode 100644 index 59cd0ba9a46f2..0000000000000 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java +++ /dev/null @@ -1,150 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
<p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.cli; - -import java.io.IOException; -import java.net.InetSocketAddress; - -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.cli.GenericCli; -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.cli.container.CloseSubcommand; -import org.apache.hadoop.hdds.scm.cli.container.CreateSubcommand; -import org.apache.hadoop.hdds.scm.cli.container.DeleteSubcommand; -import org.apache.hadoop.hdds.scm.cli.container.InfoSubcommand; -import org.apache.hadoop.hdds.scm.cli.container.ListSubcommand; -import org.apache.hadoop.hdds.scm.client.ContainerOperationClient; -import org.apache.hadoop.hdds.scm.client.ScmClient; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.hdds.scm.protocolPB - .StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB; -import org.apache.hadoop.ipc.Client; -import org.apache.hadoop.ipc.ProtobufRpcEngine; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.NativeCodeLoader; - -import org.apache.commons.lang3.StringUtils; -import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_CLIENT_ADDRESS_KEY; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_CONTAINER_SIZE_DEFAULT; -import org.apache.log4j.ConsoleAppender; -import org.apache.log4j.Level; -import org.apache.log4j.LogManager; -import org.apache.log4j.Logger; -import org.apache.log4j.PatternLayout; -import picocli.CommandLine.Command; -import picocli.CommandLine.Option; - -/** - * This class is the CLI of SCM. - */ - -/** - * Container subcommand. - */ -@Command(name = "ozone scmcli", hidden = true, description = - "Developer tools to handle SCM specific " - + "operations.", - versionProvider = HddsVersionProvider.class, - subcommands = { - ListSubcommand.class, - InfoSubcommand.class, - DeleteSubcommand.class, - CreateSubcommand.class, - CloseSubcommand.class - }, - mixinStandardHelpOptions = true) -public class SCMCLI extends GenericCli { - - @Option(names = {"--scm"}, description = "The destination scm (host:port)") - private String scm = ""; - - /** - * Main for the scm shell Command handling. 
- * - * @param argv - System Args Strings[] - * @throws Exception - */ - public static void main(String[] argv) throws Exception { - - LogManager.resetConfiguration(); - Logger.getRootLogger().setLevel(Level.INFO); - Logger.getRootLogger() - .addAppender(new ConsoleAppender(new PatternLayout("%m%n"))); - Logger.getLogger(NativeCodeLoader.class).setLevel(Level.ERROR); - - new SCMCLI().run(argv); - } - - public ScmClient createScmClient() - throws IOException { - - OzoneConfiguration ozoneConf = createOzoneConfiguration(); - if (StringUtils.isNotEmpty(scm)) { - ozoneConf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scm); - } - if (!HddsUtils.getHostNameFromConfigKeys(ozoneConf, - ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY).isPresent()) { - - throw new IllegalArgumentException( - ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY - + " should be set in ozone-site.xml or with the --scm option"); - } - - long version = RPC.getProtocolVersion( - StorageContainerLocationProtocolPB.class); - InetSocketAddress scmAddress = - getScmAddressForClients(ozoneConf); - int containerSizeGB = (int) ozoneConf.getStorageSize( - OZONE_SCM_CONTAINER_SIZE, OZONE_SCM_CONTAINER_SIZE_DEFAULT, - StorageUnit.GB); - ContainerOperationClient - .setContainerSizeB(containerSizeGB * OzoneConsts.GB); - - RPC.setProtocolEngine(ozoneConf, StorageContainerLocationProtocolPB.class, - ProtobufRpcEngine.class); - StorageContainerLocationProtocolClientSideTranslatorPB client = - new StorageContainerLocationProtocolClientSideTranslatorPB( - RPC.getProxy(StorageContainerLocationProtocolPB.class, version, - scmAddress, UserGroupInformation.getCurrentUser(), ozoneConf, - NetUtils.getDefaultSocketFactory(ozoneConf), - Client.getRpcTimeout(ozoneConf))); - return new ContainerOperationClient( - client, new XceiverClientManager(ozoneConf)); - } - - public void checkContainerExists(ScmClient scmClient, long containerId) - throws IOException { - ContainerInfo container = scmClient.getContainer(containerId); - if (container == null) { - throw new IllegalArgumentException("No such container " + containerId); - } - } - -} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseSubcommand.java deleted file mode 100644 index 173d0ce0231cc..0000000000000 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseSubcommand.java +++ /dev/null @@ -1,54 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
<p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.cli.container; - -import java.util.concurrent.Callable; - -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.scm.cli.SCMCLI; -import org.apache.hadoop.hdds.scm.client.ScmClient; - -import picocli.CommandLine.Command; -import picocli.CommandLine.Parameters; -import picocli.CommandLine.ParentCommand; - -/** - * The handler of close container command. - */ -@Command( - name = "close", - description = "close container", - mixinStandardHelpOptions = true, - versionProvider = HddsVersionProvider.class) -public class CloseSubcommand implements Callable { - - @ParentCommand - private SCMCLI parent; - - @Parameters(description = "Id of the container to close") - private long containerId; - - @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.createScmClient()) { - parent.checkContainerExists(scmClient, containerId); - scmClient.closeContainer(containerId); - return null; - } - } -} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java deleted file mode 100644 index 1dda9c485eda3..0000000000000 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
<p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.cli.container; - -import java.util.concurrent.Callable; - -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.scm.cli.SCMCLI; -import org.apache.hadoop.hdds.scm.client.ScmClient; -import org.apache.hadoop.hdds.scm.container.common.helpers - .ContainerWithPipeline; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import picocli.CommandLine.Command; -import picocli.CommandLine.Option; -import picocli.CommandLine.ParentCommand; - -/** - * This is the handler that process container creation command. - */ -@Command( - name = "create", - description = "Create container", - mixinStandardHelpOptions = true, - versionProvider = HddsVersionProvider.class) -public class CreateSubcommand implements Callable { - - private static final Logger LOG = - LoggerFactory.getLogger(CreateSubcommand.class); - - @ParentCommand - private SCMCLI parent; - - @Option(description = "Owner of the new container", defaultValue = "OZONE", - required = false, names = { - "-o", "--owner"}) - - private String owner; - - @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.createScmClient()) { - ContainerWithPipeline container = scmClient.createContainer(owner); - LOG.info("Container {} is created.", - container.getContainerInfo().getContainerID()); - return null; - } - } -} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteSubcommand.java deleted file mode 100644 index c163a3a9a537a..0000000000000 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteSubcommand.java +++ /dev/null @@ -1,60 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
<p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.cli.container; - -import java.util.concurrent.Callable; - -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.scm.cli.SCMCLI; -import org.apache.hadoop.hdds.scm.client.ScmClient; - -import picocli.CommandLine.Command; -import picocli.CommandLine.Option; -import picocli.CommandLine.Parameters; -import picocli.CommandLine.ParentCommand; - -/** - * This is the handler that process delete container command. - */ -@Command( - name = "delete", - description = "Delete container", - mixinStandardHelpOptions = true, - versionProvider = HddsVersionProvider.class) -public class DeleteSubcommand implements Callable { - - @Parameters(description = "Id of the container to close") - private long containerId; - - @Option(names = {"-f", - "--force"}, description = "forcibly delete the container") - private boolean force; - - @ParentCommand - private SCMCLI parent; - - @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.createScmClient()) { - parent.checkContainerExists(scmClient, containerId); - scmClient.deleteContainer(containerId, force); - return null; - } - } -} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java deleted file mode 100644 index 0135df38407e9..0000000000000 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java +++ /dev/null @@ -1,94 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
<p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.cli.container; - -import java.util.concurrent.Callable; -import java.util.stream.Collectors; - -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerData; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerLifeCycleState; -import org.apache.hadoop.hdds.scm.cli.SCMCLI; -import org.apache.hadoop.hdds.scm.client.ScmClient; -import org.apache.hadoop.hdds.scm.container.common.helpers - .ContainerWithPipeline; - -import com.google.common.base.Preconditions; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import picocli.CommandLine.Command; -import picocli.CommandLine.Parameters; -import picocli.CommandLine.ParentCommand; - -/** - * This is the handler that process container info command. - */ -@Command( - name = "info", - description = "Show information about a specific container", - mixinStandardHelpOptions = true, - versionProvider = HddsVersionProvider.class) -public class InfoSubcommand implements Callable { - - private static final Logger LOG = - LoggerFactory.getLogger(InfoSubcommand.class); - - @ParentCommand - private SCMCLI parent; - - @Parameters(description = "Decimal id of the container.") - private long containerID; - - @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.createScmClient()) { - ContainerWithPipeline container = scmClient. - getContainerWithPipeline(containerID); - Preconditions.checkNotNull(container, "Container cannot be null"); - - ContainerData containerData = scmClient.readContainer(container - .getContainerInfo().getContainerID(), container.getPipeline()); - - // Print container report info. - LOG.info("Container id: {}", containerID); - String openStatus = - containerData.getState() == ContainerLifeCycleState.OPEN ? "OPEN" : - "CLOSED"; - LOG.info("Container State: {}", openStatus); - LOG.info("Container Path: {}", containerData.getContainerPath()); - - // Output meta data. - String metadataStr = containerData.getMetadataList().stream().map( - p -> p.getKey() + ":" + p.getValue()) - .collect(Collectors.joining(", ")); - LOG.info("Container Metadata: {}", metadataStr); - - // Print pipeline of an existing container. - LOG.info("LeaderID: {}", container.getPipeline() - .getLeader().getHostName()); - String machinesStr = container.getPipeline() - .getMachines().stream().map( - DatanodeDetails::getHostName).collect(Collectors.joining(",")); - LOG.info("Datanodes: [{}]", machinesStr); - return null; - } - } -} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java deleted file mode 100644 index 0f520fd1b74e6..0000000000000 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java +++ /dev/null @@ -1,83 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.cli.container; - -import java.io.IOException; -import java.util.List; -import java.util.concurrent.Callable; - -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.scm.cli.SCMCLI; -import org.apache.hadoop.hdds.scm.client.ScmClient; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.ozone.web.utils.JsonUtils; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import picocli.CommandLine.Command; -import picocli.CommandLine.Help.Visibility; -import picocli.CommandLine.Option; -import picocli.CommandLine.ParentCommand; - -/** - * This is the handler that processes the container list command. - */ -@Command( - name = "list", - description = "List containers", - mixinStandardHelpOptions = true, - versionProvider = HddsVersionProvider.class) -public class ListSubcommand implements Callable<Void> { - - private static final Logger LOG = - LoggerFactory.getLogger(ListSubcommand.class); - - @ParentCommand - private SCMCLI parent; - - @Option(names = {"-s", "--start"}, - description = "Container id to start the iteration", required = true) - private long startId; - - @Option(names = {"-c", "--count"}, - description = "Maximum number of containers to list", - defaultValue = "20", showDefaultValue = Visibility.ALWAYS) - private int count = 20; - - private void outputContainerInfo(ContainerInfo containerInfo) - throws IOException { - // Print container report info. - LOG.info("{}", JsonUtils.toJsonStringWithDefaultPrettyPrinter( - containerInfo.toJsonString())); - } - - @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.createScmClient()) { - - List<ContainerInfo> containerList = - scmClient.listContainer(startId, count); - - // Output data list - for (ContainerInfo container : containerList) { - outputContainerInfo(container); - } - return null; - } - } -} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/package-info.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/package-info.java deleted file mode 100644 index ff8adbc56f191..0000000000000 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Contains all of the container related scm commands.
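
[Aside, not part of the patch: once wired into SCMCLI, these picocli subcommands would be driven from the shell roughly as follows. A sketch only; the `ozone scmcli` entry point is an assumption based on the SCMCLI parent command, and the container id and option values are illustrative.]

    ozone scmcli container list --start 1 --count 20
    ozone scmcli container info 123
    ozone scmcli container delete --force 123
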
- */ -package org.apache.hadoop.hdds.scm.cli.container; \ No newline at end of file diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/package-info.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/package-info.java deleted file mode 100644 index d358b3cf6a57c..0000000000000 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - *

- * SCM related cli tools. - */ -/** - * SCM related cli tools. - */ -package org.apache.hadoop.hdds.scm.cli; \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html index 0fdf552e08315..174a9dc1b65e5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html @@ -98,52 +98,6 @@ {/dn.BPServiceActorInfo} -{#ozone.enabled} -

- <table> - <thead> - <tr> - <th>SCM Address</th> - <th>Status</th> - <th>Version</th> - <th>Missed count</th> - <th>Last heartbeat</th> - </tr> - </thead> - {#ozone.SCMServers} - <tr> - <td>{addressString}</td> - <td>{state}</td> - <td>{versionNumber}</td> - <td>{missedCount}s</td> - <td>{lastSuccessfulHeartbeat|elapsed|fmt_time}</td> - </tr> - {/ozone.SCMServers} - </table>
- <table> - <thead> - <tr> - <th>ID</th> - <th>Capacity</th> - <th>Remaining</th> - <th>SCM used</th> - <th>failed</th> - </tr> - </thead> - {#ozone.LocationReport} - <tr> - <td>{id}</td> - <td>{capacity|fmt_bytes}</td> - <td>{remaining|fmt_bytes}</td> - <td>{scmUsed|fmt_bytes}</td> - <td>{failed}</td> - </tr> - {/ozone.LocationReport} - </table>
-{/ozone.enabled} - @@ -179,4 +133,4 @@ - \ No newline at end of file + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties index 2d1c98bb1801c..bd5a4dda41356 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties @@ -48,26 +48,3 @@ log4j.appender.DNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n log4j.appender.DNMETRICSRFA.MaxBackupIndex=1 log4j.appender.DNMETRICSRFA.MaxFileSize=64MB -# -# Add a logger for ozone that is separate from the Datanode. -# -log4j.logger.org.apache.hadoop.ozone=INFO,OZONE,FILE - -# Do not log into datanode logs. Remove this line to have single log. -log4j.additivity.org.apache.hadoop.ozone=false - -# For development purposes, log both to console and log file. -log4j.appender.OZONE=org.apache.log4j.ConsoleAppender -log4j.appender.OZONE.Threshold=ALL -log4j.appender.OZONE.layout=org.apache.log4j.PatternLayout -log4j.appender.OZONE.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) \ - %X{component} %X{function} %X{resource} %X{user} %X{request} - %m%n - -# Real ozone logger that writes to ozone.log -log4j.appender.FILE=org.apache.log4j.DailyRollingFileAppender -log4j.appender.FILE.File=${hadoop.log.dir}/ozone.log -log4j.appender.FILE.Threshold=debug -log4j.appender.FILE.layout=org.apache.log4j.PatternLayout -log4j.appender.FILE.layout.ConversionPattern=%d{ISO8601} [%t] %-5p \ - (%F:%L) %X{function} %X{resource} %X{user} %X{request} - \ - %m%n diff --git a/hadoop-ozone/client/pom.xml b/hadoop-ozone/client/pom.xml deleted file mode 100644 index e471710c293ff..0000000000000 --- a/hadoop-ozone/client/pom.xml +++ /dev/null @@ -1,37 +0,0 @@ - - - - 4.0.0 - - org.apache.hadoop - hadoop-ozone - 0.3.0-SNAPSHOT - - hadoop-ozone-client - 0.3.0-SNAPSHOT - Apache Hadoop Ozone Client - Apache Hadoop Ozone Client - jar - - - - org.apache.hadoop - hadoop-ozone-common - - - \ No newline at end of file diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java deleted file mode 100644 index 0da52dc033986..0000000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java +++ /dev/null @@ -1,123 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client; - -import org.apache.hadoop.fs.StorageType; -import org.apache.hadoop.ozone.OzoneAcl; - -import java.util.List; - -/** - * This class encapsulates the arguments that are - * required for creating a bucket. - */ -public final class BucketArgs { - - /** - * ACL Information. 
- */ - private List<OzoneAcl> acls; - /** - * Bucket Version flag. - */ - private Boolean versioning; - /** - * Type of storage to be used for this bucket. - * [RAM_DISK, SSD, DISK, ARCHIVE] - */ - private StorageType storageType; - - /** - * Private constructor, constructed via builder. - * @param versioning Bucket version flag. - * @param storageType Storage type to be used. - * @param acls list of ACLs. - */ - private BucketArgs(Boolean versioning, StorageType storageType, - List<OzoneAcl> acls) { - this.acls = acls; - this.versioning = versioning; - this.storageType = storageType; - } - - /** - * Returns true if bucket version is enabled, else false. - * @return isVersionEnabled - */ - public Boolean getVersioning() { - return versioning; - } - - /** - * Returns the type of storage to be used. - * @return StorageType - */ - public StorageType getStorageType() { - return storageType; - } - - /** - * Returns the ACLs associated with this bucket. - * @return List<OzoneAcl> - */ - public List<OzoneAcl> getAcls() { - return acls; - } - - /** - * Returns a new builder class that builds a BucketArgs instance. - * - * @return Builder - */ - public static BucketArgs.Builder newBuilder() { - return new BucketArgs.Builder(); - } - - /** - * Builder for BucketArgs. - */ - public static class Builder { - private Boolean versioning; - private StorageType storageType; - private List<OzoneAcl> acls; - - public BucketArgs.Builder setVersioning(Boolean versionFlag) { - this.versioning = versionFlag; - return this; - } - - public BucketArgs.Builder setStorageType(StorageType storage) { - this.storageType = storage; - return this; - } - - public BucketArgs.Builder setAcls(List<OzoneAcl> listOfAcls) { - this.acls = listOfAcls; - return this; - } - - /** - * Constructs the BucketArgs. - * @return instance of BucketArgs. - */ - public BucketArgs build() { - return new BucketArgs(versioning, storageType, acls); - } - } -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java deleted file mode 100644 index 17d19389e016c..0000000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java +++ /dev/null @@ -1,216 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -package org.apache.hadoop.ozone.client; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Strings; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.scm.client.HddsClientUtils; -import org.apache.hadoop.ozone.client.protocol.ClientProtocol; -import org.apache.hadoop.security.UserGroupInformation; - -import java.io.IOException; -import java.util.Iterator; -import java.util.List; -import java.util.NoSuchElementException; - -/** - * ObjectStore class is responsible for the client operations that can be - * performed on Ozone Object Store. - */ -public class ObjectStore { - - /** - * The proxy used for connecting to the cluster and performing - * client operations. - */ - private final ClientProtocol proxy; - - /** - * Cache size to be used for listVolume calls. - */ - private int listCacheSize; - - /** - * Creates an instance of ObjectStore. - * @param conf Configuration object. - * @param proxy ClientProtocol proxy. - */ - public ObjectStore(Configuration conf, ClientProtocol proxy) { - this.proxy = proxy; - this.listCacheSize = HddsClientUtils.getListCacheSize(conf); - } - - @VisibleForTesting - protected ObjectStore() { - proxy = null; - } - - /** - * Creates the volume with default values. - * @param volumeName Name of the volume to be created. - * @throws IOException - */ - public void createVolume(String volumeName) throws IOException { - proxy.createVolume(volumeName); - } - - /** - * Creates the volume. - * @param volumeName Name of the volume to be created. - * @param volumeArgs Volume properties. - * @throws IOException - */ - public void createVolume(String volumeName, VolumeArgs volumeArgs) - throws IOException { - proxy.createVolume(volumeName, volumeArgs); - } - - /** - * Returns the volume information. - * @param volumeName Name of the volume. - * @return OzoneVolume - * @throws IOException - */ - public OzoneVolume getVolume(String volumeName) throws IOException { - OzoneVolume volume = proxy.getVolumeDetails(volumeName); - return volume; - } - - - /** - * Returns Iterator to iterate over all the volumes in the object store. - * The result can be restricted using volume prefix, will return all - * volumes if volume prefix is null. - * - * @param volumePrefix Volume prefix to match - * @return {@code Iterator<OzoneVolume>} - */ - public Iterator<OzoneVolume> listVolumes(String volumePrefix) - throws IOException { - return listVolumes(volumePrefix, null); - } - - /** - * Returns Iterator to iterate over all the volumes after prevVolume in the - * object store. If prevVolume is null it iterates from the first volume. - * The result can be restricted using volume prefix, will return all - * volumes if volume prefix is null. - * - * @param volumePrefix Volume prefix to match - * @param prevVolume Volumes will be listed after this volume name - * @return {@code Iterator<OzoneVolume>} - */ - public Iterator<OzoneVolume> listVolumes(String volumePrefix, - String prevVolume) throws IOException { - return new VolumeIterator(null, volumePrefix, prevVolume); - } - - /** - * Returns Iterator to iterate over the list of volumes after prevVolume owned - * by a specific user. The result can be restricted using volume prefix, will - * return all volumes if volume prefix is null. If user is null, volumes owned - * by the current user are returned.
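
[Aside, not part of the patch: a minimal usage sketch of this listing API, assuming a reachable cluster and a caller that handles the declared IOException; the volume prefix is illustrative, while the factory and iterator calls are the ones defined in this file and in OzoneClientFactory below.]

    try (OzoneClient client = OzoneClientFactory.getRpcClient(new OzoneConfiguration())) {
      // The returned iterator lazily pages through volumes in listCacheSize batches.
      Iterator<OzoneVolume> volumes = client.getObjectStore().listVolumes("vol-");
      while (volumes.hasNext()) {
        System.out.println(volumes.next().getName());
      }
    }
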
- * - * @param user User Name - * @param volumePrefix Volume prefix to match - * @param prevVolume Volumes will be listed after this volume name - * @return {@code Iterator} - */ - public Iterator listVolumesByUser(String user, - String volumePrefix, String prevVolume) - throws IOException { - if(Strings.isNullOrEmpty(user)) { - user = UserGroupInformation.getCurrentUser().getShortUserName(); - } - return new VolumeIterator(user, volumePrefix, prevVolume); - } - - /** - * Deletes the volume. - * @param volumeName Name of the volume. - * @throws IOException - */ - public void deleteVolume(String volumeName) throws IOException { - proxy.deleteVolume(volumeName); - } - - /** - * An Iterator to iterate over {@link OzoneVolume} list. - */ - private class VolumeIterator implements Iterator { - - private String user = null; - private String volPrefix = null; - - private Iterator currentIterator; - private OzoneVolume currentValue; - - /** - * Creates an Iterator to iterate over all volumes after - * prevVolume of the user. If prevVolume is null it iterates from the - * first volume. The returned volumes match volume prefix. - * @param user user name - * @param volPrefix volume prefix to match - */ - VolumeIterator(String user, String volPrefix, String prevVolume) { - this.user = user; - this.volPrefix = volPrefix; - this.currentValue = null; - this.currentIterator = getNextListOfVolumes(prevVolume).iterator(); - } - - @Override - public boolean hasNext() { - if(!currentIterator.hasNext()) { - currentIterator = getNextListOfVolumes( - currentValue != null ? currentValue.getName() : null) - .iterator(); - } - return currentIterator.hasNext(); - } - - @Override - public OzoneVolume next() { - if(hasNext()) { - currentValue = currentIterator.next(); - return currentValue; - } - throw new NoSuchElementException(); - } - - /** - * Returns the next set of volume list using proxy. - * @param prevVolume previous volume, this will be excluded from the result - * @return {@code List} - */ - private List getNextListOfVolumes(String prevVolume) { - try { - //if user is null, we do list of all volumes. - if(user != null) { - return proxy.listVolumes(user, volPrefix, prevVolume, listCacheSize); - } - return proxy.listVolumes(volPrefix, prevVolume, listCacheSize); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - } - -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java deleted file mode 100644 index 751992ed6d745..0000000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java +++ /dev/null @@ -1,382 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.client; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.StorageType; -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.scm.client.HddsClientUtils; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.client.io.OzoneInputStream; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.client.protocol.ClientProtocol; -import org.apache.hadoop.ozone.OzoneAcl; - -import java.io.IOException; -import java.util.Iterator; -import java.util.List; -import java.util.NoSuchElementException; - -/** - * A class that encapsulates OzoneBucket. - */ -public class OzoneBucket { - - /** - * The proxy used for connecting to the cluster and perform - * client operations. - */ - private final ClientProtocol proxy; - /** - * Name of the volume in which the bucket belongs to. - */ - private final String volumeName; - /** - * Name of the bucket. - */ - private final String name; - /** - * Default replication factor to be used while creating keys. - */ - private final ReplicationFactor defaultReplication; - - /** - * Default replication type to be used while creating keys. - */ - private final ReplicationType defaultReplicationType; - /** - * Bucket ACLs. - */ - private List acls; - - /** - * Type of storage to be used for this bucket. - * [RAM_DISK, SSD, DISK, ARCHIVE] - */ - private StorageType storageType; - - /** - * Bucket Version flag. - */ - private Boolean versioning; - - /** - * Cache size to be used for listKey calls. - */ - private int listCacheSize; - - /** - * Creation time of the bucket. - */ - private long creationTime; - - /** - * Constructs OzoneBucket instance. - * @param conf Configuration object. - * @param proxy ClientProtocol proxy. - * @param volumeName Name of the volume the bucket belongs to. - * @param bucketName Name of the bucket. - * @param acls ACLs associated with the bucket. - * @param storageType StorageType of the bucket. - * @param versioning versioning status of the bucket. - * @param creationTime creation time of the bucket. 
- */ - public OzoneBucket(Configuration conf, ClientProtocol proxy, - String volumeName, String bucketName, - List acls, StorageType storageType, - Boolean versioning, long creationTime) { - Preconditions.checkNotNull(proxy, "Client proxy is not set."); - this.proxy = proxy; - this.volumeName = volumeName; - this.name = bucketName; - this.acls = acls; - this.storageType = storageType; - this.versioning = versioning; - this.listCacheSize = HddsClientUtils.getListCacheSize(conf); - this.creationTime = creationTime; - this.defaultReplication = ReplicationFactor.valueOf(conf.getInt( - OzoneConfigKeys.OZONE_REPLICATION, - OzoneConfigKeys.OZONE_REPLICATION_DEFAULT)); - this.defaultReplicationType = ReplicationType.valueOf(conf.get( - OzoneConfigKeys.OZONE_REPLICATION_TYPE, - OzoneConfigKeys.OZONE_REPLICATION_TYPE_DEFAULT)); - } - - @VisibleForTesting - OzoneBucket(String volumeName, String name, - ReplicationFactor defaultReplication, - ReplicationType defaultReplicationType, - List acls, StorageType storageType, Boolean versioning, - long creationTime) { - this.proxy = null; - this.volumeName = volumeName; - this.name = name; - this.defaultReplication = defaultReplication; - this.defaultReplicationType = defaultReplicationType; - this.acls = acls; - this.storageType = storageType; - this.versioning = versioning; - this.creationTime = creationTime; - } - - /** - * Returns Volume Name. - * - * @return volumeName - */ - public String getVolumeName() { - return volumeName; - } - - /** - * Returns Bucket Name. - * - * @return bucketName - */ - public String getName() { - return name; - } - - /** - * Returns ACL's associated with the Bucket. - * - * @return acls - */ - public List getAcls() { - return acls; - } - - /** - * Returns StorageType of the Bucket. - * - * @return storageType - */ - public StorageType getStorageType() { - return storageType; - } - - /** - * Returns Versioning associated with the Bucket. - * - * @return versioning - */ - public Boolean getVersioning() { - return versioning; - } - - /** - * Returns creation time of the Bucket. - * - * @return creation time of the bucket - */ - public long getCreationTime() { - return creationTime; - } - - /** - * Adds ACLs to the Bucket. - * @param addAcls ACLs to be added - * @throws IOException - */ - public void addAcls(List addAcls) throws IOException { - proxy.addBucketAcls(volumeName, name, addAcls); - addAcls.stream().filter(acl -> !acls.contains(acl)).forEach( - acls::add); - } - - /** - * Removes ACLs from the bucket. - * @param removeAcls ACLs to be removed - * @throws IOException - */ - public void removeAcls(List removeAcls) throws IOException { - proxy.removeBucketAcls(volumeName, name, removeAcls); - acls.removeAll(removeAcls); - } - - /** - * Sets/Changes the storage type of the bucket. - * @param newStorageType Storage type to be set - * @throws IOException - */ - public void setStorageType(StorageType newStorageType) throws IOException { - proxy.setBucketStorageType(volumeName, name, newStorageType); - storageType = newStorageType; - } - - /** - * Enable/Disable versioning of the bucket. - * @param newVersioning - * @throws IOException - */ - public void setVersioning(Boolean newVersioning) throws IOException { - proxy.setBucketVersioning(volumeName, name, newVersioning); - versioning = newVersioning; - } - - /** - * Creates a new key in the bucket, with default replication type RATIS and - * with replication factor THREE. - * @param key Name of the key to be created. - * @param size Size of the data the key will point to. 
- * @return OzoneOutputStream to which the data has to be written. - * @throws IOException - */ - public OzoneOutputStream createKey(String key, long size) - throws IOException { - return createKey(key, size, defaultReplicationType, defaultReplication); - } - - /** - * Creates a new key in the bucket. - * @param key Name of the key to be created. - * @param size Size of the data the key will point to. - * @param type Replication type to be used. - * @param factor Replication factor of the key. - * @return OzoneOutputStream to which the data has to be written. - * @throws IOException - */ - public OzoneOutputStream createKey(String key, long size, - ReplicationType type, - ReplicationFactor factor) - throws IOException { - return proxy.createKey(volumeName, name, key, size, type, factor); - } - - /** - * Reads an existing key from the bucket. - * @param key Name of the key to be read. - * @return OzoneInputStream the stream using which the data can be read. - * @throws IOException - */ - public OzoneInputStream readKey(String key) throws IOException { - return proxy.getKey(volumeName, name, key); - } - - /** - * Returns information about the key. - * @param key Name of the key. - * @return OzoneKeyDetails Information about the key. - * @throws IOException - */ - public OzoneKeyDetails getKey(String key) throws IOException { - return proxy.getKeyDetails(volumeName, name, key); - } - - /** - * Returns Iterator to iterate over all keys in the bucket. - * The result can be restricted using key prefix, will return all - * keys if key prefix is null. - * - * @param keyPrefix Bucket prefix to match - * @return {@code Iterator} - */ - public Iterator listKeys(String keyPrefix) { - return listKeys(keyPrefix, null); - } - - /** - * Returns Iterator to iterate over all keys after prevKey in the bucket. - * If prevKey is null it iterates from the first key in the bucket. - * The result can be restricted using key prefix, will return all - * keys if key prefix is null. - * - * @param keyPrefix Bucket prefix to match - * @param prevKey Keys will be listed after this key name - * @return {@code Iterator} - */ - public Iterator listKeys(String keyPrefix, - String prevKey) { - return new KeyIterator(keyPrefix, prevKey); - } - - /** - * Deletes key from the bucket. - * @param key Name of the key to be deleted. - * @throws IOException - */ - public void deleteKey(String key) throws IOException { - proxy.deleteKey(volumeName, name, key); - } - - public void renameKey(String fromKeyName, String toKeyName) - throws IOException { - proxy.renameKey(volumeName, name, fromKeyName, toKeyName); - } - - /** - * An Iterator to iterate over {@link OzoneKey} list. - */ - private class KeyIterator implements Iterator { - - private String keyPrefix = null; - - private Iterator currentIterator; - private OzoneKey currentValue; - - - /** - * Creates an Iterator to iterate over all keys after prevKey in the bucket. - * If prevKey is null it iterates from the first key in the bucket. - * The returned keys match key prefix. - * @param keyPrefix - */ - KeyIterator(String keyPrefix, String prevKey) { - this.keyPrefix = keyPrefix; - this.currentValue = null; - this.currentIterator = getNextListOfKeys(prevKey).iterator(); - } - - @Override - public boolean hasNext() { - if(!currentIterator.hasNext()) { - currentIterator = getNextListOfKeys( - currentValue != null ? 
currentValue.getName() : null) - .iterator(); - } - return currentIterator.hasNext(); - } - - @Override - public OzoneKey next() { - if(hasNext()) { - currentValue = currentIterator.next(); - return currentValue; - } - throw new NoSuchElementException(); - } - - /** - * Gets the next batch of keys using the proxy. - * @param prevKey Keys will be listed after this key name - * @return {@code List<OzoneKey>} - */ - private List<OzoneKey> getNextListOfKeys(String prevKey) { - try { - return proxy.listKeys(volumeName, name, keyPrefix, prevKey, - listCacheSize); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - } -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClient.java deleted file mode 100644 index 0d65d73fc3b5b..0000000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClient.java +++ /dev/null @@ -1,110 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ozone.client.protocol.ClientProtocol; - -import java.io.Closeable; -import java.io.IOException; - -import com.google.common.annotations.VisibleForTesting; - -/** - * OzoneClient connects to Ozone Cluster and - * performs basic operations. - */ -public class OzoneClient implements Closeable { - - /* - * OzoneClient connects to Ozone Cluster and - * performs basic operations.
- * - * +-------------+ +---+ +-------------------------------------+ - * | OzoneClient | --> | C | | Object Store | - * |_____________| | l | | +-------------------------------+ | - * | i | | | Volume(s) | | - * | e | | | +------------------------+ | | - * | n | | | | Bucket(s) | | | - * | t | | | | +------------------+ | | | - * | | | | | | Key -> Value (s) | | | | - * | P |-->| | | | | | | | - * | r | | | | |__________________| | | | - * | o | | | | | | | - * | t | | | |________________________| | | - * | o | | | | | - * | c | | |_______________________________| | - * | o | | | - * | l | |_____________________________________| - * |___| - * Example: - * ObjectStore store = client.getObjectStore(); - * store.createVolume("volume one", VolumeArgs); - * volume.setQuota("10 GB"); - * OzoneVolume volume = store.getVolume("volume one"); - * volume.createBucket("bucket one", BucketArgs); - * bucket.setVersioning(true); - * OzoneOutputStream os = bucket.createKey("key one", 1024); - * os.write(byte[]); - * os.close(); - * OzoneInputStream is = bucket.readKey("key one"); - * is.read(); - * is.close(); - * bucket.deleteKey("key one"); - * volume.deleteBucket("bucket one"); - * store.deleteVolume("volume one"); - * client.close(); - */ - - private final ClientProtocol proxy; - private final ObjectStore objectStore; - - /** - * Creates a new OzoneClient object, generally constructed - * using {@link OzoneClientFactory}. - * @param conf Configuration object - * @param proxy ClientProtocol proxy instance - */ - public OzoneClient(Configuration conf, ClientProtocol proxy) { - this.proxy = proxy; - this.objectStore = new ObjectStore(conf, this.proxy); - } - - @VisibleForTesting - protected OzoneClient(ObjectStore objectStore) { - this.objectStore = objectStore; - this.proxy = null; - } - /** - * Returns the object store associated with the Ozone Cluster. - * @return ObjectStore - */ - public ObjectStore getObjectStore() { - return objectStore; - } - - /** - * Closes the client and all the underlying resources. - * @throws IOException - */ - @Override - public void close() throws IOException { - proxy.close(); - } -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientException.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientException.java deleted file mode 100644 index de3116a6aa842..0000000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientException.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.client; - -import org.apache.hadoop.ozone.client.rest.OzoneException; - -/** - * This exception is thrown by the Ozone Clients.
- */ -public class OzoneClientException extends OzoneException { - /** - * Constructor that allows the shortMessage. - * - * @param shortMessage Short Message - */ - public OzoneClientException(String shortMessage) { - super(0, shortMessage, shortMessage); - } - - /** - * Constructor that allows a shortMessage and an exception. - * - * @param shortMessage short message - * @param ex exception - */ - public OzoneClientException(String shortMessage, Exception ex) { - super(0, shortMessage, shortMessage, ex); - } - - /** - * Constructor that allows the shortMessage and a longer message. - * - * @param shortMessage Short Message - * @param message long error message - */ - public OzoneClientException(String shortMessage, String message) { - super(0, shortMessage, message); - } -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java deleted file mode 100644 index de0d166abda55..0000000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java +++ /dev/null @@ -1,306 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.client.protocol.ClientProtocol; -import org.apache.hadoop.ozone.client.rest.RestClient; -import org.apache.hadoop.ozone.client.rpc.RpcClient; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.lang.reflect.Constructor; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Proxy; - -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_PROTOCOL; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY; - -/** - * Factory class to create different types of OzoneClients. - * Based on ozone.client.protocol, it decides which - * protocol to use for the communication. - * Default value is - * org.apache.hadoop.ozone.client.rpc.RpcClient.
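
[Aside, not part of the patch: a sketch of overriding that default, using the OZONE_CLIENT_PROTOCOL key and the RestClient and ClientProtocol classes imported above; the surrounding setup is assumed.]

    OzoneConfiguration conf = new OzoneConfiguration();
    // ozone.client.protocol selects the ClientProtocol implementation to load.
    conf.setClass(OZONE_CLIENT_PROTOCOL, RestClient.class, ClientProtocol.class);
    OzoneClient client = OzoneClientFactory.getClient(conf);
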
- * OzoneClientFactory constructs a proxy using - * {@link OzoneClientInvocationHandler} - * and creates an OzoneClient instance with it. - * {@link OzoneClientInvocationHandler} dispatches the call to - * underlying {@link ClientProtocol} implementation. - */ -public final class OzoneClientFactory { - - private static final Logger LOG = LoggerFactory.getLogger( - OzoneClientFactory.class); - - /** - * Private constructor, class is not meant to be instantiated. - */ - private OzoneClientFactory(){} - - - /** - * Constructs and returns an OzoneClient with default configuration. - * - * @return OzoneClient - * - * @throws IOException - */ - public static OzoneClient getClient() throws IOException { - LOG.info("Creating OzoneClient with default configuration."); - return getClient(new OzoneConfiguration()); - } - - /** - * Constructs and returns an OzoneClient based on the configuration object. - * Protocol type is decided by ozone.client.protocol. - * - * @param config - * Configuration to be used for OzoneClient creation - * - * @return OzoneClient - * - * @throws IOException - */ - public static OzoneClient getClient(Configuration config) - throws IOException { - Preconditions.checkNotNull(config); - Class<? extends ClientProtocol> clazz = (Class<? extends ClientProtocol>) - config.getClass(OZONE_CLIENT_PROTOCOL, RpcClient.class); - return getClient(getClientProtocol(clazz, config), config); - } - - /** - * Returns an OzoneClient which will use RPC protocol. - * - * @param omHost - * hostname of OzoneManager to connect. - * - * @return OzoneClient - * - * @throws IOException - */ - public static OzoneClient getRpcClient(String omHost) - throws IOException { - Configuration config = new OzoneConfiguration(); - int port = OmUtils.getOmRpcPort(config); - return getRpcClient(omHost, port, config); - } - - /** - * Returns an OzoneClient which will use RPC protocol. - * - * @param omHost - * hostname of OzoneManager to connect. - * - * @param omRpcPort - * RPC port of OzoneManager. - * - * @return OzoneClient - * - * @throws IOException - */ - public static OzoneClient getRpcClient(String omHost, Integer omRpcPort) - throws IOException { - return getRpcClient(omHost, omRpcPort, new OzoneConfiguration()); - } - - /** - * Returns an OzoneClient which will use RPC protocol. - * - * @param omHost - * hostname of OzoneManager to connect. - * - * @param omRpcPort - * RPC port of OzoneManager. - * - * @param config - * Configuration to be used for OzoneClient creation - * - * @return OzoneClient - * - * @throws IOException - */ - public static OzoneClient getRpcClient(String omHost, Integer omRpcPort, - Configuration config) - throws IOException { - Preconditions.checkNotNull(omHost); - Preconditions.checkNotNull(omRpcPort); - Preconditions.checkNotNull(config); - config.set(OZONE_OM_ADDRESS_KEY, omHost + ":" + omRpcPort); - return getRpcClient(config); - } - - /** - * Returns an OzoneClient which will use RPC protocol. - * - * @param config - * used for OzoneClient creation - * - * @return OzoneClient - * - * @throws IOException - */ - public static OzoneClient getRpcClient(Configuration config) - throws IOException { - Preconditions.checkNotNull(config); - return getClient(getClientProtocol(RpcClient.class, config), - config); - } - - /** - * Returns an OzoneClient which will use REST protocol. - * - * @param omHost - * hostname of OzoneManager to connect.
- * - * @return OzoneClient - * - * @throws IOException - */ - public static OzoneClient getRestClient(String omHost) - throws IOException { - Configuration config = new OzoneConfiguration(); - int port = OmUtils.getOmRestPort(config); - return getRestClient(omHost, port, config); - } - - /** - * Returns an OzoneClient which will use REST protocol. - * - * @param omHost - * hostname of OzoneManager to connect. - * - * @param omHttpPort - * HTTP port of OzoneManager. - * - * @return OzoneClient - * - * @throws IOException - */ - public static OzoneClient getRestClient(String omHost, Integer omHttpPort) - throws IOException { - return getRestClient(omHost, omHttpPort, new OzoneConfiguration()); - } - - /** - * Returns an OzoneClient which will use REST protocol. - * - * @param omHost - * hostname of OzoneManager to connect. - * - * @param omHttpPort - * HTTP port of OzoneManager. - * - * @param config - * Configuration to be used for OzoneClient creation - * - * @return OzoneClient - * - * @throws IOException - */ - public static OzoneClient getRestClient(String omHost, Integer omHttpPort, - Configuration config) - throws IOException { - Preconditions.checkNotNull(omHost); - Preconditions.checkNotNull(omHttpPort); - Preconditions.checkNotNull(config); - config.set(OZONE_OM_HTTP_ADDRESS_KEY, omHost + ":" + omHttpPort); - return getRestClient(config); - } - - /** - * Returns an OzoneClient which will use REST protocol. - * - * @param config - * Configuration to be used for OzoneClient creation - * - * @return OzoneClient - * - * @throws IOException - */ - public static OzoneClient getRestClient(Configuration config) - throws IOException { - Preconditions.checkNotNull(config); - return getClient(getClientProtocol(RestClient.class, config), - config); - } - - /** - * Creates OzoneClient with the given ClientProtocol and Configuration. - * - * @param clientProtocol - * Protocol to be used by the OzoneClient - * - * @param config - * Configuration to be used for OzoneClient creation - */ - private static OzoneClient getClient(ClientProtocol clientProtocol, - Configuration config) { - OzoneClientInvocationHandler clientHandler = - new OzoneClientInvocationHandler(clientProtocol); - ClientProtocol proxy = (ClientProtocol) Proxy.newProxyInstance( - OzoneClientInvocationHandler.class.getClassLoader(), - new Class[]{ClientProtocol.class}, clientHandler); - return new OzoneClient(config, proxy); - } - - /** - * Returns an instance of Protocol class. - * - * @param protocolClass - * Class object of the ClientProtocol. - * - * @param config - * Configuration used to initialize ClientProtocol. 
- * - * @return ClientProtocol - * - * @throws IOException - */ - private static ClientProtocol getClientProtocol( - Class protocolClass, Configuration config) - throws IOException { - try { - LOG.debug("Using {} as client protocol.", - protocolClass.getCanonicalName()); - Constructor ctor = - protocolClass.getConstructor(Configuration.class); - return ctor.newInstance(config); - } catch (Exception e) { - final String message = "Couldn't create protocol " + protocolClass; - LOG.error(message + " exception: ", e); - if (e.getCause() instanceof IOException) { - throw (IOException) e.getCause(); - } else if (e instanceof InvocationTargetException) { - throw new IOException(message, - ((InvocationTargetException) e).getTargetException()); - } else { - throw new IOException(message, e); - } - } - } - -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientInvocationHandler.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientInvocationHandler.java deleted file mode 100644 index 3051e2dd1c8b3..0000000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientInvocationHandler.java +++ /dev/null @@ -1,62 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client; - -import org.apache.hadoop.ozone.client.protocol.ClientProtocol; -import org.apache.hadoop.util.Time; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.lang.reflect.InvocationHandler; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; - -/** - * Invocation Handler for ozone client which dispatches the call to underlying - * ClientProtocol implementation. - */ -public class OzoneClientInvocationHandler implements InvocationHandler { - - - private static final Logger LOG = LoggerFactory.getLogger(OzoneClient.class); - private final ClientProtocol target; - - /** - * Constructs OzoneClientInvocationHandler with the proxy. - * @param target proxy to be used for method invocation. 
- */ - public OzoneClientInvocationHandler(ClientProtocol target) { - this.target = target; - } - - @Override - public Object invoke(Object proxy, Method method, Object[] args) - throws Throwable { - LOG.trace("Invoking method {} on proxy {}", method, proxy); - try { - long startTime = Time.monotonicNow(); - Object result = method.invoke(target, args); - LOG.debug("Call: {} took {} ms", method, - Time.monotonicNow() - startTime); - return result; - } catch(InvocationTargetException iEx) { - throw iEx.getCause(); - } - } -} \ No newline at end of file diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java deleted file mode 100644 index 40e4d83113e32..0000000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java +++ /dev/null @@ -1,136 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.client; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.client.OzoneQuota; -import org.apache.hadoop.hdds.scm.client.HddsClientUtils; -import org.apache.hadoop.hdds.scm.container.common.helpers.BlockNotCommittedException; -import org.apache.hadoop.io.retry.RetryPolicies; -import org.apache.hadoop.io.retry.RetryPolicy; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.client.rest.response.*; - -import java.util.ArrayList; -import java.util.List; - -import java.util.HashMap; -import java.util.Map; -import java.util.concurrent.TimeUnit; - -/** A utility class for OzoneClient. */ -public final class OzoneClientUtils { - - private OzoneClientUtils() {} - - /** - * Returns a BucketInfo object constructed using fields of the input - * OzoneBucket object. - * - * @param bucket OzoneBucket instance from which BucketInfo object needs to - * be created. - * @return BucketInfo instance - */ - public static BucketInfo asBucketInfo(OzoneBucket bucket) { - BucketInfo bucketInfo = - new BucketInfo(bucket.getVolumeName(), bucket.getName()); - bucketInfo - .setCreatedOn(HddsClientUtils.formatDateTime(bucket.getCreationTime())); - bucketInfo.setStorageType(bucket.getStorageType()); - bucketInfo.setVersioning( - OzoneConsts.Versioning.getVersioning(bucket.getVersioning())); - bucketInfo.setAcls(bucket.getAcls()); - return bucketInfo; - } - - /** - * Returns a VolumeInfo object constructed using fields of the input - * OzoneVolume object. - * - * @param volume OzoneVolume instance from which VolumeInfo object needs to - * be created. - * @return VolumeInfo instance - */ - public static VolumeInfo asVolumeInfo(OzoneVolume volume) { - VolumeInfo volumeInfo = new VolumeInfo(volume.getName(), - HddsClientUtils.formatDateTime(volume.getCreationTime()), - volume.getOwner()); - volumeInfo.setQuota(OzoneQuota.getOzoneQuota(volume.getQuota())); - volumeInfo.setOwner(new VolumeOwner(volume.getOwner())); - return volumeInfo; - } - - /** - * Returns a KeyInfo object constructed using fields of the input - * OzoneKey object. - * - * @param key OzoneKey instance from which KeyInfo object needs to - * be created. - * @return KeyInfo instance - */ - public static KeyInfo asKeyInfo(OzoneKey key) { - KeyInfo keyInfo = new KeyInfo(); - keyInfo.setKeyName(key.getName()); - keyInfo.setCreatedOn(HddsClientUtils.formatDateTime(key.getCreationTime())); - keyInfo.setModifiedOn( - HddsClientUtils.formatDateTime(key.getModificationTime())); - keyInfo.setSize(key.getDataSize()); - return keyInfo; - } - - public static RetryPolicy createRetryPolicy(Configuration conf) { - int maxRetryCount = - conf.getInt(OzoneConfigKeys.OZONE_CLIENT_MAX_RETRIES, OzoneConfigKeys. - OZONE_CLIENT_MAX_RETRIES_DEFAULT); - long retryInterval = conf.getTimeDuration(OzoneConfigKeys. - OZONE_CLIENT_RETRY_INTERVAL, OzoneConfigKeys. 
- OZONE_CLIENT_RETRY_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS); - RetryPolicy basePolicy = RetryPolicies - .retryUpToMaximumCountWithFixedSleep(maxRetryCount, retryInterval, - TimeUnit.MILLISECONDS); - Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap = - new HashMap<Class<? extends Exception>, RetryPolicy>(); - exceptionToPolicyMap.put(BlockNotCommittedException.class, basePolicy); - RetryPolicy retryPolicy = RetryPolicies - .retryByException(RetryPolicies.TRY_ONCE_THEN_FAIL, - exceptionToPolicyMap); - return retryPolicy; - } - /** - * Returns a KeyInfoDetails object constructed using fields of the input - * OzoneKeyDetails object. - * - * @param key OzoneKeyDetails instance from which KeyInfo object needs to - * be created. - * @return KeyInfoDetails instance - */ - public static KeyInfoDetails asKeyInfoDetails(OzoneKeyDetails key) { - KeyInfoDetails keyInfo = new KeyInfoDetails(); - keyInfo.setKeyName(key.getName()); - keyInfo.setCreatedOn(HddsClientUtils.formatDateTime(key.getCreationTime())); - keyInfo.setModifiedOn( - HddsClientUtils.formatDateTime(key.getModificationTime())); - keyInfo.setSize(key.getDataSize()); - List<KeyLocation> keyLocations = new ArrayList<>(); - key.getOzoneKeyLocations().forEach((a) -> keyLocations.add(new KeyLocation( - a.getContainerID(), a.getLocalID(), a.getLength(), a.getOffset()))); - keyInfo.setKeyLocation(keyLocations); - return keyInfo; - } -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java deleted file mode 100644 index 7c93146abdacc..0000000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java +++ /dev/null @@ -1,119 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client; - -/** - * A class that encapsulates OzoneKey. - */ -public class OzoneKey { - - /** - * Name of the Volume the Key belongs to. - */ - private final String volumeName; - /** - * Name of the Bucket the Key belongs to. - */ - private final String bucketName; - /** - * Name of the Key. - */ - private final String name; - /** - * Size of the data. - */ - private final long dataSize; - /** - * Creation time of the key. - */ - private long creationTime; - /** - * Modification time of the key. - */ - private long modificationTime; - - /** - * Constructs OzoneKey from OmKeyInfo. - * - */ - public OzoneKey(String volumeName, String bucketName, - String keyName, long size, long creationTime, - long modificationTime) { - this.volumeName = volumeName; - this.bucketName = bucketName; - this.name = keyName; - this.dataSize = size; - this.creationTime = creationTime; - this.modificationTime = modificationTime; - } - - /** - * Returns Volume Name associated with the Key.
- * - * @return volumeName - */ - public String getVolumeName() { - return volumeName; - } - - /** - * Returns Bucket Name associated with the Key. - * - * @return bucketName - */ - public String getBucketName(){ - return bucketName; - } - - /** - * Returns the Key Name. - * - * @return keyName - */ - public String getName() { - return name; - } - - /** - * Returns the size of the data. - * - * @return dataSize - */ - public long getDataSize() { - return dataSize; - } - - /** - * Returns the creation time of the key. - * - * @return creation time - */ - public long getCreationTime() { - return creationTime; - } - - /** - * Returns the modification time of the key. - * - * @return modification time - */ - public long getModificationTime() { - return modificationTime; - } -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKeyDetails.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKeyDetails.java deleted file mode 100644 index e7709ddfb899f..0000000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKeyDetails.java +++ /dev/null @@ -1,58 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client; - -import java.util.List; - -/** - * A class that encapsulates OzoneKeyLocation. - */ -public class OzoneKeyDetails extends OzoneKey { - - /** - * A list of block location information to specify replica locations. - */ - private List ozoneKeyLocations; - - /** - * Constructs OzoneKeyDetails from OmKeyInfo. - */ - public OzoneKeyDetails(String volumeName, String bucketName, String keyName, - long size, long creationTime, long modificationTime, - List ozoneKeyLocations) { - super(volumeName, bucketName, keyName, size, creationTime, - modificationTime); - this.ozoneKeyLocations = ozoneKeyLocations; - } - - /** - * Returns the location detail information of the specific Key. - */ - public List getOzoneKeyLocations() { - return ozoneKeyLocations; - } - - /** - * Set details of key location. - * @param ozoneKeyLocations - details of key location - */ - public void setOzoneKeyLocations(List ozoneKeyLocations) { - this.ozoneKeyLocations = ozoneKeyLocations; - } -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKeyLocation.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKeyLocation.java deleted file mode 100644 index 0ff8ba749b622..0000000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKeyLocation.java +++ /dev/null @@ -1,82 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client; - -/** - * One key can be stored in one or more containers as one or more blocks. - * This class represents one such block instance. - */ -public class OzoneKeyLocation { - /** - * Which container this key stored. - */ - private final long containerID; - /** - * Which block this key stored inside a container. - */ - private final long localID; - /** - * Data length of this key replica. - */ - private final long length; - /** - * Offset of this key. - */ - private final long offset; - - /** - * Constructs OzoneKeyLocation. - */ - public OzoneKeyLocation(long containerID, long localID, - long length, long offset) { - this.containerID = containerID; - this.localID = localID; - this.length = length; - this.offset = offset; - } - - /** - * Returns the containerID of this Key. - */ - public long getContainerID() { - return containerID; - } - - /** - * Returns the localID of this Key. - */ - public long getLocalID() { - return localID; - } - - /** - * Returns the length of this Key. - */ - public long getLength() { - return length; - } - - /** - * Returns the offset of this Key. - */ - public long getOffset() { - return offset; - } - -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java deleted file mode 100644 index e451b1ac24bfa..0000000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java +++ /dev/null @@ -1,311 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
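OzoneKeyLocation pins one block replica of a key to a (containerID, localID) pair plus the byte range it covers, and OzoneKeyDetails carries the full list. A hypothetical usage sketch (every literal value here is invented for illustration; the constructor argument orders follow the classes above):

    import java.util.Arrays;
    import java.util.List;

    import org.apache.hadoop.ozone.client.OzoneKeyDetails;
    import org.apache.hadoop.ozone.client.OzoneKeyLocation;

    public class KeyLocationExample {
      public static void main(String[] args) {
        long now = System.currentTimeMillis();
        // OzoneKeyLocation(containerID, localID, length, offset)
        List<OzoneKeyLocation> locations = Arrays.asList(
            new OzoneKeyLocation(1L, 101L, 4096L, 0L),
            new OzoneKeyLocation(2L, 102L, 4096L, 4096L));
        OzoneKeyDetails details = new OzoneKeyDetails(
            "vol1", "bucket1", "key1", 8192L, now, now, locations);
        for (OzoneKeyLocation loc : details.getOzoneKeyLocations()) {
          System.out.printf("container=%d block=%d offset=%d length=%d%n",
              loc.getContainerID(), loc.getLocalID(),
              loc.getOffset(), loc.getLength());
        }
      }
    }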
- */ - -package org.apache.hadoop.ozone.client; - -import java.io.IOException; -import java.util.Iterator; -import java.util.List; -import java.util.NoSuchElementException; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.client.OzoneQuota; -import org.apache.hadoop.hdds.scm.client.HddsClientUtils; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.client.protocol.ClientProtocol; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; - -/** - * A class that encapsulates OzoneVolume. - */ -public class OzoneVolume { - - /** - * The proxy used for connecting to the cluster and perform - * client operations. - */ - private final ClientProtocol proxy; - - /** - * Name of the Volume. - */ - private final String name; - - /** - * Admin Name of the Volume. - */ - private String admin; - /** - * Owner of the Volume. - */ - private String owner; - /** - * Quota allocated for the Volume. - */ - private long quotaInBytes; - /** - * Creation time of the volume. - */ - private long creationTime; - /** - * Volume ACLs. - */ - private List acls; - - private int listCacheSize; - - /** - * Constructs OzoneVolume instance. - * @param conf Configuration object. - * @param proxy ClientProtocol proxy. - * @param name Name of the volume. - * @param admin Volume admin. - * @param owner Volume owner. - * @param quotaInBytes Volume quota in bytes. - * @param creationTime creation time of the volume - * @param acls ACLs associated with the volume. - */ - public OzoneVolume(Configuration conf, ClientProtocol proxy, String name, - String admin, String owner, long quotaInBytes, - long creationTime, List acls) { - Preconditions.checkNotNull(proxy, "Client proxy is not set."); - this.proxy = proxy; - this.name = name; - this.admin = admin; - this.owner = owner; - this.quotaInBytes = quotaInBytes; - this.creationTime = creationTime; - this.acls = acls; - this.listCacheSize = HddsClientUtils.getListCacheSize(conf); - } - - @VisibleForTesting - protected OzoneVolume(String name, String admin, String owner, - long quotaInBytes, - long creationTime, List acls) { - this.proxy = null; - this.name = name; - this.admin = admin; - this.owner = owner; - this.quotaInBytes = quotaInBytes; - this.creationTime = creationTime; - this.acls = acls; - } - - /** - * Returns Volume name. - * - * @return volumeName - */ - public String getName() { - return name; - } - - /** - * Returns Volume's admin name. - * - * @return adminName - */ - public String getAdmin() { - return admin; - } - - /** - * Returns Volume's owner name. - * - * @return ownerName - */ - public String getOwner() { - return owner; - } - - /** - * Returns Quota allocated for the Volume in bytes. - * - * @return quotaInBytes - */ - public long getQuota() { - return quotaInBytes; - } - - /** - * Returns creation time of the volume. - * - * @return creation time. - */ - public long getCreationTime() { - return creationTime; - } - - /** - * Returns OzoneAcl list associated with the Volume. - * - * @return aclMap - */ - public List getAcls() { - return acls; - } - - /** - * Sets/Changes the owner of this Volume. - * @param owner new owner - * @throws IOException - */ - public void setOwner(String owner) throws IOException { - proxy.setVolumeOwner(name, owner); - this.owner = owner; - } - - /** - * Sets/Changes the quota of this Volume. 
- * @param quota new quota - * @throws IOException - */ - public void setQuota(OzoneQuota quota) throws IOException { - proxy.setVolumeQuota(name, quota); - this.quotaInBytes = quota.sizeInBytes(); - } - - /** - * Creates a new Bucket in this Volume, with default values. - * @param bucketName Name of the Bucket - * @throws IOException - */ - public void createBucket(String bucketName) - throws IOException { - proxy.createBucket(name, bucketName); - } - - /** - * Creates a new Bucket in this Volume, with properties set in bucketArgs. - * @param bucketName Name of the Bucket - * @param bucketArgs Properties to be set - * @throws IOException - */ - public void createBucket(String bucketName, BucketArgs bucketArgs) - throws IOException { - proxy.createBucket(name, bucketName, bucketArgs); - } - - /** - * Get the Bucket from this Volume. - * @param bucketName Name of the Bucket - * @return OzoneBucket - * @throws IOException - */ - public OzoneBucket getBucket(String bucketName) throws IOException { - OzoneBucket bucket = proxy.getBucketDetails(name, bucketName); - return bucket; - } - - /** - * Returns Iterator to iterate over all buckets in the volume. - * The result can be restricted using bucket prefix, will return all - * buckets if bucket prefix is null. - * - * @param bucketPrefix Bucket prefix to match - * @return {@code Iterator} - */ - public Iterator listBuckets(String bucketPrefix) { - return listBuckets(bucketPrefix, null); - } - - /** - * Returns Iterator to iterate over all buckets after prevBucket in the - * volume. - * If prevBucket is null it iterates from the first bucket in the volume. - * The result can be restricted using bucket prefix, will return all - * buckets if bucket prefix is null. - * - * @param bucketPrefix Bucket prefix to match - * @param prevBucket Buckets are listed after this bucket - * @return {@code Iterator} - */ - public Iterator listBuckets(String bucketPrefix, - String prevBucket) { - return new BucketIterator(bucketPrefix, prevBucket); - } - - /** - * Deletes the Bucket from this Volume. - * @param bucketName Name of the Bucket - * @throws IOException - */ - public void deleteBucket(String bucketName) throws IOException { - proxy.deleteBucket(name, bucketName); - } - - - /** - * An Iterator to iterate over {@link OzoneBucket} list. - */ - private class BucketIterator implements Iterator { - - private String bucketPrefix = null; - - private Iterator currentIterator; - private OzoneBucket currentValue; - - - /** - * Creates an Iterator to iterate over all buckets after prevBucket in the volume. - * If prevBucket is null it iterates from the first bucket in the volume. - * The returned buckets match bucket prefix. - * @param bucketPrefix - */ - public BucketIterator(String bucketPrefix, String prevBucket) { - this.bucketPrefix = bucketPrefix; - this.currentValue = null; - this.currentIterator = getNextListOfBuckets(prevBucket).iterator(); - } - - @Override - public boolean hasNext() { - if(!currentIterator.hasNext()) { - currentIterator = getNextListOfBuckets( - currentValue != null ? currentValue.getName() : null) - .iterator(); - } - return currentIterator.hasNext(); - } - - @Override - public OzoneBucket next() { - if(hasNext()) { - currentValue = currentIterator.next(); - return currentValue; - } - throw new NoSuchElementException(); - } - - /** - * Gets the next set of bucket list using proxy. 
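BucketIterator pages through the listing lazily: each call to proxy.listBuckets returns at most listCacheSize buckets starting after the last one seen, and hasNext() only fetches the next page once the current one is exhausted. The same shape as a generic, JDK-only sketch (the page-fetching function stands in for the proxy call):

    import java.util.Iterator;
    import java.util.List;
    import java.util.NoSuchElementException;
    import java.util.function.Function;

    // Lazily pages through a listing API. fetchPage.apply(prevName) returns
    // the next batch of names strictly after prevName, empty when exhausted.
    public class PagedIterator implements Iterator<String> {
      private final Function<String, List<String>> fetchPage;
      private Iterator<String> current;
      private String lastSeen;

      public PagedIterator(Function<String, List<String>> fetchPage) {
        this.fetchPage = fetchPage;
        this.current = fetchPage.apply(null).iterator();
      }

      @Override
      public boolean hasNext() {
        if (!current.hasNext()) {
          // Current page drained: ask for the page after the last item seen.
          current = fetchPage.apply(lastSeen).iterator();
        }
        return current.hasNext();
      }

      @Override
      public String next() {
        if (!hasNext()) {
          throw new NoSuchElementException();
        }
        lastSeen = current.next();
        return lastSeen;
      }
    }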
- * @param prevBucket - * @return {@code List} - */ - private List getNextListOfBuckets(String prevBucket) { - try { - return proxy.listBuckets(name, bucketPrefix, prevBucket, listCacheSize); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - } -} \ No newline at end of file diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java deleted file mode 100644 index ae1cfccd420ef..0000000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java +++ /dev/null @@ -1,128 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client; - -import org.apache.hadoop.ozone.OzoneAcl; - -import java.io.IOException; -import java.util.List; - -/** - * This class encapsulates the arguments that are - * required for creating a volume. - */ -public final class VolumeArgs { - - private final String admin; - private final String owner; - private final String quota; - private final List acls; - - /** - * Private constructor, constructed via builder. - * @param admin Administrator's name. - * @param owner Volume owner's name - * @param quota Volume Quota. - * @param acls User to access rights map. - */ - private VolumeArgs(String admin, String owner, - String quota, List acls) { - this.admin = admin; - this.owner = owner; - this.quota = quota; - this.acls = acls; - } - - /** - * Returns the Admin Name. - * @return String. - */ - public String getAdmin() { - return admin; - } - - /** - * Returns the owner Name. - * @return String - */ - public String getOwner() { - return owner; - } - - /** - * Returns Volume Quota. - * @return Quota. - */ - public String getQuota() { - return quota; - } - - public List getAcls() { - return acls; - } - /** - * Returns new builder class that builds a OmVolumeArgs. - * - * @return Builder - */ - public static VolumeArgs.Builder newBuilder() { - return new VolumeArgs.Builder(); - } - - /** - * Builder for OmVolumeArgs. - */ - public static class Builder { - private String adminName; - private String ownerName; - private String volumeQuota; - private List listOfAcls; - - - public VolumeArgs.Builder setAdmin(String admin) { - this.adminName = admin; - return this; - } - - public VolumeArgs.Builder setOwner(String owner) { - this.ownerName = owner; - return this; - } - - public VolumeArgs.Builder setQuota(String quota) { - this.volumeQuota = quota; - return this; - } - - public VolumeArgs.Builder setAcls(List acls) - throws IOException { - this.listOfAcls = acls; - return this; - } - - /** - * Constructs a CreateVolumeArgument. - * @return CreateVolumeArgs. 
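Put together, the builder assembled here is used like this (all names and the quota string are illustrative placeholders; the quota format is whatever the server-side parser accepts):

    import org.apache.hadoop.ozone.client.VolumeArgs;

    public class VolumeArgsExample {
      public static void main(String[] args) {
        VolumeArgs volumeArgs = VolumeArgs.newBuilder()
            .setAdmin("hdfs")      // illustrative admin principal
            .setOwner("alice")     // illustrative owner
            .setQuota("100GB")     // illustrative quota string
            .build();
        System.out.println(volumeArgs.getOwner() + " owns a "
            + volumeArgs.getQuota() + " volume");
      }
    }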
- */ - public VolumeArgs build() { - return new VolumeArgs(adminName, ownerName, volumeQuota, listOfAcls); - } - } - -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java deleted file mode 100644 index 2b10578824a5b..0000000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java +++ /dev/null @@ -1,318 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.client.io; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.fs.FSExceptionMessages; -import org.apache.hadoop.fs.Seekable; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdds.scm.storage.ChunkInputStream; -import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; -import org.apache.ratis.util.Preconditions; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.EOFException; -import java.io.IOException; -import java.io.InputStream; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -/** - * Maintaining a list of ChunkInputStream. Read based on offset. - */ -public class ChunkGroupInputStream extends InputStream implements Seekable { - - private static final Logger LOG = - LoggerFactory.getLogger(ChunkGroupInputStream.class); - - private static final int EOF = -1; - - private final ArrayList streamEntries; - // streamOffset[i] stores the offset at which chunkInputStream i stores - // data in the key - private long[] streamOffset = null; - private int currentStreamIndex; - private long length = 0; - private boolean closed = false; - private String key; - - public ChunkGroupInputStream() { - streamEntries = new ArrayList<>(); - currentStreamIndex = 0; - } - - @VisibleForTesting - public synchronized int getCurrentStreamIndex() { - return currentStreamIndex; - } - - @VisibleForTesting - public long getRemainingOfIndex(int index) throws IOException { - return streamEntries.get(index).getRemaining(); - } - - /** - * Append another stream to the end of the list. - * - * @param stream the stream instance. 
- * @param streamLength the max number of bytes that should be written to this - * stream. - */ - public synchronized void addStream(ChunkInputStream stream, - long streamLength) { - streamEntries.add(new ChunkInputStreamEntry(stream, streamLength)); - } - - - @Override - public synchronized int read() throws IOException { - byte[] buf = new byte[1]; - if (read(buf, 0, 1) == EOF) { - return EOF; - } - return Byte.toUnsignedInt(buf[0]); - } - - @Override - public synchronized int read(byte[] b, int off, int len) throws IOException { - checkNotClosed(); - if (b == null) { - throw new NullPointerException(); - } - if (off < 0 || len < 0 || len > b.length - off) { - throw new IndexOutOfBoundsException(); - } - if (len == 0) { - return 0; - } - int totalReadLen = 0; - while (len > 0) { - if (streamEntries.size() <= currentStreamIndex) { - return totalReadLen == 0 ? EOF : totalReadLen; - } - ChunkInputStreamEntry current = streamEntries.get(currentStreamIndex); - int numBytesToRead = Math.min(len, (int)current.getRemaining()); - int numBytesRead = current.read(b, off, numBytesToRead); - if (numBytesRead != numBytesToRead) { - // This implies that there is either data loss or corruption in the - // chunk entries. Even EOF in the current stream would be covered in - // this case. - throw new IOException(String.format( - "Inconsistent read for blockID=%s length=%d numBytesRead=%d", - current.chunkInputStream.getBlockID(), current.length, - numBytesRead)); - } - totalReadLen += numBytesRead; - off += numBytesRead; - len -= numBytesRead; - if (current.getRemaining() <= 0) { - currentStreamIndex += 1; - } - } - return totalReadLen; - } - - @Override - public void seek(long pos) throws IOException { - checkNotClosed(); - if (pos < 0 || pos >= length) { - if (pos == 0) { - // It is possible for length and pos to be zero in which case - // seek should return instead of throwing exception - return; - } - throw new EOFException( - "EOF encountered at pos: " + pos + " for key: " + key); - } - Preconditions.assertTrue(currentStreamIndex >= 0); - if (currentStreamIndex >= streamEntries.size()) { - currentStreamIndex = Arrays.binarySearch(streamOffset, pos); - } else if (pos < streamOffset[currentStreamIndex]) { - currentStreamIndex = - Arrays.binarySearch(streamOffset, 0, currentStreamIndex, pos); - } else if (pos >= streamOffset[currentStreamIndex] + streamEntries - .get(currentStreamIndex).length) { - currentStreamIndex = Arrays - .binarySearch(streamOffset, currentStreamIndex + 1, - streamEntries.size(), pos); - } - if (currentStreamIndex < 0) { - // Binary search returns -insertionPoint - 1 if element is not present - // in the array. insertionPoint is the point at which element would be - // inserted in the sorted array. We need to adjust the currentStreamIndex - // accordingly so that currentStreamIndex = insertionPoint - 1 - currentStreamIndex = -currentStreamIndex - 2; - } - // seek to the proper offset in the ChunkInputStream - streamEntries.get(currentStreamIndex) - .seek(pos - streamOffset[currentStreamIndex]); - } - - @Override - public long getPos() throws IOException { - return length == 0 ? 0 : - streamOffset[currentStreamIndex] + streamEntries.get(currentStreamIndex) - .getPos(); - } - - @Override - public boolean seekToNewSource(long targetPos) throws IOException { - return false; - } - - @Override - public int available() throws IOException { - checkNotClosed(); - long remaining = length - getPos(); - return remaining <= Integer.MAX_VALUE ? 
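The seek() above relies on the java.util.Arrays.binarySearch contract: when pos is not an exact block start, binarySearch returns -(insertionPoint) - 1, so the -currentStreamIndex - 2 adjustment recovers insertionPoint - 1, the index of the block whose byte range contains pos. A minimal JDK-only demonstration of that arithmetic:

    import java.util.Arrays;

    public class SeekIndexDemo {
      // Returns the index of the stream entry whose byte range contains pos,
      // using the same binarySearch adjustment as ChunkGroupInputStream.seek().
      static int entryIndexFor(long[] streamOffset, long pos) {
        int idx = Arrays.binarySearch(streamOffset, pos);
        if (idx < 0) {
          // binarySearch returned -(insertionPoint) - 1; the containing
          // entry is the one just before the insertion point.
          idx = -idx - 2;
        }
        return idx;
      }

      public static void main(String[] args) {
        long[] offsets = {0L, 4096L, 8192L};              // three 4 KB blocks
        System.out.println(entryIndexFor(offsets, 0));    // 0 (exact start)
        System.out.println(entryIndexFor(offsets, 5000)); // 1 (inside block 1)
        System.out.println(entryIndexFor(offsets, 8192)); // 2 (exact start)
      }
    }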
(int) remaining : Integer.MAX_VALUE; - } - - @Override - public void close() throws IOException { - closed = true; - for (int i = 0; i < streamEntries.size(); i++) { - streamEntries.get(i).close(); - } - } - - /** - * Encapsulates ChunkInputStream. - */ - public static class ChunkInputStreamEntry extends InputStream - implements Seekable { - - private final ChunkInputStream chunkInputStream; - private final long length; - - public ChunkInputStreamEntry(ChunkInputStream chunkInputStream, - long length) { - this.chunkInputStream = chunkInputStream; - this.length = length; - } - - synchronized long getRemaining() throws IOException { - return length - getPos(); - } - - @Override - public synchronized int read(byte[] b, int off, int len) - throws IOException { - int readLen = chunkInputStream.read(b, off, len); - return readLen; - } - - @Override - public synchronized int read() throws IOException { - int data = chunkInputStream.read(); - return data; - } - - @Override - public synchronized void close() throws IOException { - chunkInputStream.close(); - } - - @Override - public void seek(long pos) throws IOException { - chunkInputStream.seek(pos); - } - - @Override - public long getPos() throws IOException { - return chunkInputStream.getPos(); - } - - @Override - public boolean seekToNewSource(long targetPos) throws IOException { - return false; - } - } - - public static LengthInputStream getFromOmKeyInfo( - OmKeyInfo keyInfo, - XceiverClientManager xceiverClientManager, - StorageContainerLocationProtocolClientSideTranslatorPB - storageContainerLocationClient, - String requestId) throws IOException { - long length = 0; - long containerKey; - ChunkGroupInputStream groupInputStream = new ChunkGroupInputStream(); - groupInputStream.key = keyInfo.getKeyName(); - List keyLocationInfos = - keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly(); - groupInputStream.streamOffset = new long[keyLocationInfos.size()]; - for (int i = 0; i < keyLocationInfos.size(); i++) { - OmKeyLocationInfo omKeyLocationInfo = keyLocationInfos.get(i); - BlockID blockID = omKeyLocationInfo.getBlockID(); - long containerID = blockID.getContainerID(); - ContainerWithPipeline containerWithPipeline = - storageContainerLocationClient.getContainerWithPipeline(containerID); - XceiverClientSpi xceiverClient = xceiverClientManager - .acquireClient(containerWithPipeline.getPipeline(), containerID); - boolean success = false; - containerKey = omKeyLocationInfo.getLocalID(); - try { - LOG.debug("get key accessing {} {}", - containerID, containerKey); - groupInputStream.streamOffset[i] = length; - ContainerProtos.DatanodeBlockID datanodeBlockID = blockID - .getDatanodeBlockIDProtobuf(); - ContainerProtos.GetBlockResponseProto response = ContainerProtocolCalls - .getBlock(xceiverClient, datanodeBlockID, requestId); - List chunks = - response.getBlockData().getChunksList(); - for (ContainerProtos.ChunkInfo chunk : chunks) { - length += chunk.getLen(); - } - success = true; - ChunkInputStream inputStream = new ChunkInputStream( - omKeyLocationInfo.getBlockID(), xceiverClientManager, xceiverClient, - chunks, requestId); - groupInputStream.addStream(inputStream, - omKeyLocationInfo.getLength()); - } finally { - if (!success) { - xceiverClientManager.releaseClient(xceiverClient); - } - } - } - groupInputStream.length = length; - return new LengthInputStream(groupInputStream, length); - } - - /** - * Verify that the input stream is open. Non blocking; this gives - * the last state of the volatile {@link #closed} field. 
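getFromOmKeyInfo walks the key's block locations, asks each datanode for the block's chunk list to learn its length, and records in streamOffset[i] the key offset at which block i begins, i.e. the running sum of the preceding block lengths. Just that bookkeeping, reduced to a toy sketch over plain lengths:

    public final class StreamOffsetSketch {
      private StreamOffsetSketch() {
      }

      // Given per-block lengths, compute the key offset at which each
      // block starts; this is the streamOffset[] table that seek() later
      // binary-searches.
      public static long[] toStreamOffsets(long[] blockLengths) {
        long[] streamOffset = new long[blockLengths.length];
        long length = 0;
        for (int i = 0; i < blockLengths.length; i++) {
          streamOffset[i] = length;
          length += blockLengths[i];
        }
        return streamOffset;
      }
    }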
- * @throws IOException if the connection is closed. - */ - private void checkNotClosed() throws IOException { - if (closed) { - throw new IOException( - ": " + FSExceptionMessages.STREAM_IS_CLOSED + " Key: " + key); - } - } -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java deleted file mode 100644 index 3742a9a5d186d..0000000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java +++ /dev/null @@ -1,733 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.client.io; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.fs.FSExceptionMessages; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.io.retry.RetryPolicy; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.helpers.OpenKeySession; -import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.hadoop.hdds.scm.container.common.helpers - .StorageContainerException; -import org.apache.hadoop.hdds.scm.protocolPB - .StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdds.scm.storage.ChunkOutputStream; -import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.io.InterruptedIOException; -import java.io.OutputStream; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; -import java.util.Optional; -import java.util.ListIterator; - -/** - * Maintaining a list of ChunkInputStream. Write based on offset. 
- * - * Note that this may write to multiple containers in one write call. In case - * that first container succeeded but later ones failed, the succeeded writes - * are not rolled back. - * - * TODO : currently not support multi-thread access. - */ -public class ChunkGroupOutputStream extends OutputStream { - - public static final Logger LOG = - LoggerFactory.getLogger(ChunkGroupOutputStream.class); - - // array list's get(index) is O(1) - private final ArrayList streamEntries; - private int currentStreamIndex; - private long byteOffset; - private final OzoneManagerProtocolClientSideTranslatorPB omClient; - private final - StorageContainerLocationProtocolClientSideTranslatorPB scmClient; - private final OmKeyArgs keyArgs; - private final long openID; - private final XceiverClientManager xceiverClientManager; - private final int chunkSize; - private final String requestID; - private boolean closed; - private final RetryPolicy retryPolicy; - /** - * A constructor for testing purpose only. - */ - @VisibleForTesting - public ChunkGroupOutputStream() { - streamEntries = new ArrayList<>(); - omClient = null; - scmClient = null; - keyArgs = null; - openID = -1; - xceiverClientManager = null; - chunkSize = 0; - requestID = null; - closed = false; - retryPolicy = null; - } - - /** - * For testing purpose only. Not building output stream from blocks, but - * taking from externally. - * - * @param outputStream - * @param length - */ - @VisibleForTesting - public void addStream(OutputStream outputStream, long length) { - streamEntries.add(new ChunkOutputStreamEntry(outputStream, length)); - } - - @VisibleForTesting - public List getStreamEntries() { - return streamEntries; - } - - public List getLocationInfoList() { - List locationInfoList = new ArrayList<>(); - for (ChunkOutputStreamEntry streamEntry : streamEntries) { - OmKeyLocationInfo info = - new OmKeyLocationInfo.Builder().setBlockID(streamEntry.blockID) - .setShouldCreateContainer(false) - .setLength(streamEntry.currentPosition).setOffset(0).build(); - locationInfoList.add(info); - } - return locationInfoList; - } - - public ChunkGroupOutputStream( - OpenKeySession handler, XceiverClientManager xceiverClientManager, - StorageContainerLocationProtocolClientSideTranslatorPB scmClient, - OzoneManagerProtocolClientSideTranslatorPB omClient, - int chunkSize, String requestId, ReplicationFactor factor, - ReplicationType type, RetryPolicy retryPolicy) throws IOException { - this.streamEntries = new ArrayList<>(); - this.currentStreamIndex = 0; - this.byteOffset = 0; - this.omClient = omClient; - this.scmClient = scmClient; - OmKeyInfo info = handler.getKeyInfo(); - this.keyArgs = new OmKeyArgs.Builder() - .setVolumeName(info.getVolumeName()) - .setBucketName(info.getBucketName()) - .setKeyName(info.getKeyName()) - .setType(type) - .setFactor(factor) - .setDataSize(info.getDataSize()).build(); - this.openID = handler.getId(); - this.xceiverClientManager = xceiverClientManager; - this.chunkSize = chunkSize; - this.requestID = requestId; - this.retryPolicy = retryPolicy; - LOG.debug("Expecting open key with one block, but got" + - info.getKeyLocationVersions().size()); - } - - /** - * When a key is opened, it is possible that there are some blocks already - * allocated to it for this open session. In this case, to make use of these - * blocks, we need to add these blocks to stream entries. 
But, a key's version - * also includes blocks from previous versions, we need to avoid adding these - * old blocks to stream entries, because these old blocks should not be picked - * for write. To do this, the following method checks that, only those - * blocks created in this particular open version are added to stream entries. - * - * @param version the set of blocks that are pre-allocated. - * @param openVersion the version corresponding to the pre-allocation. - * @throws IOException - */ - public void addPreallocateBlocks(OmKeyLocationInfoGroup version, - long openVersion) throws IOException { - // server may return any number of blocks, (0 to any) - // only the blocks allocated in this open session (block createVersion - // equals to open session version) - for (OmKeyLocationInfo subKeyInfo : version.getLocationList()) { - if (subKeyInfo.getCreateVersion() == openVersion) { - checkKeyLocationInfo(subKeyInfo); - } - } - } - - private void checkKeyLocationInfo(OmKeyLocationInfo subKeyInfo) - throws IOException { - ContainerWithPipeline containerWithPipeline = scmClient - .getContainerWithPipeline(subKeyInfo.getContainerID()); - ContainerInfo container = containerWithPipeline.getContainerInfo(); - - XceiverClientSpi xceiverClient = - xceiverClientManager.acquireClient(containerWithPipeline.getPipeline(), - container.getContainerID()); - // create container if needed - if (subKeyInfo.getShouldCreateContainer()) { - try { - ContainerProtocolCalls.createContainer(xceiverClient, - container.getContainerID(), requestID); - scmClient.notifyObjectStageChange( - ObjectStageChangeRequestProto.Type.container, - subKeyInfo.getContainerID(), - ObjectStageChangeRequestProto.Op.create, - ObjectStageChangeRequestProto.Stage.complete); - } catch (StorageContainerException ex) { - if (ex.getResult().equals(Result.CONTAINER_EXISTS)) { - //container already exist, this should never happen - LOG.debug("Container {} already exists.", - container.getContainerID()); - } else { - LOG.error("Container creation failed for {}.", - container.getContainerID(), ex); - throw ex; - } - } - } - streamEntries.add(new ChunkOutputStreamEntry(subKeyInfo.getBlockID(), - keyArgs.getKeyName(), xceiverClientManager, xceiverClient, requestID, - chunkSize, subKeyInfo.getLength())); - } - - @VisibleForTesting - public long getByteOffset() { - return byteOffset; - } - - - @Override - public void write(int b) throws IOException { - byte[] buf = new byte[1]; - buf[0] = (byte) b; - write(buf, 0, 1); - } - - /** - * Try to write the bytes sequence b[off:off+len) to streams. - * - * NOTE: Throws exception if the data could not fit into the remaining space. - * In which case nothing will be written. - * TODO:May need to revisit this behaviour. 
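addPreallocateBlocks() keeps only the blocks minted by this open session: a key version can also carry blocks from earlier versions, and those must not be written to, so anything whose createVersion differs from the open session's version is skipped. The filter, reduced to a toy model (Block is a stand-in for OmKeyLocationInfo with only the field the filter reads):

    import java.util.ArrayList;
    import java.util.List;

    public final class PreallocateFilterSketch {
      static final class Block {
        final long createVersion;
        Block(long createVersion) {
          this.createVersion = createVersion;
        }
      }

      // Keep only blocks created by this open session.
      static List<Block> usable(List<Block> version, long openVersion) {
        List<Block> result = new ArrayList<>();
        for (Block b : version) {
          if (b.createVersion == openVersion) {
            result.add(b);
          }
        }
        return result;
      }

      private PreallocateFilterSketch() {
      }
    }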
- * - * @param b byte data - * @param off starting offset - * @param len length to write - * @throws IOException - */ - @Override - public void write(byte[] b, int off, int len) - throws IOException { - checkNotClosed(); - handleWrite(b, off, len); - } - - private void handleWrite(byte[] b, int off, int len) throws IOException { - if (b == null) { - throw new NullPointerException(); - } - if ((off < 0) || (off > b.length) || (len < 0) || - ((off + len) > b.length) || ((off + len) < 0)) { - throw new IndexOutOfBoundsException(); - } - if (len == 0) { - return; - } - int succeededAllocates = 0; - while (len > 0) { - if (streamEntries.size() <= currentStreamIndex) { - Preconditions.checkNotNull(omClient); - // allocate a new block, if a exception happens, log an error and - // throw exception to the caller directly, and the write fails. - try { - allocateNewBlock(currentStreamIndex); - succeededAllocates += 1; - } catch (IOException ioe) { - LOG.error("Try to allocate more blocks for write failed, already " + - "allocated " + succeededAllocates + " blocks for this write."); - throw ioe; - } - } - // in theory, this condition should never violate due the check above - // still do a sanity check. - Preconditions.checkArgument(currentStreamIndex < streamEntries.size()); - ChunkOutputStreamEntry current = streamEntries.get(currentStreamIndex); - int writeLen = Math.min(len, (int) current.getRemaining()); - try { - current.write(b, off, writeLen); - } catch (IOException ioe) { - if (checkIfContainerIsClosed(ioe)) { - handleCloseContainerException(current, currentStreamIndex); - continue; - } else { - throw ioe; - } - } - if (current.getRemaining() <= 0) { - // since the current block is already written close the stream. - handleFlushOrClose(true); - currentStreamIndex += 1; - } - len -= writeLen; - off += writeLen; - byteOffset += writeLen; - } - } - - private long getCommittedBlockLength(ChunkOutputStreamEntry streamEntry) - throws IOException { - long blockLength; - ContainerProtos.GetCommittedBlockLengthResponseProto responseProto; - RetryPolicy.RetryAction action; - int numRetries = 0; - while (true) { - try { - responseProto = ContainerProtocolCalls - .getCommittedBlockLength(streamEntry.xceiverClient, - streamEntry.blockID, requestID); - blockLength = responseProto.getBlockLength(); - return blockLength; - } catch (StorageContainerException sce) { - try { - action = retryPolicy.shouldRetry(sce, numRetries, 0, true); - } catch (Exception e) { - throw e instanceof IOException ? (IOException) e : new IOException(e); - } - if (action.action == RetryPolicy.RetryAction.RetryDecision.FAIL) { - if (action.reason != null) { - LOG.error( - "GetCommittedBlockLength request failed. " + action.reason, - sce); - } - throw sce; - } - - // Throw the exception if the thread is interrupted - if (Thread.currentThread().isInterrupted()) { - LOG.warn("Interrupted while trying for connection"); - throw sce; - } - Preconditions.checkArgument( - action.action == RetryPolicy.RetryAction.RetryDecision.RETRY); - try { - Thread.sleep(action.delayMillis); - } catch (InterruptedException e) { - throw (IOException) new InterruptedIOException( - "Interrupted: action=" + action + ", retry policy=" + retryPolicy) - .initCause(e); - } - numRetries++; - LOG.trace("Retrying GetCommittedBlockLength request. 
Already tried " - + numRetries + " time(s); retry policy is " + retryPolicy); - continue; - } - } - } - - /** - * Discards the subsequent pre allocated blocks and removes the streamEntries - * from the streamEntries list for the container which is closed. - * @param containerID id of the closed container - */ - private void discardPreallocatedBlocks(long containerID) { - // currentStreamIndex < streamEntries.size() signifies that, there are still - // pre allocated blocks available. - if (currentStreamIndex < streamEntries.size()) { - ListIterator streamEntryIterator = - streamEntries.listIterator(currentStreamIndex); - while (streamEntryIterator.hasNext()) { - if (streamEntryIterator.next().blockID.getContainerID() - == containerID) { - streamEntryIterator.remove(); - } - } - } - } - - /** - * It might be possible that the blocks pre allocated might never get written - * while the stream gets closed normally. In such cases, it would be a good - * idea to trim down the locationInfoList by removing the unused blocks if any - * so as only the used block info gets updated on OzoneManager during close. - */ - private void removeEmptyBlocks() { - if (currentStreamIndex < streamEntries.size()) { - ListIterator streamEntryIterator = - streamEntries.listIterator(currentStreamIndex); - while (streamEntryIterator.hasNext()) { - if (streamEntryIterator.next().currentPosition == 0) { - streamEntryIterator.remove(); - } - } - } - } - /** - * It performs following actions : - * a. Updates the committed length at datanode for the current stream in - * datanode. - * b. Reads the data from the underlying buffer and writes it the next stream. - * - * @param streamEntry StreamEntry - * @param streamIndex Index of the entry - * @throws IOException Throws IOexception if Write fails - */ - private void handleCloseContainerException(ChunkOutputStreamEntry streamEntry, - int streamIndex) throws IOException { - long committedLength = 0; - ByteBuffer buffer = streamEntry.getBuffer(); - if (buffer == null) { - // the buffer here will be null only when closeContainerException is - // hit while calling putKey during close on chunkOutputStream. - // Since closeContainer auto commit pending keys, no need to do - // anything here. - return; - } - - // In case where not a single chunk of data has been written to the Datanode - // yet. This block does not yet exist on the datanode but cached on the - // outputStream buffer. No need to call GetCommittedBlockLength here - // for this block associated with the stream here. - if (streamEntry.currentPosition >= chunkSize - || streamEntry.currentPosition != buffer.position()) { - committedLength = getCommittedBlockLength(streamEntry); - // update the length of the current stream - streamEntry.currentPosition = committedLength; - } - - if (buffer.position() > 0) { - // If the data is still cached in the underlying stream, we need to - // allocate new block and write this data in the datanode. The cached - // data in the buffer does not exceed chunkSize. - Preconditions.checkState(buffer.position() < chunkSize); - currentStreamIndex += 1; - // readjust the byteOffset value to the length actually been written. - byteOffset -= buffer.position(); - handleWrite(buffer.array(), 0, buffer.position()); - } - - // just clean up the current stream. Since the container is already closed, - // it will be auto committed. No need to call close again here. - streamEntry.cleanup(); - // This case will arise when while writing the first chunk itself fails. 
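The getCommittedBlockLength() loop above is the usual hand-rolled RetryPolicy driver: consult shouldRetry, fail fast on a FAIL decision or a pending interrupt, otherwise sleep for action.delayMillis and try again. Stripped of the RPC specifics, the skeleton is (the Callable stands in for the container call):

    import java.io.IOException;
    import java.io.InterruptedIOException;
    import java.util.concurrent.Callable;

    import org.apache.hadoop.io.retry.RetryPolicy;

    public final class RetryDriverSketch {
      private RetryDriverSketch() {
      }

      public static <T> T callWithRetry(Callable<T> call,
          RetryPolicy retryPolicy) throws Exception {
        int numRetries = 0;
        while (true) {
          try {
            return call.call();
          } catch (IOException e) {
            RetryPolicy.RetryAction action =
                retryPolicy.shouldRetry(e, numRetries, 0, true);
            if (action.action == RetryPolicy.RetryAction.RetryDecision.FAIL
                || Thread.currentThread().isInterrupted()) {
              throw e;
            }
            try {
              Thread.sleep(action.delayMillis);  // back off before retrying
            } catch (InterruptedException ie) {
              throw (IOException) new InterruptedIOException(
                  "Interrupted while retrying").initCause(ie);
            }
            numRetries++;
          }
        }
      }
    }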
- // In such case, the current block associated with the stream has no data - // written. Remove it from the current stream list. - if (committedLength == 0) { - streamEntries.remove(streamIndex); - Preconditions.checkArgument(currentStreamIndex != 0); - currentStreamIndex -= 1; - } - // discard subsequent pre allocated blocks from the streamEntries list - // from the closed container - discardPreallocatedBlocks(streamEntry.blockID.getContainerID()); - } - - private boolean checkIfContainerIsClosed(IOException ioe) { - return Optional.of(ioe.getCause()) - .filter(e -> e instanceof StorageContainerException) - .map(e -> (StorageContainerException) e) - .filter(sce -> sce.getResult() == Result.CLOSED_CONTAINER_IO) - .isPresent(); - } - - private long getKeyLength() { - return streamEntries.parallelStream().mapToLong(e -> e.currentPosition) - .sum(); - } - - /** - * Contact OM to get a new block. Set the new block with the index (e.g. - * first block has index = 0, second has index = 1 etc.) - * - * The returned block is made to new ChunkOutputStreamEntry to write. - * - * @param index the index of the block. - * @throws IOException - */ - private void allocateNewBlock(int index) throws IOException { - OmKeyLocationInfo subKeyInfo = omClient.allocateBlock(keyArgs, openID); - checkKeyLocationInfo(subKeyInfo); - } - - @Override - public void flush() throws IOException { - checkNotClosed(); - handleFlushOrClose(false); - } - - /** - * Close or Flush the latest outputStream. - * @param close Flag which decides whether to call close or flush on the - * outputStream. - * @throws IOException In case, flush or close fails with exception. - */ - private void handleFlushOrClose(boolean close) throws IOException { - if (streamEntries.size() == 0) { - return; - } - int size = streamEntries.size(); - int streamIndex = - currentStreamIndex >= size ? size - 1 : currentStreamIndex; - ChunkOutputStreamEntry entry = streamEntries.get(streamIndex); - if (entry != null) { - try { - if (close) { - entry.close(); - } else { - entry.flush(); - } - } catch (IOException ioe) { - if (checkIfContainerIsClosed(ioe)) { - // This call will allocate a new streamEntry and write the Data. - // Close needs to be retried on the newly allocated streamEntry as - // as well. - handleCloseContainerException(entry, streamIndex); - handleFlushOrClose(close); - } else { - throw ioe; - } - } - } - } - - /** - * Commit the key to OM, this will add the blocks as the new key blocks. - * - * @throws IOException - */ - @Override - public void close() throws IOException { - if (closed) { - return; - } - closed = true; - handleFlushOrClose(true); - if (keyArgs != null) { - // in test, this could be null - removeEmptyBlocks(); - Preconditions.checkState(byteOffset == getKeyLength()); - keyArgs.setDataSize(byteOffset); - keyArgs.setLocationInfoList(getLocationInfoList()); - omClient.commitKey(keyArgs, openID); - } else { - LOG.warn("Closing ChunkGroupOutputStream, but key args is null"); - } - } - - /** - * Builder class of ChunkGroupOutputStream. 
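One subtlety in checkIfContainerIsClosed() above: Optional.of(ioe.getCause()) throws NullPointerException whenever the IOException carries no cause, since Optional.of rejects null. A null-safe variant of the same predicate would use Optional.ofNullable (a sketch; imports as used elsewhere in this file):

    import java.io.IOException;
    import java.util.Optional;

    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result;
    import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;

    final class ClosedContainerCheck {
      private ClosedContainerCheck() {
      }

      static boolean isClosedContainerException(IOException ioe) {
        return Optional.ofNullable(ioe.getCause())  // tolerate a null cause
            .filter(e -> e instanceof StorageContainerException)
            .map(e -> (StorageContainerException) e)
            .filter(sce -> sce.getResult() == Result.CLOSED_CONTAINER_IO)
            .isPresent();
      }
    }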
- */ - public static class Builder { - private OpenKeySession openHandler; - private XceiverClientManager xceiverManager; - private StorageContainerLocationProtocolClientSideTranslatorPB scmClient; - private OzoneManagerProtocolClientSideTranslatorPB omClient; - private int chunkSize; - private String requestID; - private ReplicationType type; - private ReplicationFactor factor; - private RetryPolicy retryPolicy; - - public Builder setHandler(OpenKeySession handler) { - this.openHandler = handler; - return this; - } - - public Builder setXceiverClientManager(XceiverClientManager manager) { - this.xceiverManager = manager; - return this; - } - - public Builder setScmClient( - StorageContainerLocationProtocolClientSideTranslatorPB client) { - this.scmClient = client; - return this; - } - - public Builder setOmClient( - OzoneManagerProtocolClientSideTranslatorPB client) { - this.omClient = client; - return this; - } - - public Builder setChunkSize(int size) { - this.chunkSize = size; - return this; - } - - public Builder setRequestID(String id) { - this.requestID = id; - return this; - } - - public Builder setType(ReplicationType replicationType) { - this.type = replicationType; - return this; - } - - public Builder setFactor(ReplicationFactor replicationFactor) { - this.factor = replicationFactor; - return this; - } - - public ChunkGroupOutputStream build() throws IOException { - return new ChunkGroupOutputStream(openHandler, xceiverManager, scmClient, - omClient, chunkSize, requestID, factor, type, retryPolicy); - } - - public Builder setRetryPolicy(RetryPolicy rPolicy) { - this.retryPolicy = rPolicy; - return this; - } - - } - - private static class ChunkOutputStreamEntry extends OutputStream { - private OutputStream outputStream; - private final BlockID blockID; - private final String key; - private final XceiverClientManager xceiverClientManager; - private final XceiverClientSpi xceiverClient; - private final String requestId; - private final int chunkSize; - // total number of bytes that should be written to this stream - private final long length; - // the current position of this stream 0 <= currentPosition < length - private long currentPosition; - - ChunkOutputStreamEntry(BlockID blockID, String key, - XceiverClientManager xceiverClientManager, - XceiverClientSpi xceiverClient, String requestId, int chunkSize, - long length) { - this.outputStream = null; - this.blockID = blockID; - this.key = key; - this.xceiverClientManager = xceiverClientManager; - this.xceiverClient = xceiverClient; - this.requestId = requestId; - this.chunkSize = chunkSize; - - this.length = length; - this.currentPosition = 0; - } - - /** - * For testing purpose, taking a some random created stream instance. 
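The Builder mirrors the long constructor one setter per argument; a client wires it up roughly like this (an illustrative fragment only: every handle below is assumed to come from the surrounding client, and ReplicationType/ReplicationFactor are the HddsProtos enums imported above):

    // Illustrative wiring; handler, xceiverClientManager, scmClient,
    // omClient, chunkSize, requestId and retryPolicy are placeholders
    // obtained elsewhere in the client.
    ChunkGroupOutputStream out = new ChunkGroupOutputStream.Builder()
        .setHandler(handler)
        .setXceiverClientManager(xceiverClientManager)
        .setScmClient(scmClient)
        .setOmClient(omClient)
        .setChunkSize(chunkSize)
        .setRequestID(requestId)
        .setType(ReplicationType.RATIS)
        .setFactor(ReplicationFactor.THREE)
        .setRetryPolicy(retryPolicy)
        .build();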
- * @param outputStream a existing writable output stream - * @param length the length of data to write to the stream - */ - ChunkOutputStreamEntry(OutputStream outputStream, long length) { - this.outputStream = outputStream; - this.blockID = null; - this.key = null; - this.xceiverClientManager = null; - this.xceiverClient = null; - this.requestId = null; - this.chunkSize = -1; - - this.length = length; - this.currentPosition = 0; - } - - long getLength() { - return length; - } - - long getRemaining() { - return length - currentPosition; - } - - private void checkStream() { - if (this.outputStream == null) { - this.outputStream = new ChunkOutputStream(blockID, - key, xceiverClientManager, xceiverClient, - requestId, chunkSize); - } - } - - @Override - public void write(int b) throws IOException { - checkStream(); - outputStream.write(b); - this.currentPosition += 1; - } - - @Override - public void write(byte[] b, int off, int len) throws IOException { - checkStream(); - outputStream.write(b, off, len); - this.currentPosition += len; - } - - @Override - public void flush() throws IOException { - if (this.outputStream != null) { - this.outputStream.flush(); - } - } - - @Override - public void close() throws IOException { - if (this.outputStream != null) { - this.outputStream.close(); - } - } - - ByteBuffer getBuffer() throws IOException { - if (this.outputStream instanceof ChunkOutputStream) { - ChunkOutputStream out = (ChunkOutputStream) this.outputStream; - return out.getBuffer(); - } - throw new IOException("Invalid Output Stream for Key: " + key); - } - - public void cleanup() { - checkStream(); - if (this.outputStream instanceof ChunkOutputStream) { - ChunkOutputStream out = (ChunkOutputStream) this.outputStream; - out.cleanup(); - } - } - - } - - /** - * Verify that the output stream is open. Non blocking; this gives - * the last state of the volatile {@link #closed} field. - * @throws IOException if the connection is closed. - */ - private void checkNotClosed() throws IOException { - if (closed) { - throw new IOException( - ": " + FSExceptionMessages.STREAM_IS_CLOSED + " Key: " + keyArgs - .getKeyName()); - } - } -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneInputStream.java deleted file mode 100644 index e1f65e69a8682..0000000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneInputStream.java +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
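ChunkOutputStreamEntry defers building the real ChunkOutputStream until the first write via checkStream(), so no datanode stream is opened for a block that never receives data. The lazy-initialization shape, in miniature:

    import java.io.IOException;
    import java.io.OutputStream;
    import java.util.function.Supplier;

    // Defers creating the wrapped stream until the first byte is written.
    public class LazyOutputStream extends OutputStream {
      private final Supplier<OutputStream> factory;
      private OutputStream delegate;

      public LazyOutputStream(Supplier<OutputStream> factory) {
        this.factory = factory;
      }

      private void checkStream() {
        if (delegate == null) {
          delegate = factory.get();  // first write opens the real stream
        }
      }

      @Override
      public void write(int b) throws IOException {
        checkStream();
        delegate.write(b);
      }

      @Override
      public void flush() throws IOException {
        if (delegate != null) {
          delegate.flush();
        }
      }

      @Override
      public void close() throws IOException {
        if (delegate != null) {      // never opened: nothing to close
          delegate.close();
        }
      }
    }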
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.client.io; - -import org.apache.hadoop.hdds.scm.storage.ChunkInputStream; - -import java.io.IOException; -import java.io.InputStream; - -/** - * OzoneInputStream is used to read data from Ozone. - * It uses SCM's {@link ChunkInputStream} for reading the data. - */ -public class OzoneInputStream extends InputStream { - - private final InputStream inputStream; - - /** - * Constructs OzoneInputStream with ChunkInputStream. - * - * @param inputStream - */ - public OzoneInputStream(InputStream inputStream) { - this.inputStream = inputStream; - } - - @Override - public int read() throws IOException { - return inputStream.read(); - } - - @Override - public int read(byte[] b, int off, int len) throws IOException { - return inputStream.read(b, off, len); - } - - @Override - public synchronized void close() throws IOException { - inputStream.close(); - } - - @Override - public int available() throws IOException { - return inputStream.available(); - } - - public InputStream getInputStream() { - return inputStream; - } -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneOutputStream.java deleted file mode 100644 index 5369220a43354..0000000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneOutputStream.java +++ /dev/null @@ -1,64 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.client.io; - -import java.io.IOException; -import java.io.OutputStream; - -/** - * OzoneOutputStream is used to write data into Ozone. - * It uses SCM's {@link ChunkGroupOutputStream} for writing the data. - */ -public class OzoneOutputStream extends OutputStream { - - private final OutputStream outputStream; - - /** - * Constructs OzoneOutputStream with ChunkGroupOutputStream. - * - * @param outputStream - */ - public OzoneOutputStream(OutputStream outputStream) { - this.outputStream = outputStream; - } - - @Override - public void write(int b) throws IOException { - outputStream.write(b); - } - - @Override - public void write(byte[] b, int off, int len) throws IOException { - outputStream.write(b, off, len); - } - - @Override - public synchronized void flush() throws IOException { - outputStream.flush(); - } - - @Override - public synchronized void close() throws IOException { - //commitKey can be done here, if needed. - outputStream.close(); - } - - public OutputStream getOutputStream() { - return outputStream; - } -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/package-info.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/package-info.java deleted file mode 100644 index 493ece8074e33..0000000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.io; - -/** - * This package contains Ozone I/O classes. - */ diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/package-info.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/package-info.java deleted file mode 100644 index 7e2591a20ee83..0000000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client; - -/** - * This package contains Ozone Client classes. - */ \ No newline at end of file diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java deleted file mode 100644 index 008b69d2d165d..0000000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java +++ /dev/null @@ -1,329 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.protocol; - -import org.apache.hadoop.fs.StorageType; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.client.*; -import org.apache.hadoop.hdds.client.OzoneQuota; -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.ozone.client.io.OzoneInputStream; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; - -import java.io.IOException; -import java.util.List; - -/** - * An implementer of this interface is capable of connecting to Ozone Cluster - * and perform client operations. The protocol used for communication is - * determined by the implementation class specified by - * property ozone.client.protocol. The build-in implementation - * includes: {@link org.apache.hadoop.ozone.client.rpc.RpcClient} for RPC and - * {@link org.apache.hadoop.ozone.client.rest.RestClient} for REST. - */ -public interface ClientProtocol { - - /** - * Creates a new Volume. - * @param volumeName Name of the Volume - * @throws IOException - */ - void createVolume(String volumeName) - throws IOException; - - /** - * Creates a new Volume with properties set in VolumeArgs. - * @param volumeName Name of the Volume - * @param args Properties to be set for the Volume - * @throws IOException - */ - void createVolume(String volumeName, VolumeArgs args) - throws IOException; - - /** - * Sets the owner of volume. - * @param volumeName Name of the Volume - * @param owner to be set for the Volume - * @throws IOException - */ - void setVolumeOwner(String volumeName, String owner) throws IOException; - - /** - * Set Volume Quota. 
- * @param volumeName Name of the Volume - * @param quota Quota to be set for the Volume - * @throws IOException - */ - void setVolumeQuota(String volumeName, OzoneQuota quota) - throws IOException; - - /** - * Returns {@link OzoneVolume}. - * @param volumeName Name of the Volume - * @return {@link OzoneVolume} - * @throws IOException - */ - OzoneVolume getVolumeDetails(String volumeName) - throws IOException; - - /** - * Checks if a Volume exists and the user with a role specified has access - * to the Volume. - * @param volumeName Name of the Volume - * @param acl requested acls which need to be checked for access - * @return Boolean - True if the user with a role can access the volume. - * This is possible for owners of the volume and admin users - * @throws IOException - */ - boolean checkVolumeAccess(String volumeName, OzoneAcl acl) - throws IOException; - - /** - * Deletes an empty Volume. - * @param volumeName Name of the Volume - * @throws IOException - */ - void deleteVolume(String volumeName) throws IOException; - - /** - * Lists all volumes in the cluster that match the volumePrefix; the - * size of the returned list depends on maxListResult. If volume prefix - * is null, returns all the volumes. The caller has to make multiple calls - * to read all volumes. - * - * @param volumePrefix Volume prefix to match - * @param prevVolume Starting point of the list, this volume is excluded - * @param maxListResult Max number of volumes to return. - * @return {@code List<OzoneVolume>} - * @throws IOException - */ - List<OzoneVolume> listVolumes(String volumePrefix, String prevVolume, - int maxListResult) - throws IOException; - - /** - * Lists all volumes in the cluster that are owned by the specified - * user and match the volumePrefix; the size of the returned list depends - * on maxListResult. If the user is null, returns volumes owned by the - * current user. If volume prefix is null, returns all the volumes. The - * caller has to make multiple calls to read all volumes. - * - * @param user User Name - * @param volumePrefix Volume prefix to match - * @param prevVolume Starting point of the list, this volume is excluded - * @param maxListResult Max number of volumes to return. - * @return {@code List<OzoneVolume>} - * @throws IOException - */ - List<OzoneVolume> listVolumes(String user, String volumePrefix, - String prevVolume, int maxListResult) - throws IOException; - - /** - * Creates a new Bucket in the Volume. - * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * @throws IOException - */ - void createBucket(String volumeName, String bucketName) - throws IOException; - - /** - * Creates a new Bucket in the Volume, with properties set in BucketArgs. - * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * @param bucketArgs Bucket Arguments - * @throws IOException - */ - void createBucket(String volumeName, String bucketName, - BucketArgs bucketArgs) - throws IOException; - - /** - * Adds ACLs to the Bucket. - * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * @param addAcls ACLs to be added - * @throws IOException - */ - void addBucketAcls(String volumeName, String bucketName, - List<OzoneAcl> addAcls) - throws IOException; - - /** - * Removes ACLs from a Bucket.
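 * <p>A sketch of the bucket ACL round trip, assuming the client from the
 * volume sketch above; the READ_WRITE rights constant is an assumption,
 * check OzoneAcl.OzoneACLRights for the exact values:
 * <pre>
 *   List<OzoneAcl> acls = new ArrayList<>();
 *   acls.add(new OzoneAcl(OzoneAcl.OzoneACLType.USER, "frodo",
 *       OzoneAcl.OzoneACLRights.READ_WRITE));
 *   client.createBucket("vol1", "bucket1");
 *   client.addBucketAcls("vol1", "bucket1", acls);
 *   client.removeBucketAcls("vol1", "bucket1", acls);
 * </pre>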
- * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * @param removeAcls ACLs to be removed - * @throws IOException - */ - void removeBucketAcls(String volumeName, String bucketName, - List<OzoneAcl> removeAcls) - throws IOException; - - - /** - * Enables or disables Bucket Versioning. - * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * @param versioning True to enable Versioning, False to disable. - * @throws IOException - */ - void setBucketVersioning(String volumeName, String bucketName, - Boolean versioning) - throws IOException; - - /** - * Sets the Storage Type of a Bucket. - * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * @param storageType StorageType to be set - * @throws IOException - */ - void setBucketStorageType(String volumeName, String bucketName, - StorageType storageType) - throws IOException; - - /** - * Deletes a bucket if it is empty. - * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * @throws IOException - */ - void deleteBucket(String volumeName, String bucketName) - throws IOException; - - /** - * Verifies that the bucket exists and the user has read access - * to it; throws an exception otherwise. - * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * @throws IOException - */ - void checkBucketAccess(String volumeName, String bucketName) - throws IOException; - - /** - * Returns {@link OzoneBucket}. - * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * @return {@link OzoneBucket} - * @throws IOException - */ - OzoneBucket getBucketDetails(String volumeName, String bucketName) - throws IOException; - - /** - * Returns the List of Buckets in the Volume that match the bucketPrefix; - * the size of the returned list depends on maxListResult. The caller has - * to make multiple calls to read all buckets. - * @param volumeName Name of the Volume - * @param bucketPrefix Bucket prefix to match - * @param prevBucket Starting point of the list, this bucket is excluded - * @param maxListResult Max number of buckets to return. - * @return {@code List<OzoneBucket>} - * @throws IOException - */ - List<OzoneBucket> listBuckets(String volumeName, String bucketPrefix, - String prevBucket, int maxListResult) - throws IOException; - - /** - * Writes a key in an existing bucket. - * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * @param keyName Name of the Key - * @param size Size of the data - * @param type Replication type for the Key - * @param factor Replication factor for the Key - * @return {@link OzoneOutputStream} - * @throws IOException - */ - OzoneOutputStream createKey(String volumeName, String bucketName, - String keyName, long size, ReplicationType type, - ReplicationFactor factor) - throws IOException; - - /** - * Reads a key from an existing bucket. - * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * @param keyName Name of the Key - * @return {@link OzoneInputStream} - * @throws IOException - */ - OzoneInputStream getKey(String volumeName, String bucketName, String keyName) - throws IOException; - - - /** - * Deletes an existing key. - * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * @param keyName Name of the Key - * @throws IOException - */ - void deleteKey(String volumeName, String bucketName, String keyName) - throws IOException; - - /** - * Renames an existing key within a bucket.
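 * <p>A sketch of the key write/read path declared above; the replication
 * enums come from org.apache.hadoop.hdds.client, the payload is
 * illustrative:
 * <pre>
 *   byte[] data = "hello ozone".getBytes(StandardCharsets.UTF_8);
 *   try (OzoneOutputStream out = client.createKey("vol1", "bucket1",
 *       "key1", data.length, ReplicationType.RATIS,
 *       ReplicationFactor.THREE)) {
 *     out.write(data);
 *   }
 *   try (OzoneInputStream in = client.getKey("vol1", "bucket1", "key1")) {
 *     byte[] buf = new byte[data.length];
 *     int read = in.read(buf);   // reads back what was written
 *   }
 * </pre>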
- * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * @param fromKeyName Name of the Key to be renamed - * @param toKeyName New name to be used for the Key - * @throws IOException - */ - void renameKey(String volumeName, String bucketName, String fromKeyName, - String toKeyName) throws IOException; - - /** - * Returns the list of Keys in {Volume/Bucket} that match the keyPrefix; - * the size of the returned list depends on maxListResult. The caller has - * to make multiple calls to read all keys. - * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * @param keyPrefix Key prefix to match - * @param prevKey Starting point of the list, this key is excluded - * @param maxListResult Max number of keys to return. - * @return {@code List<OzoneKey>} - * @throws IOException - */ - List<OzoneKey> listKeys(String volumeName, String bucketName, - String keyPrefix, String prevKey, int maxListResult) - throws IOException; - - - /** - * Gets details of the given Key. - * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * @param keyName Key name - * @return {@link OzoneKeyDetails} - * @throws IOException - */ - OzoneKeyDetails getKeyDetails(String volumeName, String bucketName, - String keyName) - throws IOException; - - /** - * Close and release the resources. - */ - void close() throws IOException; - -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/package-info.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/package-info.java deleted file mode 100644 index f4890a1e8b834..0000000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.protocol; - -/** - * This package contains Ozone client protocol library classes. - */ diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/DefaultRestServerSelector.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/DefaultRestServerSelector.java deleted file mode 100644 index abdc2fbe19a04..0000000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/DefaultRestServerSelector.java +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.
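 * A sketch of the paging contract that ClientProtocol#listKeys describes
 * above: pass the last key of each batch as prevKey until a short batch
 * comes back (getName() on OzoneKey is an assumed accessor):
 * <pre>
 *   String prev = null;
 *   List<OzoneKey> batch;
 *   do {
 *     batch = client.listKeys("vol1", "bucket1", "logs/", prev, 1000);
 *     for (OzoneKey key : batch) {
 *       System.out.println(key.getName());
 *     }
 *     if (!batch.isEmpty()) {
 *       prev = batch.get(batch.size() - 1).getName();
 *     }
 *   } while (batch.size() == 1000);
 * </pre>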
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.rest; - -import org.apache.hadoop.ozone.om.helpers.ServiceInfo; - -import java.util.List; - -import java.util.Random; - -/** - * Default selector randomly picks one of the REST Servers from the list. - */ -public class DefaultRestServerSelector implements RestServerSelector { - - @Override - public ServiceInfo getRestServer(List<ServiceInfo> restServices) { - return restServices.get( - new Random().nextInt(restServices.size())); - } -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/OzoneExceptionMapper.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/OzoneExceptionMapper.java deleted file mode 100644 index 6c479f7721a75..0000000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/OzoneExceptionMapper.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.rest; - - -import javax.ws.rs.core.Response; -import javax.ws.rs.ext.ExceptionMapper; - -import org.slf4j.MDC; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Class that represents various errors returned by the - * Object Layer. - */ -public class OzoneExceptionMapper implements ExceptionMapper<OzoneException> { - private static final Logger LOG = - LoggerFactory.getLogger(OzoneExceptionMapper.class); - - @Override - public Response toResponse(OzoneException exception) { - LOG.debug("Returning exception. ex: {}", exception.toJsonString()); - MDC.clear(); - return Response.status((int)exception.getHttpCode()) - .entity(exception.toJsonString()).build(); - } - -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java deleted file mode 100644 index fdd049a527118..0000000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java +++ /dev/null @@ -1,912 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.
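 * The OzoneExceptionMapper above only takes effect once registered with
 * the JAX-RS runtime; a sketch assuming a Jersey 2 ResourceConfig, with an
 * illustrative resource package name:
 * <pre>
 *   ResourceConfig config = new ResourceConfig()
 *       .register(OzoneExceptionMapper.class)      // OzoneException to HTTP
 *       .packages("org.apache.hadoop.ozone.web");  // resources to scan
 * </pre>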
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.rest; - -import com.fasterxml.jackson.core.type.TypeReference; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.base.Preconditions; -import com.google.common.base.Strings; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.StorageType; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.client.HddsClientUtils; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.client.*; -import org.apache.hadoop.hdds.client.OzoneQuota; -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.ozone.client.io.OzoneInputStream; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.client.protocol.ClientProtocol; -import org.apache.hadoop.ozone.client.rest.headers.Header; -import org.apache.hadoop.ozone.client.rest.response.BucketInfo; -import org.apache.hadoop.ozone.client.rest.response.KeyInfoDetails; -import org.apache.hadoop.ozone.client.rest.response.VolumeInfo; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.helpers.ServiceInfo; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServicePort; -import org.apache.hadoop.ozone.web.response.ListBuckets; -import org.apache.hadoop.ozone.web.response.ListKeys; -import org.apache.hadoop.ozone.web.response.ListVolumes; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.Time; -import org.apache.http.HttpEntity; -import org.apache.http.HttpHeaders; -import org.apache.http.HttpResponse; -import org.apache.http.client.config.RequestConfig; -import org.apache.http.client.methods.HttpDelete; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; -import org.apache.http.client.methods.HttpUriRequest; -import org.apache.http.client.utils.URIBuilder; -import org.apache.http.entity.InputStreamEntity; -import org.apache.http.impl.client.CloseableHttpClient; -import org.apache.http.impl.client.HttpClients; -import org.apache.http.impl.conn.PoolingHttpClientConnectionManager; -import org.apache.http.util.EntityUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.io.PipedInputStream; -import java.io.PipedOutputStream; -import java.net.InetSocketAddress; -import java.net.URI; -import java.net.URISyntaxException; -import java.text.ParseException; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.FutureTask; -import 
java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; - -import static java.net.HttpURLConnection.HTTP_CREATED; -import static java.net.HttpURLConnection.HTTP_OK; - -/** - * Ozone Client REST protocol implementation. It uses REST protocol to - * connect to Ozone Handler that executes client calls. - */ -public class RestClient implements ClientProtocol { - - private static final String PATH_SEPARATOR = "/"; - private static final Logger LOG = LoggerFactory.getLogger(RestClient.class); - - private final Configuration conf; - private final URI ozoneRestUri; - private final CloseableHttpClient httpClient; - private final UserGroupInformation ugi; - private final OzoneAcl.OzoneACLRights userRights; - - /** - * Creates RestClient instance with the given configuration. - * @param conf Configuration - * @throws IOException - */ - public RestClient(Configuration conf) - throws IOException { - try { - Preconditions.checkNotNull(conf); - this.conf = conf; - - long socketTimeout = conf.getTimeDuration( - OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT, - OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT, - TimeUnit.MILLISECONDS); - long connectionTimeout = conf.getTimeDuration( - OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT, - OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT, - TimeUnit.MILLISECONDS); - int maxConnection = conf.getInt( - OzoneConfigKeys.OZONE_REST_CLIENT_HTTP_CONNECTION_MAX, - OzoneConfigKeys.OZONE_REST_CLIENT_HTTP_CONNECTION_DEFAULT); - - int maxConnectionPerRoute = conf.getInt( - OzoneConfigKeys.OZONE_REST_CLIENT_HTTP_CONNECTION_PER_ROUTE_MAX, - OzoneConfigKeys - .OZONE_REST_CLIENT_HTTP_CONNECTION_PER_ROUTE_MAX_DEFAULT - ); - - /* - To make RestClient Thread safe, creating the HttpClient with - ThreadSafeClientConnManager. - */ - PoolingHttpClientConnectionManager connManager = - new PoolingHttpClientConnectionManager(); - connManager.setMaxTotal(maxConnection); - connManager.setDefaultMaxPerRoute(maxConnectionPerRoute); - - this.httpClient = HttpClients.custom() - .setConnectionManager(connManager) - .setDefaultRequestConfig( - RequestConfig.custom() - .setSocketTimeout(Math.toIntExact(socketTimeout)) - .setConnectTimeout(Math.toIntExact(connectionTimeout)) - .build()) - .build(); - this.ugi = UserGroupInformation.getCurrentUser(); - this.userRights = conf.getEnum(OMConfigKeys.OZONE_OM_USER_RIGHTS, - OMConfigKeys.OZONE_OM_USER_RIGHTS_DEFAULT); - - // TODO: Add new configuration parameter to configure RestServerSelector. - RestServerSelector defaultSelector = new DefaultRestServerSelector(); - InetSocketAddress restServer = getOzoneRestServerAddress(defaultSelector); - URIBuilder uriBuilder = new URIBuilder() - .setScheme("http") - .setHost(restServer.getHostName()) - .setPort(restServer.getPort()); - this.ozoneRestUri = uriBuilder.build(); - - } catch (URISyntaxException e) { - throw new IOException(e); - } - } - - private InetSocketAddress getOzoneRestServerAddress( - RestServerSelector selector) throws IOException { - String httpAddress = conf.get(OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY); - - if (httpAddress == null) { - throw new IllegalArgumentException( - OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY + " must be defined. 
See" + - " https://wiki.apache.org/hadoop/Ozone#Configuration for" + - " details on configuring Ozone."); - } - - HttpGet httpGet = new HttpGet("http://" + httpAddress + "/serviceList"); - HttpEntity entity = executeHttpRequest(httpGet); - try { - String serviceListJson = EntityUtils.toString(entity); - - ObjectMapper objectMapper = new ObjectMapper(); - TypeReference<List<ServiceInfo>> serviceInfoReference = - new TypeReference<List<ServiceInfo>>() { - }; - List<ServiceInfo> services = objectMapper.readValue( - serviceListJson, serviceInfoReference); - - List<ServiceInfo> dataNodeInfos = services.stream().filter( - a -> a.getNodeType().equals(HddsProtos.NodeType.DATANODE)) - .collect(Collectors.toList()); - - ServiceInfo restServer = selector.getRestServer(dataNodeInfos); - - return NetUtils.createSocketAddr( - NetUtils.normalizeHostName(restServer.getHostname()) + ":" - + restServer.getPort(ServicePort.Type.HTTP)); - } finally { - EntityUtils.consume(entity); - } - } - - @Override - public void createVolume(String volumeName) throws IOException { - createVolume(volumeName, VolumeArgs.newBuilder().build()); - } - - @Override - public void createVolume(String volumeName, VolumeArgs volArgs) - throws IOException { - try { - HddsClientUtils.verifyResourceName(volumeName); - Preconditions.checkNotNull(volArgs); - URIBuilder builder = new URIBuilder(ozoneRestUri); - String owner = volArgs.getOwner() == null ? - ugi.getUserName() : volArgs.getOwner(); - //TODO: support for ACLs has to be done in OzoneHandler (rest server) - /** - List<OzoneAcl> listOfAcls = new ArrayList<>(); - //User ACL - listOfAcls.add(new OzoneAcl(OzoneAcl.OzoneACLType.USER, - owner, userRights)); - //ACLs from VolumeArgs - if(volArgs.getAcls() != null) { - listOfAcls.addAll(volArgs.getAcls()); - } - */ - builder.setPath(PATH_SEPARATOR + volumeName); - - String quota = volArgs.getQuota(); - if(quota != null) { - builder.setParameter(Header.OZONE_QUOTA_QUERY_TAG, quota); - } - - HttpPost httpPost = new HttpPost(builder.build()); - addOzoneHeaders(httpPost); - //use admin from VolumeArgs, if it's present - if(volArgs.getAdmin() != null) { - httpPost.removeHeaders(HttpHeaders.AUTHORIZATION); - httpPost.addHeader(HttpHeaders.AUTHORIZATION, - Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " + - volArgs.getAdmin()); - } - httpPost.addHeader(Header.OZONE_USER, owner); - LOG.info("Creating Volume: {}, with {} as owner and quota set to {}.", - volumeName, owner, quota == null ?
"default" : quota); - EntityUtils.consume(executeHttpRequest(httpPost)); - } catch (URISyntaxException e) { - throw new IOException(e); - } - } - - - @Override - public void setVolumeOwner(String volumeName, String owner) - throws IOException { - try { - HddsClientUtils.verifyResourceName(volumeName); - Preconditions.checkNotNull(owner); - URIBuilder builder = new URIBuilder(ozoneRestUri); - builder.setPath(PATH_SEPARATOR + volumeName); - HttpPut httpPut = new HttpPut(builder.build()); - addOzoneHeaders(httpPut); - httpPut.addHeader(Header.OZONE_USER, owner); - EntityUtils.consume(executeHttpRequest(httpPut)); - } catch (URISyntaxException e) { - throw new IOException(e); - } - } - - @Override - public void setVolumeQuota(String volumeName, OzoneQuota quota) - throws IOException { - try { - HddsClientUtils.verifyResourceName(volumeName); - Preconditions.checkNotNull(quota); - String quotaString = quota.toString(); - URIBuilder builder = new URIBuilder(ozoneRestUri); - builder.setPath(PATH_SEPARATOR + volumeName); - builder.setParameter(Header.OZONE_QUOTA_QUERY_TAG, quotaString); - HttpPut httpPut = new HttpPut(builder.build()); - addOzoneHeaders(httpPut); - EntityUtils.consume(executeHttpRequest(httpPut)); - } catch (URISyntaxException e) { - throw new IOException(e); - } - } - - @Override - public OzoneVolume getVolumeDetails(String volumeName) - throws IOException { - try { - HddsClientUtils.verifyResourceName(volumeName); - URIBuilder builder = new URIBuilder(ozoneRestUri); - builder.setPath(PATH_SEPARATOR + volumeName); - builder.setParameter(Header.OZONE_INFO_QUERY_TAG, - Header.OZONE_INFO_QUERY_VOLUME); - HttpGet httpGet = new HttpGet(builder.build()); - addOzoneHeaders(httpGet); - HttpEntity response = executeHttpRequest(httpGet); - VolumeInfo volInfo = - VolumeInfo.parse(EntityUtils.toString(response)); - //TODO: OzoneHandler in datanode has to be modified to send ACLs - OzoneVolume volume = new OzoneVolume(conf, - this, - volInfo.getVolumeName(), - volInfo.getCreatedBy(), - volInfo.getOwner().getName(), - volInfo.getQuota().sizeInBytes(), - HddsClientUtils.formatDateTime(volInfo.getCreatedOn()), - null); - EntityUtils.consume(response); - return volume; - } catch (URISyntaxException | ParseException e) { - throw new IOException(e); - } - } - - @Override - public boolean checkVolumeAccess(String volumeName, OzoneAcl acl) - throws IOException { - throw new UnsupportedOperationException("Not yet implemented."); - } - - @Override - public void deleteVolume(String volumeName) throws IOException { - try { - HddsClientUtils.verifyResourceName(volumeName); - URIBuilder builder = new URIBuilder(ozoneRestUri); - builder.setPath(PATH_SEPARATOR + volumeName); - HttpDelete httpDelete = new HttpDelete(builder.build()); - addOzoneHeaders(httpDelete); - EntityUtils.consume(executeHttpRequest(httpDelete)); - } catch (URISyntaxException e) { - throw new IOException(e); - } - } - - @Override - public List listVolumes(String volumePrefix, String prevKey, - int maxListResult) - throws IOException { - return listVolumes(null, volumePrefix, prevKey, maxListResult); - } - - @Override - public List listVolumes(String user, String volumePrefix, - String prevKey, int maxListResult) - throws IOException { - try { - URIBuilder builder = new URIBuilder(ozoneRestUri); - builder.setPath(PATH_SEPARATOR); - builder.addParameter(Header.OZONE_INFO_QUERY_TAG, - Header.OZONE_LIST_QUERY_SERVICE); - builder.addParameter(Header.OZONE_LIST_QUERY_MAXKEYS, - String.valueOf(maxListResult)); - 
addQueryParamter(Header.OZONE_LIST_QUERY_PREFIX, volumePrefix, builder); - addQueryParamter(Header.OZONE_LIST_QUERY_PREVKEY, prevKey, builder); - HttpGet httpGet = new HttpGet(builder.build()); - if (!Strings.isNullOrEmpty(user)) { - httpGet.addHeader(Header.OZONE_USER, user); - } - addOzoneHeaders(httpGet); - HttpEntity response = executeHttpRequest(httpGet); - ListVolumes volumeList = - ListVolumes.parse(EntityUtils.toString(response)); - EntityUtils.consume(response); - return volumeList.getVolumes().stream().map(volInfo -> { - long creationTime = 0; - try { - creationTime = HddsClientUtils.formatDateTime(volInfo.getCreatedOn()); - } catch (ParseException e) { - LOG.warn("Parse exception in getting creation time for volume", e); - } - return new OzoneVolume(conf, this, volInfo.getVolumeName(), - volInfo.getCreatedBy(), volInfo.getOwner().getName(), - volInfo.getQuota().sizeInBytes(), creationTime, null); - }).collect(Collectors.toList()); - } catch (URISyntaxException e) { - throw new IOException(e); - } - } - - @Override - public void createBucket(String volumeName, String bucketName) - throws IOException { - createBucket(volumeName, bucketName, BucketArgs.newBuilder().build()); - } - - @Override - public void createBucket( - String volumeName, String bucketName, BucketArgs bucketArgs) - throws IOException { - try { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - Preconditions.checkNotNull(bucketArgs); - URIBuilder builder = new URIBuilder(ozoneRestUri); - OzoneConsts.Versioning versioning = OzoneConsts.Versioning.DISABLED; - if(bucketArgs.getVersioning() != null && - bucketArgs.getVersioning()) { - versioning = OzoneConsts.Versioning.ENABLED; - } - StorageType storageType = bucketArgs.getStorageType() == null ? - StorageType.DEFAULT : bucketArgs.getStorageType(); - - builder.setPath(PATH_SEPARATOR + volumeName + - PATH_SEPARATOR + bucketName); - HttpPost httpPost = new HttpPost(builder.build()); - addOzoneHeaders(httpPost); - - //ACLs from BucketArgs - if(bucketArgs.getAcls() != null) { - for (OzoneAcl acl : bucketArgs.getAcls()) { - httpPost.addHeader( - Header.OZONE_ACLS, Header.OZONE_ACL_ADD + " " + acl.toString()); - } - } - httpPost.addHeader(Header.OZONE_STORAGE_TYPE, storageType.toString()); - httpPost.addHeader(Header.OZONE_BUCKET_VERSIONING, - versioning.toString()); - LOG.info("Creating Bucket: {}/{}, with Versioning {} and Storage Type" + - " set to {}", volumeName, bucketName, versioning, - storageType); - - EntityUtils.consume(executeHttpRequest(httpPost)); - } catch (URISyntaxException e) { - throw new IOException(e); - } - } - - @Override - public void addBucketAcls( - String volumeName, String bucketName, List addAcls) - throws IOException { - try { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - Preconditions.checkNotNull(addAcls); - URIBuilder builder = new URIBuilder(ozoneRestUri); - - builder.setPath(PATH_SEPARATOR + volumeName + - PATH_SEPARATOR + bucketName); - HttpPut httpPut = new HttpPut(builder.build()); - addOzoneHeaders(httpPut); - - for (OzoneAcl acl : addAcls) { - httpPut.addHeader( - Header.OZONE_ACLS, Header.OZONE_ACL_ADD + " " + acl.toString()); - } - EntityUtils.consume(executeHttpRequest(httpPut)); - } catch (URISyntaxException e) { - throw new IOException(e); - } - } - - @Override - public void removeBucketAcls( - String volumeName, String bucketName, List removeAcls) - throws IOException { - try { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - Preconditions.checkNotNull(removeAcls); - URIBuilder 
builder = new URIBuilder(ozoneRestUri); - - builder.setPath(PATH_SEPARATOR + volumeName + - PATH_SEPARATOR + bucketName); - HttpPut httpPut = new HttpPut(builder.build()); - addOzoneHeaders(httpPut); - - for (OzoneAcl acl : removeAcls) { - httpPut.addHeader( - Header.OZONE_ACLS, Header.OZONE_ACL_REMOVE + " " + acl.toString()); - } - EntityUtils.consume(executeHttpRequest(httpPut)); - } catch (URISyntaxException e) { - throw new IOException(e); - } - } - - @Override - public void setBucketVersioning( - String volumeName, String bucketName, Boolean versioning) - throws IOException { - try { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - Preconditions.checkNotNull(versioning); - URIBuilder builder = new URIBuilder(ozoneRestUri); - - builder.setPath(PATH_SEPARATOR + volumeName + - PATH_SEPARATOR + bucketName); - HttpPut httpPut = new HttpPut(builder.build()); - addOzoneHeaders(httpPut); - - httpPut.addHeader(Header.OZONE_BUCKET_VERSIONING, - getBucketVersioning(versioning).toString()); - EntityUtils.consume(executeHttpRequest(httpPut)); - } catch (URISyntaxException e) { - throw new IOException(e); - } - } - - @Override - public void setBucketStorageType( - String volumeName, String bucketName, StorageType storageType) - throws IOException { - try { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - Preconditions.checkNotNull(storageType); - URIBuilder builder = new URIBuilder(ozoneRestUri); - - builder.setPath(PATH_SEPARATOR + volumeName + - PATH_SEPARATOR + bucketName); - HttpPut httpPut = new HttpPut(builder.build()); - addOzoneHeaders(httpPut); - - httpPut.addHeader(Header.OZONE_STORAGE_TYPE, storageType.toString()); - EntityUtils.consume(executeHttpRequest(httpPut)); - } catch (URISyntaxException e) { - throw new IOException(e); - } - } - - @Override - public void deleteBucket(String volumeName, String bucketName) - throws IOException { - try { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - URIBuilder builder = new URIBuilder(ozoneRestUri); - builder.setPath(PATH_SEPARATOR + volumeName + - PATH_SEPARATOR + bucketName); - HttpDelete httpDelete = new HttpDelete(builder.build()); - addOzoneHeaders(httpDelete); - EntityUtils.consume(executeHttpRequest(httpDelete)); - } catch (URISyntaxException e) { - throw new IOException(e); - } - } - - @Override - public void checkBucketAccess(String volumeName, String bucketName) - throws IOException { - throw new UnsupportedOperationException("Not yet implemented."); - } - - @Override - public OzoneBucket getBucketDetails(String volumeName, String bucketName) - throws IOException { - try { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - URIBuilder builder = new URIBuilder(ozoneRestUri); - builder.setPath(PATH_SEPARATOR + volumeName + - PATH_SEPARATOR + bucketName); - builder.setParameter(Header.OZONE_INFO_QUERY_TAG, - Header.OZONE_INFO_QUERY_BUCKET); - HttpGet httpGet = new HttpGet(builder.build()); - addOzoneHeaders(httpGet); - HttpEntity response = executeHttpRequest(httpGet); - BucketInfo bucketInfo = - BucketInfo.parse(EntityUtils.toString(response)); - OzoneBucket bucket = new OzoneBucket(conf, - this, - bucketInfo.getVolumeName(), - bucketInfo.getBucketName(), - bucketInfo.getAcls(), - bucketInfo.getStorageType(), - getBucketVersioningFlag(bucketInfo.getVersioning()), - HddsClientUtils.formatDateTime(bucketInfo.getCreatedOn())); - EntityUtils.consume(response); - return bucket; - } catch (URISyntaxException | ParseException e) { - throw new IOException(e); - } - } - - @Override - public 
List listBuckets(String volumeName, String bucketPrefix, - String prevBucket, int maxListResult) - throws IOException { - try { - HddsClientUtils.verifyResourceName(volumeName); - URIBuilder builder = new URIBuilder(ozoneRestUri); - builder.setPath(PATH_SEPARATOR + volumeName); - builder.addParameter(Header.OZONE_INFO_QUERY_TAG, - Header.OZONE_INFO_QUERY_BUCKET); - builder.addParameter(Header.OZONE_LIST_QUERY_MAXKEYS, - String.valueOf(maxListResult)); - addQueryParamter(Header.OZONE_LIST_QUERY_PREFIX, bucketPrefix, builder); - addQueryParamter(Header.OZONE_LIST_QUERY_PREVKEY, prevBucket, builder); - HttpGet httpGet = new HttpGet(builder.build()); - addOzoneHeaders(httpGet); - HttpEntity response = executeHttpRequest(httpGet); - ListBuckets bucketList = - ListBuckets.parse(EntityUtils.toString(response)); - EntityUtils.consume(response); - return bucketList.getBuckets().stream().map(bucketInfo -> { - long creationTime = 0; - try { - creationTime = - HddsClientUtils.formatDateTime(bucketInfo.getCreatedOn()); - } catch (ParseException e) { - LOG.warn("Parse exception in getting creation time for volume", e); - } - return new OzoneBucket(conf, this, volumeName, - bucketInfo.getBucketName(), bucketInfo.getAcls(), - bucketInfo.getStorageType(), - getBucketVersioningFlag(bucketInfo.getVersioning()), creationTime); - }).collect(Collectors.toList()); - } catch (URISyntaxException e) { - throw new IOException(e); - } - } - - /** - * Writes a key in an existing bucket. - * - * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * @param keyName Name of the Key - * @param size Size of the data - * @param type - * @param factor @return {@link OzoneOutputStream} - */ - @Override - public OzoneOutputStream createKey( - String volumeName, String bucketName, String keyName, long size, - ReplicationType type, ReplicationFactor factor) - throws IOException { - // TODO: Once ReplicationType and ReplicationFactor are supported in - // OzoneHandler (in Datanode), set them in header. 
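// The piped-stream pattern below decouples the caller's OutputStream from
// the HTTP PUT that consumes it. The same idea in isolation, assuming a
// hypothetical sendRequestBody() consumer; the consumer must run on its
// own thread or the bounded pipe buffer deadlocks:
//
//   PipedInputStream in = new PipedInputStream();
//   PipedOutputStream out = new PipedOutputStream(in);
//   FutureTask<Void> consumer = new FutureTask<>(() -> {
//     sendRequestBody(in);   // reads until the writer closes the pipe
//     return null;
//   });
//   new Thread(consumer).start();
//   out.write(payload);
//   out.close();             // signals EOF to the reader
//   consumer.get();          // surfaces consumer-side failures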
- try { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - HddsClientUtils.checkNotNull(keyName, type, factor); - URIBuilder builder = new URIBuilder(ozoneRestUri); - builder.setPath(PATH_SEPARATOR + volumeName + - PATH_SEPARATOR + bucketName + - PATH_SEPARATOR + keyName); - HttpPut putRequest = new HttpPut(builder.build()); - addOzoneHeaders(putRequest); - PipedInputStream in = new PipedInputStream(); - OutputStream out = new PipedOutputStream(in); - putRequest.setEntity(new InputStreamEntity(in, size)); - FutureTask futureTask = - new FutureTask<>(() -> executeHttpRequest(putRequest)); - new Thread(futureTask).start(); - OzoneOutputStream outputStream = new OzoneOutputStream( - new OutputStream() { - @Override - public void write(int b) throws IOException { - out.write(b); - } - - @Override - public void close() throws IOException { - try { - out.close(); - EntityUtils.consume(futureTask.get()); - } catch (ExecutionException | InterruptedException e) { - throw new IOException(e); - } - } - }); - - return outputStream; - } catch (URISyntaxException e) { - throw new IOException(e); - } - } - - @Override - public OzoneInputStream getKey( - String volumeName, String bucketName, String keyName) - throws IOException { - try { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - Preconditions.checkNotNull(keyName); - URIBuilder builder = new URIBuilder(ozoneRestUri); - builder.setPath(PATH_SEPARATOR + volumeName + - PATH_SEPARATOR + bucketName + - PATH_SEPARATOR + keyName); - HttpGet getRequest = new HttpGet(builder.build()); - addOzoneHeaders(getRequest); - HttpEntity entity = executeHttpRequest(getRequest); - PipedInputStream in = new PipedInputStream(); - OutputStream out = new PipedOutputStream(in); - FutureTask futureTask = - new FutureTask<>(() -> { - entity.writeTo(out); - out.close(); - return null; - }); - new Thread(futureTask).start(); - OzoneInputStream inputStream = new OzoneInputStream( - new InputStream() { - - @Override - public int read() throws IOException { - return in.read(); - } - - @Override - public void close() throws IOException { - in.close(); - EntityUtils.consume(entity); - } - }); - - return inputStream; - } catch (URISyntaxException e) { - throw new IOException(e); - } - } - - @Override - public void deleteKey(String volumeName, String bucketName, String keyName) - throws IOException { - try { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - Preconditions.checkNotNull(keyName); - URIBuilder builder = new URIBuilder(ozoneRestUri); - builder.setPath(PATH_SEPARATOR + volumeName + - PATH_SEPARATOR + bucketName + PATH_SEPARATOR + keyName); - HttpDelete httpDelete = new HttpDelete(builder.build()); - addOzoneHeaders(httpDelete); - EntityUtils.consume(executeHttpRequest(httpDelete)); - } catch (URISyntaxException e) { - throw new IOException(e); - } - } - - @Override - public void renameKey(String volumeName, String bucketName, - String fromKeyName, String toKeyName) throws IOException { - try { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - HddsClientUtils.checkNotNull(fromKeyName, toKeyName); - URIBuilder builder = new URIBuilder(ozoneRestUri); - builder.setPath(PATH_SEPARATOR + volumeName + PATH_SEPARATOR + bucketName - + PATH_SEPARATOR + fromKeyName); - builder.addParameter(Header.OZONE_RENAME_TO_KEY_PARAM_NAME, toKeyName); - HttpPost httpPost = new HttpPost(builder.build()); - addOzoneHeaders(httpPost); - EntityUtils.consume(executeHttpRequest(httpPost)); - } catch (URISyntaxException e) { - throw new 
IOException(e); - } - } - - @Override - public List listKeys(String volumeName, String bucketName, - String keyPrefix, String prevKey, - int maxListResult) - throws IOException { - try { - HddsClientUtils.verifyResourceName(volumeName); - URIBuilder builder = new URIBuilder(ozoneRestUri); - builder - .setPath(PATH_SEPARATOR + volumeName + PATH_SEPARATOR + bucketName); - builder.addParameter(Header.OZONE_INFO_QUERY_TAG, - Header.OZONE_INFO_QUERY_KEY); - builder.addParameter(Header.OZONE_LIST_QUERY_MAXKEYS, - String.valueOf(maxListResult)); - addQueryParamter(Header.OZONE_LIST_QUERY_PREFIX, keyPrefix, builder); - addQueryParamter(Header.OZONE_LIST_QUERY_PREVKEY, prevKey, builder); - HttpGet httpGet = new HttpGet(builder.build()); - addOzoneHeaders(httpGet); - HttpEntity response = executeHttpRequest(httpGet); - ListKeys keyList = ListKeys.parse(EntityUtils.toString(response)); - EntityUtils.consume(response); - return keyList.getKeyList().stream().map(keyInfo -> { - long creationTime = 0, modificationTime = 0; - try { - creationTime = HddsClientUtils.formatDateTime(keyInfo.getCreatedOn()); - modificationTime = - HddsClientUtils.formatDateTime(keyInfo.getModifiedOn()); - } catch (ParseException e) { - LOG.warn("Parse exception in getting creation time for volume", e); - } - return new OzoneKey(volumeName, bucketName, keyInfo.getKeyName(), - keyInfo.getSize(), creationTime, modificationTime); - }).collect(Collectors.toList()); - } catch (URISyntaxException e) { - throw new IOException(e); - } - } - - @Override - public OzoneKeyDetails getKeyDetails( - String volumeName, String bucketName, String keyName) - throws IOException { - try { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - Preconditions.checkNotNull(keyName); - URIBuilder builder = new URIBuilder(ozoneRestUri); - builder.setPath(PATH_SEPARATOR + volumeName + - PATH_SEPARATOR + bucketName + PATH_SEPARATOR + keyName); - builder.setParameter(Header.OZONE_INFO_QUERY_TAG, - Header.OZONE_INFO_QUERY_KEY_DETAIL); - HttpGet httpGet = new HttpGet(builder.build()); - addOzoneHeaders(httpGet); - HttpEntity response = executeHttpRequest(httpGet); - KeyInfoDetails keyInfo = - KeyInfoDetails.parse(EntityUtils.toString(response)); - - List ozoneKeyLocations = new ArrayList<>(); - keyInfo.getKeyLocations().forEach((a) -> ozoneKeyLocations.add( - new OzoneKeyLocation(a.getContainerID(), a.getLocalID(), - a.getLength(), a.getOffset()))); - OzoneKeyDetails key = new OzoneKeyDetails(volumeName, - bucketName, - keyInfo.getKeyName(), - keyInfo.getSize(), - HddsClientUtils.formatDateTime(keyInfo.getCreatedOn()), - HddsClientUtils.formatDateTime(keyInfo.getModifiedOn()), - ozoneKeyLocations); - EntityUtils.consume(response); - return key; - } catch (URISyntaxException | ParseException e) { - throw new IOException(e); - } - } - - /** - * Adds Ozone headers to http request. - * - * @param httpRequest Http Request - */ - private void addOzoneHeaders(HttpUriRequest httpRequest) { - httpRequest.addHeader(HttpHeaders.AUTHORIZATION, - Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " + - ugi.getUserName()); - httpRequest.addHeader(HttpHeaders.DATE, - HddsClientUtils.formatDateTime(Time.monotonicNow())); - httpRequest.addHeader(Header.OZONE_VERSION_HEADER, - Header.OZONE_V1_VERSION_HEADER); - } - - /** - * Sends the http request to server and returns the response HttpEntity. 
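 * The returned entity must be consumed by the caller; a sketch, with
 * process() as an illustrative placeholder:
 * <pre>
 *   HttpEntity entity = executeHttpRequest(httpGet);
 *   try {
 *     process(EntityUtils.toString(entity));
 *   } finally {
 *     EntityUtils.consume(entity);   // always release the connection
 *   }
 * </pre>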
- * It is the responsibility of the caller to consume and close the response - * HttpEntity by calling {@code EntityUtils.consume}. - * - * @param httpUriRequest http request - * @throws IOException - */ - private HttpEntity executeHttpRequest(HttpUriRequest httpUriRequest) - throws IOException { - HttpResponse response = httpClient.execute(httpUriRequest); - int errorCode = response.getStatusLine().getStatusCode(); - HttpEntity entity = response.getEntity(); - if ((errorCode == HTTP_OK) || (errorCode == HTTP_CREATED)) { - return entity; - } - if (entity != null) { - throw new IOException( - OzoneException.parse(EntityUtils.toString(entity))); - } else { - throw new IOException("Unexpected null in http payload," + - " while processing request"); - } - } - - /** - * Converts OzoneConsts.Versioning to boolean. - * - * @param version bucket versioning enum value - * @return corresponding boolean value - */ - private Boolean getBucketVersioningFlag( - OzoneConsts.Versioning version) { - if(version != null) { - switch(version) { - case ENABLED: - return true; - case NOT_DEFINED: - case DISABLED: - default: - return false; - } - } - return false; - } - - /** - * Converts the Bucket versioning flag into OzoneConsts.Versioning. - * - * @param flag versioning flag - * @return corresponding OzoneConsts.Versioning - */ - private OzoneConsts.Versioning getBucketVersioning(Boolean flag) { - if(flag != null) { - if(flag) { - return OzoneConsts.Versioning.ENABLED; - } else { - return OzoneConsts.Versioning.DISABLED; - } - } - return OzoneConsts.Versioning.NOT_DEFINED; - } - - @Override - public void close() throws IOException { - httpClient.close(); - } - - private void addQueryParamter(String param, String value, - URIBuilder builder) { - if (!Strings.isNullOrEmpty(value)) { - builder.addParameter(param, value); - } - } -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestServerSelector.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestServerSelector.java deleted file mode 100644 index fbd6eb8ea9a9b..0000000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestServerSelector.java +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.rest; - -import org.apache.hadoop.ozone.om.helpers.ServiceInfo; - -import java.util.List; - -/** - * The implementor of this interface should select the REST server which will - * be used by the client to connect to Ozone Cluster, given a list of - * REST Servers/DataNodes (DataNodes are the ones which host the REST Service). - */ -public interface RestServerSelector { - - /** - * Returns the REST Service which will be used by the client for connection.
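 * A sketch of an alternative, sticky selector implementing this interface
 * (contrast with the random DefaultRestServerSelector above):
 * <pre>
 *   public class FirstServerSelector implements RestServerSelector {
 *     public ServiceInfo getRestServer(List<ServiceInfo> restServices) {
 *       return restServices.get(0);   // always the first advertised server
 *     }
 *   }
 * </pre>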
- * - * @param restServices list of available REST servers - * @return ServiceInfo - */ - ServiceInfo getRestServer(List restServices); - -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/exceptions/package-info.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/exceptions/package-info.java deleted file mode 100644 index 233e7882e2d69..0000000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/exceptions/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.client.rest.exceptions; - -/** - * This package contains ozone rest client libraries. - */ diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/headers/package-info.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/headers/package-info.java deleted file mode 100644 index 340709f492dc8..0000000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/headers/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -@InterfaceAudience.Private -package org.apache.hadoop.ozone.client.rest.headers; - -import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/package-info.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/package-info.java deleted file mode 100644 index ebcc104811f82..0000000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.rest; - -/** - * This package contains Ozone rest client library classes. - */ diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java deleted file mode 100644 index 330eba878bfc9..0000000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ /dev/null @@ -1,576 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.client.rpc; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.fs.StorageType; -import org.apache.hadoop.hdds.scm.client.HddsClientUtils; -import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.io.retry.RetryPolicy; -import org.apache.hadoop.ipc.Client; -import org.apache.hadoop.ipc.ProtobufRpcEngine; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.client.*; -import org.apache.hadoop.hdds.client.OzoneQuota; -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.ozone.client.VolumeArgs; -import org.apache.hadoop.ozone.client.OzoneClientUtils; -import org.apache.hadoop.ozone.client.io.ChunkGroupInputStream; -import org.apache.hadoop.ozone.client.io.ChunkGroupOutputStream; -import org.apache.hadoop.ozone.client.io.LengthInputStream; -import org.apache.hadoop.ozone.client.io.OzoneInputStream; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.client.protocol.ClientProtocol; -import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.helpers.OpenKeySession; -import org.apache.hadoop.ozone.om.helpers.ServiceInfo; -import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB; -import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.ServicePort; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.ozone.protocolPB.OMPBHelper; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.protocolPB - .StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdds.scm.protocolPB - .StorageContainerLocationProtocolPB; -import org.apache.hadoop.security.UserGroupInformation; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.net.InetSocketAddress; -import java.util.*; -import java.util.stream.Collectors; - -/** - * Ozone RPC Client Implementation, it connects to OM, SCM and DataNode - * to execute client calls. This uses RPC protocol for communication - * with the servers. 
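 * <p>Since both RpcClient and RestClient implement ClientProtocol, callers
 * can swap transports behind a single reference. A hedged sketch: the real
 * ozone.client.protocol property holds an implementation class name, so the
 * rest/rpc switch below is simplified for illustration:
 * <pre>
 *   Configuration conf = new OzoneConfiguration();
 *   boolean useRest = "rest".equalsIgnoreCase(
 *       conf.get("ozone.client.protocol", "rpc"));
 *   ClientProtocol client = useRest
 *       ? new RestClient(conf) : new RpcClient(conf);
 * </pre>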
- */ -public class RpcClient implements ClientProtocol { - - private static final Logger LOG = - LoggerFactory.getLogger(RpcClient.class); - - private final OzoneConfiguration conf; - private final StorageContainerLocationProtocolClientSideTranslatorPB - storageContainerLocationClient; - private final OzoneManagerProtocolClientSideTranslatorPB - ozoneManagerClient; - private final XceiverClientManager xceiverClientManager; - private final int chunkSize; - private final UserGroupInformation ugi; - private final OzoneAcl.OzoneACLRights userRights; - private final OzoneAcl.OzoneACLRights groupRights; - private final RetryPolicy retryPolicy; - - /** - * Creates RpcClient instance with the given configuration. - * @param conf - * @throws IOException - */ - public RpcClient(Configuration conf) throws IOException { - Preconditions.checkNotNull(conf); - this.conf = new OzoneConfiguration(conf); - this.ugi = UserGroupInformation.getCurrentUser(); - this.userRights = conf.getEnum(OMConfigKeys.OZONE_OM_USER_RIGHTS, - OMConfigKeys.OZONE_OM_USER_RIGHTS_DEFAULT); - this.groupRights = conf.getEnum(OMConfigKeys.OZONE_OM_GROUP_RIGHTS, - OMConfigKeys.OZONE_OM_GROUP_RIGHTS_DEFAULT); - long omVersion = - RPC.getProtocolVersion(OzoneManagerProtocolPB.class); - InetSocketAddress omAddress = OmUtils - .getOmAddressForClients(conf); - RPC.setProtocolEngine(conf, OzoneManagerProtocolPB.class, - ProtobufRpcEngine.class); - this.ozoneManagerClient = - new OzoneManagerProtocolClientSideTranslatorPB( - RPC.getProxy(OzoneManagerProtocolPB.class, omVersion, - omAddress, UserGroupInformation.getCurrentUser(), conf, - NetUtils.getDefaultSocketFactory(conf), - Client.getRpcTimeout(conf))); - - long scmVersion = - RPC.getProtocolVersion(StorageContainerLocationProtocolPB.class); - InetSocketAddress scmAddress = getScmAddressForClient(); - RPC.setProtocolEngine(conf, StorageContainerLocationProtocolPB.class, - ProtobufRpcEngine.class); - this.storageContainerLocationClient = - new StorageContainerLocationProtocolClientSideTranslatorPB( - RPC.getProxy(StorageContainerLocationProtocolPB.class, scmVersion, - scmAddress, UserGroupInformation.getCurrentUser(), conf, - NetUtils.getDefaultSocketFactory(conf), - Client.getRpcTimeout(conf))); - - this.xceiverClientManager = new XceiverClientManager(conf); - retryPolicy = OzoneClientUtils.createRetryPolicy(conf); - - int configuredChunkSize = conf.getInt( - ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY, - ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_DEFAULT); - if(configuredChunkSize > ScmConfigKeys.OZONE_SCM_CHUNK_MAX_SIZE) { - LOG.warn("The chunk size ({}) is not allowed to be more than" - + " the maximum size ({})," - + " resetting to the maximum size.", - configuredChunkSize, ScmConfigKeys.OZONE_SCM_CHUNK_MAX_SIZE); - chunkSize = ScmConfigKeys.OZONE_SCM_CHUNK_MAX_SIZE; - } else { - chunkSize = configuredChunkSize; - } - } - - private InetSocketAddress getScmAddressForClient() throws IOException { - List services = ozoneManagerClient.getServiceList(); - ServiceInfo scmInfo = services.stream().filter( - a -> a.getNodeType().equals(HddsProtos.NodeType.SCM)) - .collect(Collectors.toList()).get(0); - return NetUtils.createSocketAddr(scmInfo.getHostname()+ ":" + - scmInfo.getPort(ServicePort.Type.RPC)); - } - - @Override - public void createVolume(String volumeName) throws IOException { - createVolume(volumeName, VolumeArgs.newBuilder().build()); - } - - @Override - public void createVolume(String volumeName, VolumeArgs volArgs) - throws IOException { - HddsClientUtils.verifyResourceName(volumeName); - 
Preconditions.checkNotNull(volArgs); - - String admin = volArgs.getAdmin() == null ? - ugi.getUserName() : volArgs.getAdmin(); - String owner = volArgs.getOwner() == null ? - ugi.getUserName() : volArgs.getOwner(); - long quota = volArgs.getQuota() == null ? - OzoneConsts.MAX_QUOTA_IN_BYTES : - OzoneQuota.parseQuota(volArgs.getQuota()).sizeInBytes(); - List listOfAcls = new ArrayList<>(); - //User ACL - listOfAcls.add(new OzoneAcl(OzoneAcl.OzoneACLType.USER, - owner, userRights)); - //Group ACLs of the User - List userGroups = Arrays.asList(UserGroupInformation - .createRemoteUser(owner).getGroupNames()); - userGroups.stream().forEach((group) -> listOfAcls.add( - new OzoneAcl(OzoneAcl.OzoneACLType.GROUP, group, groupRights))); - //ACLs from VolumeArgs - if(volArgs.getAcls() != null) { - listOfAcls.addAll(volArgs.getAcls()); - } - - OmVolumeArgs.Builder builder = OmVolumeArgs.newBuilder(); - builder.setVolume(volumeName); - builder.setAdminName(admin); - builder.setOwnerName(owner); - builder.setQuotaInBytes(quota); - - //Remove duplicates and add ACLs - for (OzoneAcl ozoneAcl : - listOfAcls.stream().distinct().collect(Collectors.toList())) { - builder.addOzoneAcls(OMPBHelper.convertOzoneAcl(ozoneAcl)); - } - - LOG.info("Creating Volume: {}, with {} as owner and quota set to {} bytes.", - volumeName, owner, quota); - ozoneManagerClient.createVolume(builder.build()); - } - - @Override - public void setVolumeOwner(String volumeName, String owner) - throws IOException { - HddsClientUtils.verifyResourceName(volumeName); - Preconditions.checkNotNull(owner); - ozoneManagerClient.setOwner(volumeName, owner); - } - - @Override - public void setVolumeQuota(String volumeName, OzoneQuota quota) - throws IOException { - HddsClientUtils.verifyResourceName(volumeName); - Preconditions.checkNotNull(quota); - long quotaInBytes = quota.sizeInBytes(); - ozoneManagerClient.setQuota(volumeName, quotaInBytes); - } - - @Override - public OzoneVolume getVolumeDetails(String volumeName) - throws IOException { - HddsClientUtils.verifyResourceName(volumeName); - OmVolumeArgs volume = ozoneManagerClient.getVolumeInfo(volumeName); - return new OzoneVolume( - conf, - this, - volume.getVolume(), - volume.getAdminName(), - volume.getOwnerName(), - volume.getQuotaInBytes(), - volume.getCreationTime(), - volume.getAclMap().ozoneAclGetProtobuf().stream(). - map(OMPBHelper::convertOzoneAcl).collect(Collectors.toList())); - } - - @Override - public boolean checkVolumeAccess(String volumeName, OzoneAcl acl) - throws IOException { - throw new UnsupportedOperationException("Not yet implemented."); - } - - @Override - public void deleteVolume(String volumeName) throws IOException { - HddsClientUtils.verifyResourceName(volumeName); - ozoneManagerClient.deleteVolume(volumeName); - } - - @Override - public List listVolumes(String volumePrefix, String prevVolume, - int maxListResult) - throws IOException { - List volumes = ozoneManagerClient.listAllVolumes( - volumePrefix, prevVolume, maxListResult); - - return volumes.stream().map(volume -> new OzoneVolume( - conf, - this, - volume.getVolume(), - volume.getAdminName(), - volume.getOwnerName(), - volume.getQuotaInBytes(), - volume.getCreationTime(), - volume.getAclMap().ozoneAclGetProtobuf().stream(). 
- map(OMPBHelper::convertOzoneAcl).collect(Collectors.toList()))) - .collect(Collectors.toList()); - } - - @Override - public List listVolumes(String user, String volumePrefix, - String prevVolume, int maxListResult) - throws IOException { - List volumes = ozoneManagerClient.listVolumeByUser( - user, volumePrefix, prevVolume, maxListResult); - - return volumes.stream().map(volume -> new OzoneVolume( - conf, - this, - volume.getVolume(), - volume.getAdminName(), - volume.getOwnerName(), - volume.getQuotaInBytes(), - volume.getCreationTime(), - volume.getAclMap().ozoneAclGetProtobuf().stream(). - map(OMPBHelper::convertOzoneAcl).collect(Collectors.toList()))) - .collect(Collectors.toList()); - } - - @Override - public void createBucket(String volumeName, String bucketName) - throws IOException { - createBucket(volumeName, bucketName, BucketArgs.newBuilder().build()); - } - - @Override - public void createBucket( - String volumeName, String bucketName, BucketArgs bucketArgs) - throws IOException { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - Preconditions.checkNotNull(bucketArgs); - - Boolean isVersionEnabled = bucketArgs.getVersioning() == null ? - Boolean.FALSE : bucketArgs.getVersioning(); - StorageType storageType = bucketArgs.getStorageType() == null ? - StorageType.DEFAULT : bucketArgs.getStorageType(); - List listOfAcls = new ArrayList<>(); - //User ACL - listOfAcls.add(new OzoneAcl(OzoneAcl.OzoneACLType.USER, - ugi.getUserName(), userRights)); - //Group ACLs of the User - List userGroups = Arrays.asList(UserGroupInformation - .createRemoteUser(ugi.getUserName()).getGroupNames()); - userGroups.stream().forEach((group) -> listOfAcls.add( - new OzoneAcl(OzoneAcl.OzoneACLType.GROUP, group, groupRights))); - //ACLs from BucketArgs - if(bucketArgs.getAcls() != null) { - listOfAcls.addAll(bucketArgs.getAcls()); - } - - OmBucketInfo.Builder builder = OmBucketInfo.newBuilder(); - builder.setVolumeName(volumeName) - .setBucketName(bucketName) - .setIsVersionEnabled(isVersionEnabled) - .setStorageType(storageType) - .setAcls(listOfAcls.stream().distinct().collect(Collectors.toList())); - - LOG.info("Creating Bucket: {}/{}, with Versioning {} and " + - "Storage Type set to {}", volumeName, bucketName, isVersionEnabled, - storageType); - ozoneManagerClient.createBucket(builder.build()); - } - - @Override - public void addBucketAcls( - String volumeName, String bucketName, List addAcls) - throws IOException { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - Preconditions.checkNotNull(addAcls); - OmBucketArgs.Builder builder = OmBucketArgs.newBuilder(); - builder.setVolumeName(volumeName) - .setBucketName(bucketName) - .setAddAcls(addAcls); - ozoneManagerClient.setBucketProperty(builder.build()); - } - - @Override - public void removeBucketAcls( - String volumeName, String bucketName, List removeAcls) - throws IOException { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - Preconditions.checkNotNull(removeAcls); - OmBucketArgs.Builder builder = OmBucketArgs.newBuilder(); - builder.setVolumeName(volumeName) - .setBucketName(bucketName) - .setRemoveAcls(removeAcls); - ozoneManagerClient.setBucketProperty(builder.build()); - } - - @Override - public void setBucketVersioning( - String volumeName, String bucketName, Boolean versioning) - throws IOException { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - Preconditions.checkNotNull(versioning); - OmBucketArgs.Builder builder = OmBucketArgs.newBuilder(); - builder.setVolumeName(volumeName) - 
.setBucketName(bucketName) - .setIsVersionEnabled(versioning); - ozoneManagerClient.setBucketProperty(builder.build()); - } - - @Override - public void setBucketStorageType( - String volumeName, String bucketName, StorageType storageType) - throws IOException { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - Preconditions.checkNotNull(storageType); - OmBucketArgs.Builder builder = OmBucketArgs.newBuilder(); - builder.setVolumeName(volumeName) - .setBucketName(bucketName) - .setStorageType(storageType); - ozoneManagerClient.setBucketProperty(builder.build()); - } - - @Override - public void deleteBucket( - String volumeName, String bucketName) throws IOException { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - ozoneManagerClient.deleteBucket(volumeName, bucketName); - } - - @Override - public void checkBucketAccess( - String volumeName, String bucketName) throws IOException { - - } - - @Override - public OzoneBucket getBucketDetails( - String volumeName, String bucketName) throws IOException { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - OmBucketInfo bucketArgs = - ozoneManagerClient.getBucketInfo(volumeName, bucketName); - return new OzoneBucket( - conf, - this, - bucketArgs.getVolumeName(), - bucketArgs.getBucketName(), - bucketArgs.getAcls(), - bucketArgs.getStorageType(), - bucketArgs.getIsVersionEnabled(), - bucketArgs.getCreationTime()); - } - - @Override - public List listBuckets(String volumeName, String bucketPrefix, - String prevBucket, int maxListResult) - throws IOException { - List buckets = ozoneManagerClient.listBuckets( - volumeName, prevBucket, bucketPrefix, maxListResult); - - return buckets.stream().map(bucket -> new OzoneBucket( - conf, - this, - bucket.getVolumeName(), - bucket.getBucketName(), - bucket.getAcls(), - bucket.getStorageType(), - bucket.getIsVersionEnabled(), - bucket.getCreationTime())) - .collect(Collectors.toList()); - } - - @Override - public OzoneOutputStream createKey( - String volumeName, String bucketName, String keyName, long size, - ReplicationType type, ReplicationFactor factor) - throws IOException { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - HddsClientUtils.checkNotNull(keyName, type, factor); - String requestId = UUID.randomUUID().toString(); - OmKeyArgs keyArgs = new OmKeyArgs.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setDataSize(size) - .setType(HddsProtos.ReplicationType.valueOf(type.toString())) - .setFactor(HddsProtos.ReplicationFactor.valueOf(factor.getValue())) - .build(); - - OpenKeySession openKey = ozoneManagerClient.openKey(keyArgs); - ChunkGroupOutputStream groupOutputStream = - new ChunkGroupOutputStream.Builder() - .setHandler(openKey) - .setXceiverClientManager(xceiverClientManager) - .setScmClient(storageContainerLocationClient) - .setOmClient(ozoneManagerClient) - .setChunkSize(chunkSize) - .setRequestID(requestId) - .setType(HddsProtos.ReplicationType.valueOf(type.toString())) - .setFactor(HddsProtos.ReplicationFactor.valueOf(factor.getValue())) - .setRetryPolicy(retryPolicy) - .build(); - groupOutputStream.addPreallocateBlocks( - openKey.getKeyInfo().getLatestVersionLocations(), - openKey.getOpenVersion()); - return new OzoneOutputStream(groupOutputStream); - } - - @Override - public OzoneInputStream getKey( - String volumeName, String bucketName, String keyName) - throws IOException { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - Preconditions.checkNotNull(keyName); - String 
requestId = UUID.randomUUID().toString(); - OmKeyArgs keyArgs = new OmKeyArgs.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .build(); - OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs); - LengthInputStream lengthInputStream = - ChunkGroupInputStream.getFromOmKeyInfo( - keyInfo, xceiverClientManager, storageContainerLocationClient, - requestId); - return new OzoneInputStream(lengthInputStream.getWrappedStream()); - } - - @Override - public void deleteKey( - String volumeName, String bucketName, String keyName) - throws IOException { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - Preconditions.checkNotNull(keyName); - OmKeyArgs keyArgs = new OmKeyArgs.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .build(); - ozoneManagerClient.deleteKey(keyArgs); - } - - @Override - public void renameKey(String volumeName, String bucketName, - String fromKeyName, String toKeyName) throws IOException { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - HddsClientUtils.checkNotNull(fromKeyName, toKeyName); - OmKeyArgs keyArgs = new OmKeyArgs.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(fromKeyName) - .build(); - ozoneManagerClient.renameKey(keyArgs, toKeyName); - } - - @Override - public List listKeys(String volumeName, String bucketName, - String keyPrefix, String prevKey, - int maxListResult) - throws IOException { - List keys = ozoneManagerClient.listKeys( - volumeName, bucketName, prevKey, keyPrefix, maxListResult); - - return keys.stream().map(key -> new OzoneKey( - key.getVolumeName(), - key.getBucketName(), - key.getKeyName(), - key.getDataSize(), - key.getCreationTime(), - key.getModificationTime())) - .collect(Collectors.toList()); - } - - @Override - public OzoneKeyDetails getKeyDetails( - String volumeName, String bucketName, String keyName) - throws IOException { - Preconditions.checkNotNull(volumeName); - Preconditions.checkNotNull(bucketName); - Preconditions.checkNotNull(keyName); - OmKeyArgs keyArgs = new OmKeyArgs.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .build(); - OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs); - - List ozoneKeyLocations = new ArrayList<>(); - keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly().forEach( - (a) -> ozoneKeyLocations.add(new OzoneKeyLocation(a.getContainerID(), - a.getLocalID(), a.getLength(), a.getOffset()))); - return new OzoneKeyDetails(keyInfo.getVolumeName(), - keyInfo.getBucketName(), - keyInfo.getKeyName(), - keyInfo.getDataSize(), - keyInfo.getCreationTime(), - keyInfo.getModificationTime(), - ozoneKeyLocations); - } - - @Override - public void close() throws IOException { - IOUtils.cleanupWithLogger(LOG, storageContainerLocationClient); - IOUtils.cleanupWithLogger(LOG, ozoneManagerClient); - IOUtils.cleanupWithLogger(LOG, xceiverClientManager); - } -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/package-info.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/package-info.java deleted file mode 100644 index 0fcc3fc35837c..0000000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.rpc; - -/** - * This package contains Ozone rpc client library classes. - */ diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java deleted file mode 100644 index 3aefe8ac2385d..0000000000000 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java +++ /dev/null @@ -1,105 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.junit.rules.Timeout; - -import java.net.InetSocketAddress; - -import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients; -import static org.apache.hadoop.ozone.OmUtils.getOmAddress; -import static org.hamcrest.core.Is.is; -import static org.junit.Assert.assertThat; - -/** - * This test class verifies the parsing of SCM endpoint config settings. The - * parsing logic is in {@link org.apache.hadoop.hdds.scm.client.HddsClientUtils}. - */ -public class TestHddsClientUtils { - @Rule - public Timeout timeout = new Timeout(300000); - - @Rule - public ExpectedException thrown = ExpectedException.none(); - - /** - * Verify client endpoint lookup failure if it is not configured. - */ - @Test - public void testMissingScmClientAddress() { - final Configuration conf = new OzoneConfiguration(); - thrown.expect(IllegalArgumentException.class); - getScmAddressForClients(conf); - } - - /** - * Verify that the client endpoint can be correctly parsed from - * configuration. - */ - @Test - public void testGetScmClientAddress() { - final Configuration conf = new OzoneConfiguration(); - - // First try a client address with just a host name. Verify it falls - // back to the default port. - conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4"); - InetSocketAddress addr = getScmAddressForClients(conf); - assertThat(addr.getHostString(), is("1.2.3.4")); - assertThat(addr.getPort(), is(ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT)); - - // Next try a client address with a host name and port. Verify both - // are used correctly. - conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100"); - addr = getScmAddressForClients(conf); - assertThat(addr.getHostString(), is("1.2.3.4")); - assertThat(addr.getPort(), is(100)); - } - - @Test - public void testGetOmAddress() { - final Configuration conf = new OzoneConfiguration(); - - // First try a client address with just a host name. Verify it falls - // back to the default port. - conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, "1.2.3.4"); - InetSocketAddress addr = getOmAddress(conf); - assertThat(addr.getHostString(), is("1.2.3.4")); - assertThat(addr.getPort(), is(OMConfigKeys.OZONE_OM_PORT_DEFAULT)); - - // Next try a client address with a host name and port. Verify both - // are used correctly. - conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, "1.2.3.4:100"); - addr = getOmAddress(conf); - assertThat(addr.getHostString(), is("1.2.3.4")); - assertThat(addr.getPort(), is(100)); - - // Assert that we are able to use default configs if no value is specified. 
- conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, ""); - addr = getOmAddress(conf); - assertThat(addr.getHostString(), is("0.0.0.0")); - assertThat(addr.getPort(), is(OMConfigKeys.OZONE_OM_PORT_DEFAULT)); - } -} diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/package-info.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/package-info.java deleted file mode 100644 index be63eab0c7ac4..0000000000000 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client; - -/** - * This package contains test classes for Ozone Client. - */ \ No newline at end of file diff --git a/hadoop-ozone/common/dev-support/findbugsExcludeFile.xml b/hadoop-ozone/common/dev-support/findbugsExcludeFile.xml deleted file mode 100644 index df58f3650a438..0000000000000 --- a/hadoop-ozone/common/dev-support/findbugsExcludeFile.xml +++ /dev/null @@ -1,22 +0,0 @@ - - - - - - - diff --git a/hadoop-ozone/common/pom.xml b/hadoop-ozone/common/pom.xml deleted file mode 100644 index 942576b610a5a..0000000000000 --- a/hadoop-ozone/common/pom.xml +++ /dev/null @@ -1,115 +0,0 @@ - - - - 4.0.0 - - org.apache.hadoop - hadoop-ozone - 0.3.0-SNAPSHOT - - hadoop-ozone-common - 0.3.0-SNAPSHOT - Apache Hadoop Ozone Common - Apache Hadoop Ozone Common - jar - - - - - - - - - ${basedir}/src/main/resources - - ozone-version-info.properties - - false - - - ${basedir}/src/main/resources - - ozone-version-info.properties - - true - - - - - org.apache.hadoop - hadoop-maven-plugins - - - version-info - generate-resources - - version-info - - - - ${basedir}/../ - - */src/main/java/**/*.java - */src/main/proto/*.proto - - - - - - compile-protoc - - protoc - - - ${protobuf.version} - ${protoc.path} - - - ${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto - - - ${basedir}/../../hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ - - - ${basedir}/../../hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ - - - ${basedir}/../../hadoop-hdds/common/src/main/proto/ - - ${basedir}/src/main/proto - - - ${basedir}/src/main/proto - - OzoneManagerProtocol.proto - - - - - - - - org.codehaus.mojo - findbugs-maven-plugin - - ${basedir}/dev-support/findbugsExcludeFile.xml - - - - - diff --git a/hadoop-ozone/common/src/main/bin/ozone b/hadoop-ozone/common/src/main/bin/ozone deleted file mode 100755 index 4b50771176526..0000000000000 --- a/hadoop-ozone/common/src/main/bin/ozone +++ /dev/null @@ -1,242 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. 
See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# The name of the script being executed. -HADOOP_SHELL_EXECNAME="ozone" -MYNAME="${BASH_SOURCE-$0}" - -## @description build up the hdfs command's usage text. -## @audience public -## @stability stable -## @replaceable no -function hadoop_usage -{ - hadoop_add_option "--buildpaths" "attempt to add class files from build tree" - hadoop_add_option "--daemon (start|status|stop)" "operate on a daemon" - hadoop_add_option "--hostnames list[,of,host,names]" "hosts to use in worker mode" - hadoop_add_option "--hosts filename" "list of hosts to use in worker mode" - hadoop_add_option "--loglevel level" "set the log4j level for this command" - hadoop_add_option "--workers" "turn on worker mode" - - hadoop_add_subcommand "classpath" client "prints the class path needed to get the hadoop jar and the required libraries" - hadoop_add_subcommand "datanode" daemon "run a HDDS datanode" - hadoop_add_subcommand "envvars" client "display computed Hadoop environment variables" - hadoop_add_subcommand "freon" client "runs an ozone data generator" - hadoop_add_subcommand "fs" client "run a filesystem command on Ozone file system. Equivalent to 'hadoop fs'" - hadoop_add_subcommand "genconf" client "generate minimally required ozone configs and output to ozone-site.xml in specified path" - hadoop_add_subcommand "genesis" client "runs a collection of ozone benchmarks to help with tuning." - hadoop_add_subcommand "getozoneconf" client "get ozone config values from configuration" - hadoop_add_subcommand "jmxget" admin "get JMX exported values from NameNode or DataNode." 
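The usage table being registered here, together with the ozonecmd_case function that follows, amounts to a dispatch from subcommand name to a main class plus the artifact whose classpath descriptor must be loaded. A rough Java analogue of a few of those mappings (illustrative only; the real dispatch stays in bash):

```java
import java.util.Map;

// Rough analogue of the shell dispatch table; not part of Ozone itself.
public final class SubcommandTable {
  // subcommand -> main class (artifact names omitted for brevity)
  private static final Map<String, String> MAIN_CLASSES = Map.of(
      "om", "org.apache.hadoop.ozone.om.OzoneManager",
      "scm", "org.apache.hadoop.hdds.scm.server.StorageContainerManager",
      "freon", "org.apache.hadoop.ozone.freon.Freon",
      "s3g", "org.apache.hadoop.ozone.s3.Gateway");

  private SubcommandTable() { }

  public static void main(String[] args) {
    String subcmd = args.length > 0 ? args[0] : "om";
    // Unknown names fall through to "treat the argument as a class name",
    // mirroring the *) branch of the case statement below.
    System.out.println(MAIN_CLASSES.getOrDefault(subcmd, subcmd));
  }
}
```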
- hadoop_add_subcommand "noz" client "ozone debug tool, convert ozone metadata into relational data" - hadoop_add_subcommand "om" daemon "Ozone Manager" - hadoop_add_subcommand "scm" daemon "run the Storage Container Manager service" - hadoop_add_subcommand "s3g" daemon "run the S3 compatible REST gateway" - hadoop_add_subcommand "scmcli" client "run the CLI of the Storage Container Manager" - hadoop_add_subcommand "sh" client "command line interface for object store operations" - hadoop_add_subcommand "version" client "print the version" - - hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" false -} - -## @description Default command handler for hadoop command -## @audience public -## @stability stable -## @replaceable no -## @param CLI arguments -function ozonecmd_case -{ - subcmd=$1 - shift - - case ${subcmd} in - classpath) - hadoop_do_classpath_subcommand HADOOP_CLASSNAME "$@" - ;; - datanode) - HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" - HADOOP_CLASSNAME=org.apache.hadoop.ozone.HddsDatanodeService - OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-datanode" - ;; - envvars) - echo "JAVA_HOME='${JAVA_HOME}'" - echo "HADOOP_HDFS_HOME='${HADOOP_HDFS_HOME}'" - echo "HDFS_DIR='${HDFS_DIR}'" - echo "HDFS_LIB_JARS_DIR='${HDFS_LIB_JARS_DIR}'" - echo "HADOOP_CONF_DIR='${HADOOP_CONF_DIR}'" - echo "HADOOP_TOOLS_HOME='${HADOOP_TOOLS_HOME}'" - echo "HADOOP_TOOLS_DIR='${HADOOP_TOOLS_DIR}'" - echo "HADOOP_TOOLS_LIB_JARS_DIR='${HADOOP_TOOLS_LIB_JARS_DIR}'" - if [[ -n "${QATESTMODE}" ]]; then - echo "MYNAME=${MYNAME}" - echo "HADOOP_SHELL_EXECNAME=${HADOOP_SHELL_EXECNAME}" - fi - exit 0 - ;; - freon) - HADOOP_CLASSNAME=org.apache.hadoop.ozone.freon.Freon - OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-tools" - ;; - genesis) - HADOOP_CLASSNAME=org.apache.hadoop.ozone.genesis.Genesis - OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-tools" - ;; - getozoneconf) - HADOOP_CLASSNAME=org.apache.hadoop.ozone.freon.OzoneGetConf; - OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-tools" - ;; - om) - HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" - HADOOP_CLASSNAME=org.apache.hadoop.ozone.om.OzoneManager - HDFS_OM_OPTS="${HDFS_OM_OPTS} -Dlog4j.configurationFile=${HADOOP_CONF_DIR}/om-audit-log4j2.properties" - HADOOP_OPTS="${HADOOP_OPTS} ${HDFS_OM_OPTS}" - OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-ozone-manager" - ;; - sh | shell) - HADOOP_CLASSNAME=org.apache.hadoop.ozone.web.ozShell.Shell - OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-ozone-manager" - ;; - scm) - HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" - HADOOP_CLASSNAME='org.apache.hadoop.hdds.scm.server.StorageContainerManager' - hadoop_debug "Appending HDFS_STORAGECONTAINERMANAGER_OPTS onto HADOOP_OPTS" - HADOOP_OPTS="${HADOOP_OPTS} ${HDFS_STORAGECONTAINERMANAGER_OPTS}" - OZONE_RUN_ARTIFACT_NAME="hadoop-hdds-server-scm" - ;; - s3g) - HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" - HADOOP_CLASSNAME='org.apache.hadoop.ozone.s3.Gateway' - OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-s3gateway" - ;; - fs) - HADOOP_CLASSNAME=org.apache.hadoop.fs.FsShell - OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-tools" - ;; - scmcli) - HADOOP_CLASSNAME=org.apache.hadoop.hdds.scm.cli.SCMCLI - OZONE_RUN_ARTIFACT_NAME="hadoop-hdds-tools" - ;; - version) - HADOOP_CLASSNAME=org.apache.hadoop.ozone.util.OzoneVersionInfo - OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-common" - ;; - genconf) - HADOOP_CLASSNAME=org.apache.hadoop.ozone.genconf.GenerateOzoneRequiredConfigurations - OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-tools" - ;; - *) - HADOOP_CLASSNAME="${subcmd}" - if ! 
hadoop_validate_classname "${HADOOP_CLASSNAME}"; then - hadoop_exit_with_usage 1 - fi - ;; - esac -} - -# let's locate libexec... -if [[ -n "${HADOOP_HOME}" ]]; then - HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec" -else - bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P) - HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec" -fi - -HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}" -# shellcheck disable=SC2034 -HADOOP_NEW_CONFIG=true -if [[ -f "${HADOOP_LIBEXEC_DIR}/ozone-config.sh" ]]; then - # shellcheck source=./hadoop-ozone/common/src/main/bin/ozone-config.sh - . "${HADOOP_LIBEXEC_DIR}/ozone-config.sh" -else - echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/ozone-config.sh." 2>&1 - exit 1 -fi - -# now that we have support code, let's abs MYNAME so we can use it later -MYNAME=$(hadoop_abs "${MYNAME}") - -if [[ $# = 0 ]]; then - hadoop_exit_with_usage 1 -fi - -HADOOP_SUBCMD=$1 -shift - - -if hadoop_need_reexec ozone "${HADOOP_SUBCMD}"; then - hadoop_uservar_su ozone "${HADOOP_SUBCMD}" \ - "${MYNAME}" \ - "--reexec" \ - "${HADOOP_USER_PARAMS[@]}" - exit $? -fi - -hadoop_verify_user_perm "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}" - -HADOOP_SUBCMD_ARGS=("$@") - -if declare -f ozone_subcommand_"${HADOOP_SUBCMD}" >/dev/null 2>&1; then - hadoop_debug "Calling dynamically: ozone_subcommand_${HADOOP_SUBCMD} ${HADOOP_SUBCMD_ARGS[*]}" - "ozone_subcommand_${HADOOP_SUBCMD}" "${HADOOP_SUBCMD_ARGS[@]}" -else - ozonecmd_case "${HADOOP_SUBCMD}" "${HADOOP_SUBCMD_ARGS[@]}" -fi - - -# -# Setting up classpath based on the generated classpath descriptors -# -if [ ! "$OZONE_RUN_ARTIFACT_NAME" ]; then - echo "ERROR: Ozone components require OZONE_RUN_ARTIFACT_NAME to be set to build the classpath" - exit -1 -fi -export HDDS_LIB_JARS_DIR="${HADOOP_HDFS_HOME}/share/ozone/lib" -CLASSPATH_FILE="${HADOOP_HDFS_HOME}/share/ozone/classpath/${OZONE_RUN_ARTIFACT_NAME}.classpath" -if [ ! -f "$CLASSPATH_FILE" ]; then - echo "ERROR: Classpath file descriptor $CLASSPATH_FILE is missing" - exit -1 -fi -# shellcheck disable=SC1090,SC2086 -source $CLASSPATH_FILE -OIFS=$IFS -IFS=':' -# shellcheck disable=SC2154 -for jar in $classpath; do - hadoop_add_classpath "$jar" -done -hadoop_add_classpath "${HADOOP_HDFS_HOME}/share/ozone/web" - -#We need to add the artifact manually as it's not part of the generated classpath descriptor -ARTIFACT_LIB_DIR="${HADOOP_HDFS_HOME}/share/ozone/lib" -MAIN_ARTIFACT=$(find "$ARTIFACT_LIB_DIR" -name "${OZONE_RUN_ARTIFACT_NAME}-*.jar") -if [ ! "$MAIN_ARTIFACT" ]; then - echo "ERROR: Component jar file $MAIN_ARTIFACT is missing from ${HADOOP_HDFS_HOME}/share/ozone/lib" -fi -hadoop_add_classpath "${MAIN_ARTIFACT}" -IFS=$OIFS - - -hadoop_add_client_opts - -if [[ ${HADOOP_WORKER_MODE} = true ]]; then - hadoop_common_worker_mode_execute "${HADOOP_HDFS_HOME}/bin/ozone" "${HADOOP_USER_PARAMS[@]}" - exit $? -fi - -hadoop_subcommand_opts "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}" - -# everything is in globals at this point, so call the generic handler -hadoop_generic_java_subcmd_handler diff --git a/hadoop-ozone/common/src/main/bin/ozone-config.sh b/hadoop-ozone/common/src/main/bin/ozone-config.sh deleted file mode 100755 index d179a331ae990..0000000000000 --- a/hadoop-ozone/common/src/main/bin/ozone-config.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. 
-# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# included in all the ozone scripts with source command -# should not be executed directly - -function hadoop_subproject_init -{ - if [[ -z "${HADOOP_OZONE_ENV_PROCESSED}" ]]; then - if [[ -e "${HADOOP_CONF_DIR}/ozone-env.sh" ]]; then - . "${HADOOP_CONF_DIR}/ozone-env.sh" - export HADOOP_OZONE_ENV_PROCESSED=true - fi - fi - HADOOP_OZONE_HOME="${HADOOP_OZONE_HOME:-$HADOOP_HOME}" - -} - -if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then - _hd_this="${BASH_SOURCE-$0}" - HADOOP_LIBEXEC_DIR=$(cd -P -- "$(dirname -- "${_hd_this}")" >/dev/null && pwd -P) -fi - -# shellcheck source=./hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh - -if [[ -n "${HADOOP_COMMON_HOME}" ]] && - [[ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh" ]]; then - . "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh" -elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then - . "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" -elif [ -e "${HADOOP_HOME}/libexec/hadoop-config.sh" ]; then - . "${HADOOP_HOME}/libexec/hadoop-config.sh" -else - echo "ERROR: Hadoop common not found." 2>&1 - exit 1 -fi - diff --git a/hadoop-ozone/common/src/main/bin/start-ozone.sh b/hadoop-ozone/common/src/main/bin/start-ozone.sh deleted file mode 100755 index cfb54e033252a..0000000000000 --- a/hadoop-ozone/common/src/main/bin/start-ozone.sh +++ /dev/null @@ -1,127 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Start hadoop hdfs and ozone daemons. -# Run this on master node. -## @description usage info -## @audience private -## @stability evolving -## @replaceable no -function hadoop_usage -{ - echo "Usage: start-ozone.sh" -} - -this="${BASH_SOURCE-$0}" -bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P) - -# let's locate libexec... -if [[ -n "${HADOOP_HOME}" ]]; then - HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec" -else - HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec" -fi - -HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}" -# shellcheck disable=SC2034 -HADOOP_NEW_CONFIG=true -if [[ -f "${HADOOP_LIBEXEC_DIR}/ozone-config.sh" ]]; then - # shellcheck disable=SC1090 - . 
"${HADOOP_LIBEXEC_DIR}/ozone-config.sh" -else - echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/ozone-config.sh." 2>&1 - exit 1 -fi - -# get arguments -if [[ $# -ge 1 ]]; then - startOpt="$1" - shift - case "$startOpt" in - -upgrade) - nameStartOpt="$startOpt" - ;; - -rollback) - dataStartOpt="$startOpt" - ;; - *) - hadoop_exit_with_usage 1 - ;; - esac -fi - -#Add other possible options -nameStartOpt="$nameStartOpt $*" - -SECURITY_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authentication | tr '[:upper:]' '[:lower:]' 2>&-) -SECURITY_AUTHORIZATION_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authorization | tr '[:upper:]' '[:lower:]' 2>&-) - -if [[ ${SECURITY_ENABLED} == "kerberos" || ${SECURITY_AUTHORIZATION_ENABLED} == "true" ]]; then - echo "Ozone is not supported in a security enabled cluster." - exit 1 -fi - -#--------------------------------------------------------- -# Check if ozone is enabled -OZONE_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey ozone.enabled | tr '[:upper:]' '[:lower:]' 2>&-) -if [[ "${OZONE_ENABLED}" != "true" ]]; then - echo "Operation is not supported because ozone is not enabled." - exit -1 -fi - -#--------------------------------------------------------- -# datanodes (using default workers file) - -echo "Starting datanodes" -hadoop_uservar_su hdfs datanode "${HADOOP_HDFS_HOME}/bin/ozone" \ - --workers \ - --config "${HADOOP_CONF_DIR}" \ - --daemon start \ - datanode ${dataStartOpt} -(( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? )) - -#--------------------------------------------------------- -# Ozone ozonemanager nodes -OM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -ozonemanagers 2>/dev/null) -echo "Starting Ozone Manager nodes [${OM_NODES}]" -if [[ "${OM_NODES}" == "0.0.0.0" ]]; then - OM_NODES=$(hostname) -fi - -hadoop_uservar_su hdfs om "${HADOOP_HDFS_HOME}/bin/ozone" \ - --workers \ - --config "${HADOOP_CONF_DIR}" \ - --hostnames "${OM_NODES}" \ - --daemon start \ - om - -HADOOP_JUMBO_RETCOUNTER=$? - -#--------------------------------------------------------- -# Ozone storagecontainermanager nodes -SCM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -storagecontainermanagers 2>/dev/null) -echo "Starting storage container manager nodes [${SCM_NODES}]" -hadoop_uservar_su hdfs scm "${HADOOP_HDFS_HOME}/bin/ozone" \ - --workers \ - --config "${HADOOP_CONF_DIR}" \ - --hostnames "${SCM_NODES}" \ - --daemon start \ - scm - -(( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? )) - -exit ${HADOOP_JUMBO_RETCOUNTER} diff --git a/hadoop-ozone/common/src/main/bin/stop-ozone.sh b/hadoop-ozone/common/src/main/bin/stop-ozone.sh deleted file mode 100755 index 97e1df4df365e..0000000000000 --- a/hadoop-ozone/common/src/main/bin/stop-ozone.sh +++ /dev/null @@ -1,101 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Stop hdfs and ozone daemons. -# Run this on master node. -## @description usage info -## @audience private -## @stability evolving -## @replaceable no -function hadoop_usage -{ - echo "Usage: stop-ozone.sh" -} - -this="${BASH_SOURCE-$0}" -bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P) - -# let's locate libexec... -if [[ -n "${HADOOP_HOME}" ]]; then - HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec" -else - HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec" -fi - -HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}" -# shellcheck disable=SC2034 -HADOOP_NEW_CONFIG=true -if [[ -f "${HADOOP_LIBEXEC_DIR}/ozone-config.sh" ]]; then - # shellcheck disable=SC1090 - . "${HADOOP_LIBEXEC_DIR}/ozone-config.sh" -else - echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/ozone-config.sh." 2>&1 - exit 1 -fi - -SECURITY_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authentication | tr '[:upper:]' '[:lower:]' 2>&-) -SECURITY_AUTHORIZATION_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authorization | tr '[:upper:]' '[:lower:]' 2>&-) - -if [[ ${SECURITY_ENABLED} == "kerberos" || ${SECURITY_AUTHORIZATION_ENABLED} == "true" ]]; then - echo "Ozone is not supported in a security enabled cluster." - exit 1 -fi - -#--------------------------------------------------------- -# Check if ozone is enabled -OZONE_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey ozone.enabled | tr '[:upper:]' '[:lower:]' 2>&-) -if [[ "${OZONE_ENABLED}" != "true" ]]; then - echo "Operation is not supported because ozone is not enabled." 
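Both start-ozone.sh and stop-ozone.sh gate every operation on the ozone.enabled key, fetched through `ozone getozoneconf`. A sketch of the same gate done directly against the Hadoop Configuration API (the key name comes from the scripts; defaulting to false is an assumption):

```java
import org.apache.hadoop.conf.Configuration;

// Sketch of the ozone.enabled gate the scripts perform via getozoneconf.
public final class OzoneEnabledCheck {
  private OzoneEnabledCheck() { }

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.addResource("ozone-site.xml"); // picked up from the classpath, if present
    // The scripts compare the lower-cased value against "true"; getBoolean
    // performs the equivalent parse. Defaulting to false is an assumption.
    if (!conf.getBoolean("ozone.enabled", false)) {
      System.err.println("Operation is not supported because ozone is not enabled.");
      System.exit(1);
    }
  }
}
```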
- exit -1 -fi - -#--------------------------------------------------------- -# datanodes (using default workers file) - -echo "Stopping datanodes" - -hadoop_uservar_su ozone datanode "${HADOOP_HDFS_HOME}/bin/ozone" \ - --workers \ - --config "${HADOOP_CONF_DIR}" \ - --daemon stop \ - datanode - -#--------------------------------------------------------- -# Ozone Manager nodes -OM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -ozonemanagers 2>/dev/null) -echo "Stopping Ozone Manager nodes [${OM_NODES}]" -if [[ "${OM_NODES}" == "0.0.0.0" ]]; then - OM_NODES=$(hostname) -fi - -hadoop_uservar_su hdfs om "${HADOOP_HDFS_HOME}/bin/ozone" \ - --workers \ - --config "${HADOOP_CONF_DIR}" \ - --hostnames "${OM_NODES}" \ - --daemon stop \ - om - -#--------------------------------------------------------- -# Ozone storagecontainermanager nodes -SCM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -storagecontainermanagers 2>/dev/null) -echo "Stopping storage container manager nodes [${SCM_NODES}]" -hadoop_uservar_su hdfs scm "${HADOOP_HDFS_HOME}/bin/ozone" \ - --workers \ - --config "${HADOOP_CONF_DIR}" \ - --hostnames "${SCM_NODES}" \ - --daemon stop \ - scm \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/conf/om-audit-log4j2.properties b/hadoop-ozone/common/src/main/conf/om-audit-log4j2.properties deleted file mode 100644 index 7d097a081a223..0000000000000 --- a/hadoop-ozone/common/src/main/conf/om-audit-log4j2.properties +++ /dev/null @@ -1,86 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with this -# work for additional information regarding copyright ownership. The ASF -# licenses this file to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -#
-# http://www.apache.org/licenses/LICENSE-2.0 -#
-# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS,WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. -# -name=PropertiesConfig - -# Checks for config change periodically and reloads -monitorInterval=30 - -filter=read,write -# filter.read.onMatch=DENY avoids logging all READ events -# filter.read.onMatch=ACCEPT permits logging all READ events -# The above two settings ignore the log levels in configuration -# filter.read.onMatch=NEUTRAL permits logging of only those READ events -# which are attempted at log level equal or greater than log level specified -# in the configuration -filter.read.type=MarkerFilter -filter.read.marker=READ -filter.read.onMatch=DENY -filter.read.onMismatch=NEUTRAL - -# filter.write.onMatch=DENY avoids logging all WRITE events -# filter.write.onMatch=ACCEPT permits logging all WRITE events -# The above two settings ignore the log levels in configuration -# filter.write.onMatch=NEUTRAL permits logging of only those WRITE events -# which are attempted at log level equal or greater than log level specified -# in the configuration -filter.write.type=MarkerFilter -filter.write.marker=WRITE -filter.write.onMatch=NEUTRAL -filter.write.onMismatch=NEUTRAL - -# Log Levels are organized from most specific to least: -# OFF (most specific, no logging) -# FATAL (most specific, little data) -# ERROR -# WARN -# INFO -# DEBUG -# TRACE (least specific, a lot of data) -# ALL (least specific, all data) - -appenders=console, rolling -appender.console.type=Console -appender.console.name=STDOUT -appender.console.layout.type=PatternLayout -appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n - -#Rolling File Appender with size & time thresholds. -#Rolling is triggered when either threshold is breached. 
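With the MarkerFilter settings above, READ-marked audit events are denied outright while WRITE-marked events pass through to the appenders configured in the rest of the file. A minimal sketch of the emitting side with the Log4j2 API, assuming this properties file is on the classpath (the logger name OMAudit matches the logger declared further down; the event text is illustrative):

```java
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.Marker;
import org.apache.logging.log4j.MarkerManager;

// Sketch of marker-based audit logging against the MarkerFilter rules above.
public final class AuditEmitter {
  private static final Logger AUDIT = LogManager.getLogger("OMAudit");
  private static final Marker READ = MarkerManager.getMarker("READ");
  private static final Marker WRITE = MarkerManager.getMarker("WRITE");

  private AuditEmitter() { }

  public static void main(String[] args) {
    AUDIT.info(READ, "LIST_KEYS volume=vol1 bucket=b1");  // dropped: READ -> DENY
    AUDIT.info(WRITE, "CREATE_VOLUME volume=vol1");       // logged: WRITE -> NEUTRAL
  }
}
```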
-#The rolled over file is compressed by default -#Time interval is specified in seconds 86400s=1 day -appender.rolling.type=RollingFile -appender.rolling.name=RollingFile -appender.rolling.fileName =${sys:hadoop.log.dir}/om-audit-${hostName}.log -appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz -appender.rolling.layout.type=PatternLayout -appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n -appender.rolling.policies.type=Policies -appender.rolling.policies.time.type=TimeBasedTriggeringPolicy -appender.rolling.policies.time.interval=86400 -appender.rolling.policies.size.type=SizeBasedTriggeringPolicy -appender.rolling.policies.size.size=64MB - -loggers=audit -logger.audit.type=AsyncLogger -logger.audit.name=OMAudit -logger.audit.level=INFO -logger.audit.appenderRefs=rolling -logger.audit.appenderRef.file.ref=RollingFile - -rootLogger.level=INFO -rootLogger.appenderRefs=stdout -rootLogger.appenderRef.stdout.ref=STDOUT diff --git a/hadoop-ozone/common/src/main/conf/ozone-site.xml b/hadoop-ozone/common/src/main/conf/ozone-site.xml deleted file mode 100644 index 77dd7ef994026..0000000000000 --- a/hadoop-ozone/common/src/main/conf/ozone-site.xml +++ /dev/null @@ -1,24 +0,0 @@ - - - - - - - - - diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java deleted file mode 100644 index 097410405f068..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java +++ /dev/null @@ -1,94 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone; - -import java.net.InetSocketAddress; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.net.NetUtils; - -import com.google.common.base.Optional; -import static org.apache.hadoop.hdds.HddsUtils.getHostNameFromConfigKeys; -import static org.apache.hadoop.hdds.HddsUtils.getPortNumberFromConfigKeys; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_BIND_HOST_DEFAULT; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_BIND_PORT_DEFAULT; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_PORT_DEFAULT; - -/** - * Stateless helper functions for the server and client side of OM - * communication. - */ -public final class OmUtils { - - private OmUtils() { - } - - /** - * Retrieve the socket address that is used by OM. - * @param conf - * @return Target InetSocketAddress for the SCM service endpoint. - */ - public static InetSocketAddress getOmAddress( - Configuration conf) { - final Optional host = getHostNameFromConfigKeys(conf, - OZONE_OM_ADDRESS_KEY); - - return NetUtils.createSocketAddr( - host.or(OZONE_OM_BIND_HOST_DEFAULT) + ":" + - getOmRpcPort(conf)); - } - - /** - * Retrieve the socket address that should be used by clients to connect - * to OM. - * @param conf - * @return Target InetSocketAddress for the OM service endpoint. - */ - public static InetSocketAddress getOmAddressForClients( - Configuration conf) { - final Optional host = getHostNameFromConfigKeys(conf, - OZONE_OM_ADDRESS_KEY); - - if (!host.isPresent()) { - throw new IllegalArgumentException( - OZONE_OM_ADDRESS_KEY + " must be defined. See" + - " https://wiki.apache.org/hadoop/Ozone#Configuration for" + - " details on configuring Ozone."); - } - - return NetUtils.createSocketAddr( - host.get() + ":" + getOmRpcPort(conf)); - } - - public static int getOmRpcPort(Configuration conf) { - // If no port number is specified then we'll just try the defaultBindPort. - final Optional port = getPortNumberFromConfigKeys(conf, - OZONE_OM_ADDRESS_KEY); - return port.or(OZONE_OM_PORT_DEFAULT); - } - - public static int getOmRestPort(Configuration conf) { - // If no port number is specified then we'll just try the default - // HTTP BindPort. - final Optional port = - getPortNumberFromConfigKeys(conf, OZONE_OM_HTTP_ADDRESS_KEY); - return port.or(OZONE_OM_HTTP_BIND_PORT_DEFAULT); - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java deleted file mode 100644 index a0ae455303e59..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java +++ /dev/null @@ -1,61 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.audit; - -/** - * Enum to define Audit Action types for OzoneManager. - */ -public enum OMAction implements AuditAction { - - // WRITE Actions - ALLOCATE_BLOCK("ALLOCATE_BLOCK"), - ALLOCATE_KEY("ALLOCATE_KEY"), - COMMIT_KEY("COMMIT_KEY"), - CREATE_VOLUME("CREATE_VOLUME"), - CREATE_BUCKET("CREATE_BUCKET"), - CREATE_KEY("CREATE_KEY"), - DELETE_VOLUME("DELETE_VOLUME"), - DELETE_BUCKET("DELETE_BUCKET"), - DELETE_KEY("DELETE_KEY"), - RENAME_KEY("RENAME_KEY"), - SET_OWNER("SET_OWNER"), - SET_QUOTA("SET_QUOTA"), - UPDATE_VOLUME("UPDATE_VOLUME"), - UPDATE_BUCKET("UPDATE_BUCKET"), - UPDATE_KEY("UPDATE_KEY"), - // READ Actions - CHECK_VOLUME_ACCESS("CHECK_VOLUME_ACCESS"), - LIST_BUCKETS("LIST_BUCKETS"), - LIST_VOLUMES("LIST_VOLUMES"), - LIST_KEYS("LIST_KEYS"), - READ_VOLUME("READ_VOLUME"), - READ_BUCKET("READ_BUCKET"), - READ_KEY("READ_BUCKET"); - - private String action; - - OMAction(String action) { - this.action = action; - } - - @Override - public String getAction() { - return this.action; - } - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java deleted file mode 100644 index 0f887909d491d..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.audit; -/** - * This package defines OMAction - an implementation of AuditAction - * OMAction defines audit action types for various actions that will be - * audited in OzoneManager. - */ diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/LengthInputStream.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/LengthInputStream.java deleted file mode 100644 index baf1887c4687e..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/LengthInputStream.java +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.io; - -import java.io.FilterInputStream; -import java.io.InputStream; - -/** - * An input stream with length. - */ -public class LengthInputStream extends FilterInputStream { - - private final long length; - - /** - * Create an stream. - * @param in the underlying input stream. - * @param length the length of the stream. - */ - public LengthInputStream(InputStream in, long length) { - super(in); - this.length = length; - } - - /** @return the length. */ - public long getLength() { - return length; - } - - public InputStream getWrappedStream() { - return in; - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/package-info.java deleted file mode 100644 index ece1ff4463c54..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.ozone.client.io; - -/** - * IO related ozone helper classes. - */ \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/OzoneException.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/OzoneException.java deleted file mode 100644 index 953e3991f7bd2..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/OzoneException.java +++ /dev/null @@ -1,267 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.rest; - - -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; - -import com.fasterxml.jackson.annotation.JsonAutoDetect; -import com.fasterxml.jackson.annotation.JsonProperty; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; - -/** - * Class that represents various errors returned by the - * Ozone Layer. - */ -@InterfaceAudience.Private -public class OzoneException extends Exception { - - private static final ObjectReader READER = - new ObjectMapper().readerFor(OzoneException.class); - private static final ObjectMapper MAPPER; - - static { - MAPPER = new ObjectMapper(); - MAPPER.setVisibility( - MAPPER.getSerializationConfig().getDefaultVisibilityChecker() - .withCreatorVisibility(JsonAutoDetect.Visibility.NONE) - .withFieldVisibility(JsonAutoDetect.Visibility.NONE) - .withGetterVisibility(JsonAutoDetect.Visibility.NONE) - .withIsGetterVisibility(JsonAutoDetect.Visibility.NONE) - .withSetterVisibility(JsonAutoDetect.Visibility.NONE)); - } - - @JsonProperty("httpCode") - private long httpCode; - @JsonProperty("shortMessage") - private String shortMessage; - @JsonProperty("resource") - private String resource; - @JsonProperty("message") - private String message; - @JsonProperty("requestID") - private String requestId; - @JsonProperty("hostName") - private String hostID; - - /** - * Constructs a new exception with {@code null} as its detail message. The - * cause is not initialized, and may subsequently be initialized by a call - * to {@link #initCause}. - * - * This constructor is needed by the JSON serializer. - */ - public OzoneException() { - } - - - /** - * Constructor that allows a shortMessage and exception. - * - * @param httpCode Error Code - * @param shortMessage Short Message - * @param ex Exception - */ - public OzoneException(long httpCode, String shortMessage, Exception ex) { - super(ex); - this.message = ex.getMessage(); - this.shortMessage = shortMessage; - this.httpCode = httpCode; - } - - - /** - * Constructor that allows a shortMessage. 
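The static READER/MAPPER pair above gives the exception a stable JSON wire format, while the visibility overrides restrict Jackson to the annotated fields. A self-contained sketch of the same round-trip pattern on a hypothetical payload class (names and fields are stand-ins, not the Ozone types):

```java
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;

// Hypothetical stand-in showing the serialize/parse pattern used by the class.
public class JsonError {
  private static final ObjectMapper MAPPER = new ObjectMapper();
  private static final ObjectReader READER = MAPPER.readerFor(JsonError.class);

  @JsonProperty("httpCode")
  private long httpCode;
  @JsonProperty("shortMessage")
  private String shortMessage;

  public JsonError() { }  // needed by the JSON deserializer, as above

  public JsonError(long httpCode, String shortMessage) {
    this.httpCode = httpCode;
    this.shortMessage = shortMessage;
  }

  public static void main(String[] args) throws Exception {
    String json = MAPPER.writeValueAsString(new JsonError(404, "Not Found"));
    JsonError parsed = READER.readValue(json);
    System.out.println(json + " -> " + parsed.shortMessage);
  }
}
```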
- * - * @param httpCode Error Code - * @param shortMessage Short Message - */ - public OzoneException(long httpCode, String shortMessage) { - this.shortMessage = shortMessage; - this.httpCode = httpCode; - } - - /** - * Constructor that allows a shortMessage and long message. - * - * @param httpCode Error Code - * @param shortMessage Short Message - * @param message long error message - */ - public OzoneException(long httpCode, String shortMessage, String message) { - this.shortMessage = shortMessage; - this.message = message; - this.httpCode = httpCode; - } - - /** - * Constructor that allows a shortMessage, a long message and an exception. - * - * @param httpCode Error code - * @param shortMessage Short message - * @param message Long error message - * @param ex Exception - */ - public OzoneException(long httpCode, String shortMessage, - String message, Exception ex) { - super(ex); - this.shortMessage = shortMessage; - this.message = message; - this.httpCode = httpCode; - } - - /** - * Returns the Resource that was involved in the stackTraceString. - * - * @return String - */ - public String getResource() { - return resource; - } - - /** - * Sets Resource. - * - * @param resourceName - Name of the Resource - */ - public void setResource(String resourceName) { - this.resource = resourceName; - } - - /** - * Gets a detailed message for the error. - * - * @return String - */ - public String getMessage() { - return message; - } - - /** - * Sets the error message. - * - * @param longMessage - Long message - */ - public void setMessage(String longMessage) { - this.message = longMessage; - } - - /** - * Returns request Id. - * - * @return String - */ - public String getRequestId() { - return requestId; - } - - /** - * Sets request ID. - * - * @param ozoneRequestId Request ID generated by the Server - */ - public void setRequestId(String ozoneRequestId) { - this.requestId = ozoneRequestId; - } - - /** - * Returns short error string. - * - * @return String - */ - public String getShortMessage() { - return shortMessage; - } - - /** - * Sets short error string. - * - * @param shortError Short Error Code - */ - public void setShortMessage(String shortError) { - this.shortMessage = shortError; - } - - /** - * Returns hostID. - * - * @return String - */ - public String getHostID() { - return hostID; - } - - /** - * Sets host ID. - * - * @param hostName host Name - */ - public void setHostID(String hostName) { - this.hostID = hostName; - } - - /** - * Returns http error code. - * - * @return long - */ - public long getHttpCode() { - return httpCode; - } - - /** - * Sets http status. - * - * @param httpStatus http error code. - */ - public void setHttpCode(long httpStatus) { - this.httpCode = httpStatus; - } - - /** - * Returns a Json String. - * - * @return JSON representation of the Error - */ - public String toJsonString() { - try { - return MAPPER.writeValueAsString(this); - } catch (IOException ex) { - // TODO : Log this error on server side. - } - // TODO : Replace this with a JSON Object -- That represents this error. - return "500 Internal Server Error"; - } - - /** - * Parses an Exception record. - * - * @param jsonString - Exception in Json format. 
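A sketch of the JSON round trip the OzoneException class above supported, assuming a build that still contains it; the status code and messages are illustrative values only.

    import org.apache.hadoop.ozone.client.rest.OzoneException;

    public class OzoneExceptionJsonDemo {
      public static void main(String[] args) throws Exception {
        OzoneException ex =
            new OzoneException(404, "NoSuchVolume", "volume vol1 does not exist");
        ex.setResource("vol1");
        // Serialize via the class's static Jackson MAPPER ...
        String json = ex.toJsonString();
        // ... and parse it back via the static READER.
        OzoneException parsed = OzoneException.parse(json);
        System.out.println(parsed.getHttpCode() + " " + parsed.getShortMessage());
      }
    }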
- * - * @return OzoneException Object - * - * @throws IOException - */ - public static OzoneException parse(String jsonString) throws IOException { - return READER.readValue(jsonString); - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/headers/Header.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/headers/Header.java deleted file mode 100644 index 3e404937061c5..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/headers/Header.java +++ /dev/null @@ -1,74 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.rest.headers; - -import org.apache.hadoop.classification.InterfaceAudience; - -/** - * OZONE specific HTTP headers. - */ -@InterfaceAudience.Private -public final class Header { - public static final String OZONE_QUOTA_BYTES = "BYTES"; - public static final String OZONE_QUOTA_MB = "MB"; - public static final String OZONE_QUOTA_GB = "GB"; - public static final String OZONE_QUOTA_TB = "TB"; - public static final String OZONE_QUOTA_REMOVE = "remove"; - public static final String OZONE_QUOTA_UNDEFINED = "undefined"; - public static final String OZONE_EMPTY_STRING=""; - public static final String OZONE_DEFAULT_LIST_SIZE = "1000"; - - public static final String OZONE_USER = "x-ozone-user"; - public static final String OZONE_SIMPLE_AUTHENTICATION_SCHEME = "OZONE"; - public static final String OZONE_VERSION_HEADER = "x-ozone-version"; - public static final String OZONE_V1_VERSION_HEADER ="v1"; - - public static final String OZONE_LIST_QUERY_SERVICE = "service"; - - public static final String OZONE_INFO_QUERY_VOLUME = "volume"; - public static final String OZONE_INFO_QUERY_BUCKET = "bucket"; - public static final String OZONE_INFO_QUERY_KEY = "key"; - public static final String OZONE_INFO_QUERY_KEY_DETAIL = "key-detail"; - - public static final String OZONE_REQUEST_ID = "x-ozone-request-id"; - public static final String OZONE_SERVER_NAME = "x-ozone-server-name"; - - public static final String OZONE_STORAGE_TYPE = "x-ozone-storage-type"; - - public static final String OZONE_BUCKET_VERSIONING = - "x-ozone-bucket-versioning"; - - public static final String OZONE_ACLS = "x-ozone-acls"; - public static final String OZONE_ACL_ADD = "ADD"; - public static final String OZONE_ACL_REMOVE = "REMOVE"; - - public static final String OZONE_INFO_QUERY_TAG ="info"; - public static final String OZONE_QUOTA_QUERY_TAG ="quota"; - public static final String CONTENT_MD5 = "Content-MD5"; - public static final String OZONE_LIST_QUERY_PREFIX="prefix"; - public static final String OZONE_LIST_QUERY_MAXKEYS="max-keys"; - public static final String OZONE_LIST_QUERY_PREVKEY="prev-key"; - public static final String 
OZONE_LIST_QUERY_ROOTSCAN="root-scan"; - - public static final String OZONE_RENAME_TO_KEY_PARAM_NAME = "toKey"; - - private Header() { - // Never constructed. - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/headers/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/headers/package-info.java deleted file mode 100644 index 76bc206e53d82..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/headers/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.client.rest.headers; - -/** - * Ozone HTTP Header utility. - */ \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/package-info.java deleted file mode 100644 index fc86dbb05767f..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.client.rest; - -/** - * Ozone REST interface. - */ \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/BucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/BucketInfo.java deleted file mode 100644 index af89b39efb2a7..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/BucketInfo.java +++ /dev/null @@ -1,231 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
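A sketch of how a REST caller attached the Ozone-specific headers defined in the Header class above to an outgoing request. The host, port, and path are assumptions for illustration, not an endpoint this patch defines.

    import java.net.HttpURLConnection;
    import java.net.URL;
    import org.apache.hadoop.ozone.client.rest.headers.Header;

    public class OzoneHeaderDemo {
      public static void main(String[] args) throws Exception {
        URL url = new URL("http://localhost:9880/vol1/bucket1");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("GET");
        // Identify the acting user and the REST protocol version.
        conn.setRequestProperty(Header.OZONE_USER, "hadoop");
        conn.setRequestProperty(Header.OZONE_VERSION_HEADER,
            Header.OZONE_V1_VERSION_HEADER);
        System.out.println(conn.getRequestProperty(Header.OZONE_USER));
      }
    }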
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.client.rest.response; - -import java.io.IOException; -import java.util.LinkedList; -import java.util.List; - -import org.apache.hadoop.fs.StorageType; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.OzoneConsts; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; -import com.google.common.base.Preconditions; - -/** - * BucketInfo class is used used for parsing json response - * when BucketInfo Call is made. - */ -public class BucketInfo implements Comparable { - - private static final ObjectReader READER = - new ObjectMapper().readerFor(BucketInfo.class); - - private String volumeName; - private String bucketName; - private String createdOn; - private List acls; - private OzoneConsts.Versioning versioning; - private StorageType storageType; - - /** - * Constructor for BucketInfo. - * - * @param volumeName - * @param bucketName - */ - public BucketInfo(String volumeName, String bucketName) { - this.volumeName = volumeName; - this.bucketName = bucketName; - } - - - /** - * Default constructor for BucketInfo. - */ - public BucketInfo() { - acls = new LinkedList<>(); - } - - /** - * Parse a JSON string into BucketInfo Object. - * - * @param jsonString Json String - * @return BucketInfo - * @throws IOException - */ - public static BucketInfo parse(String jsonString) throws IOException { - return READER.readValue(jsonString); - } - - /** - * Returns a List of ACLs set on the Bucket. - * - * @return List of Acl - */ - public List getAcls() { - return acls; - } - - /** - * Sets ACls. - * - * @param acls Acl list - */ - public void setAcls(List acls) { - this.acls = acls; - } - - /** - * Returns Storage Type info. - * - * @return Storage Type of the bucket - */ - public StorageType getStorageType() { - return storageType; - } - - /** - * Sets the Storage Type. - * - * @param storageType Storage Type - */ - public void setStorageType(StorageType storageType) { - this.storageType = storageType; - } - - /** - * Returns versioning. - * - * @return versioning Enum - */ - public OzoneConsts.Versioning getVersioning() { - return versioning; - } - - /** - * Sets Versioning. - * - * @param versioning - */ - public void setVersioning(OzoneConsts.Versioning versioning) { - this.versioning = versioning; - } - - - /** - * Gets bucket Name. - * - * @return String - */ - public String getBucketName() { - return bucketName; - } - - /** - * Sets bucket Name. - * - * @param bucketName Name of the bucket - */ - public void setBucketName(String bucketName) { - this.bucketName = bucketName; - } - - /** - * Sets creation time of the bucket. - * - * @param creationTime Date String - */ - public void setCreatedOn(String creationTime) { - this.createdOn = creationTime; - } - - /** - * Returns creation time. - * - * @return creation time of bucket. 
- */ - public String getCreatedOn() { - return createdOn; - } - - /** - * Returns Volume Name. - * - * @return String volume name - */ - public String getVolumeName() { - return volumeName; - } - - /** - * Sets the Volume Name of bucket. - * - * @param volumeName volumeName - */ - public void setVolumeName(String volumeName) { - this.volumeName = volumeName; - } - - /** - * Compares this object with the specified object for order. Returns a - * negative integer, zero, or a positive integer as this object is less - * than, equal to, or greater than the specified object. - * - * Please note : BucketInfo compare functions are used only within the - * context of a volume, hence volume name is purposefully ignored in - * compareTo, equal and hashcode functions of this class. - */ - @Override - public int compareTo(BucketInfo o) { - Preconditions.checkState(o.getVolumeName().equals(this.getVolumeName())); - return this.bucketName.compareTo(o.getBucketName()); - } - - /** - * Checks if two bucketInfo's are equal. - * @param o Object BucketInfo - * @return True or False - */ - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof BucketInfo)) { - return false; - } - - BucketInfo that = (BucketInfo) o; - Preconditions.checkState(that.getVolumeName().equals(this.getVolumeName())); - return bucketName.equals(that.bucketName); - - } - - /** - * Hash Code for this object. - * @return int - */ - @Override - public int hashCode() { - return bucketName.hashCode(); - } - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/KeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/KeyInfo.java deleted file mode 100644 index 61c2abb2a824f..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/KeyInfo.java +++ /dev/null @@ -1,216 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.rest.response; - -import java.io.IOException; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; -import org.apache.commons.lang3.builder.EqualsBuilder; -import org.apache.commons.lang3.builder.HashCodeBuilder; - -/** - * KeyInfo class is used used for parsing json response - * when KeyInfo Call is made. - */ -public class KeyInfo implements Comparable { - - private static final ObjectReader READER = - new ObjectMapper().readerFor(KeyInfo.class); - - private long version; - private String md5hash; - private String createdOn; - private String modifiedOn; - private long size; - private String keyName; - - /** - * When this key was created. 
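A sketch of parsing a JSON payload into the BucketInfo response class shown above; the sample document is hand-written to mirror its fields and is illustrative only.

    import org.apache.hadoop.ozone.client.rest.response.BucketInfo;

    public class BucketInfoParseDemo {
      public static void main(String[] args) throws Exception {
        String json = "{\"volumeName\":\"vol1\",\"bucketName\":\"bucket1\","
            + "\"createdOn\":\"Thu, 04 Oct 2018 10:00:00 GMT\"}";
        // Binding goes through the class's static Jackson READER and setters.
        BucketInfo info = BucketInfo.parse(json);
        System.out.println(info.getVolumeName() + "/" + info.getBucketName());
      }
    }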
- * - * @return Date String - */ - public String getCreatedOn() { - return createdOn; - } - - /** - * When this key was modified. - * - * @return Date String - */ - public String getModifiedOn() { - return modifiedOn; - } - - /** - * When this key was created. - * - * @param createdOn Date String - */ - public void setCreatedOn(String createdOn) { - this.createdOn = createdOn; - } - - /** - * When this key was modified. - * - * @param modifiedOn Date String - */ - public void setModifiedOn(String modifiedOn) { - this.modifiedOn = modifiedOn; - } - - /** - * Gets the Key name of this object. - * - * @return String - */ - public String getKeyName() { - return keyName; - } - - /** - * Sets the Key name of this object. - * - * @param keyName String - */ - public void setKeyName(String keyName) { - this.keyName = keyName; - } - - /** - * Returns the MD5 Hash for the data of this key. - * - * @return String MD5 - */ - public String getMd5hash() { - return md5hash; - } - - /** - * Sets the MD5 value of this key. - * - * @param md5hash Md5 of this file - */ - public void setMd5hash(String md5hash) { - this.md5hash = md5hash; - } - - /** - * Number of bytes stored in the data part of this key. - * - * @return long size of the data file - */ - public long getSize() { - return size; - } - - /** - * Sets the size of the data part of this key. - * - * @param size Size in long - */ - public void setSize(long size) { - this.size = size; - } - - /** - * Version of this key. - * - * @return returns the version of this key. - */ - public long getVersion() { - return version; - } - - /** - * Sets the version of this key. - * - * @param version - Version String - */ - public void setVersion(long version) { - this.version = version; - } - - /** - * Compares this object with the specified object for order. Returns a - * negative integer, zero, or a positive integer as this object is less - * than, equal to, or greater than the specified object. - * - * @param o the object to be compared. - * @return a negative integer, zero, or a positive integer as this object - * is less than, equal to, or greater than the specified object. - * @throws NullPointerException if the specified object is null - * @throws ClassCastException if the specified object's type prevents it - * from being compared to this object. - */ - @Override - public int compareTo(KeyInfo o) { - if (this.keyName.compareTo(o.getKeyName()) != 0) { - return this.keyName.compareTo(o.getKeyName()); - } - - if (this.getVersion() == o.getVersion()) { - return 0; - } - if (this.getVersion() < o.getVersion()) { - return -1; - } - return 1; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - - if (o == null || getClass() != o.getClass()) { - return false; - } - - KeyInfo keyInfo = (KeyInfo) o; - - return new EqualsBuilder() - .append(version, keyInfo.version) - .append(keyName, keyInfo.keyName) - .isEquals(); - } - - @Override - public int hashCode() { - return new HashCodeBuilder(17, 37) - .append(version) - .append(keyName) - .toHashCode(); - } - - /** - * Parse a string to return KeyInfo Object. 
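The ordering contract of the KeyInfo class above, name first and then version, illustrated with two hand-built instances; the key name and versions are arbitrary.

    import org.apache.hadoop.ozone.client.rest.response.KeyInfo;

    public class KeyInfoOrderDemo {
      public static void main(String[] args) {
        KeyInfo a = new KeyInfo();
        a.setKeyName("key1");
        a.setVersion(1);
        KeyInfo b = new KeyInfo();
        b.setKeyName("key1");
        b.setVersion(2);
        // Same key name, so ordering falls through to the version compare.
        System.out.println(a.compareTo(b)); // -1
      }
    }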
- * - * @param jsonString Json String - * @return keyInfo - * @throws IOException - */ - public static KeyInfo parse(String jsonString) throws IOException { - return READER.readValue(jsonString); - } - -} \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/KeyInfoDetails.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/KeyInfoDetails.java deleted file mode 100644 index 98506f06a893f..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/KeyInfoDetails.java +++ /dev/null @@ -1,107 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.rest.response; - -import java.io.IOException; -import java.util.List; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; -import org.apache.commons.lang3.builder.EqualsBuilder; -import org.apache.commons.lang3.builder.HashCodeBuilder; - -/** - * KeyInfoDetails class is used for parsing json response - * when KeyInfoDetails Call is made. - */ -public class KeyInfoDetails extends KeyInfo { - - private static final ObjectReader READER = - new ObjectMapper().readerFor(KeyInfoDetails.class); - - /** - * a list of Map which maps localID to ContainerID - * to specify replica locations. - */ - private List keyLocations; - - /** - * Constructor needed for json serialization. - */ - public KeyInfoDetails() { - } - - /** - * Set details of key location. - * - * @param locations - details of key location - */ - public void setKeyLocation(List locations) { - this.keyLocations = locations; - } - - /** - * Returns details of key location. - * - * @return volumeName - */ - public List getKeyLocations() { - return keyLocations; - } - - /** - * Parse a string to return KeyInfoDetails Object. 
- * - * @param jsonString Json String - * @return KeyInfoDetails - * @throws IOException - */ - public static KeyInfoDetails parse(String jsonString) throws IOException { - return READER.readValue(jsonString); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - - if (o == null || getClass() != o.getClass()) { - return false; - } - - KeyInfoDetails that = (KeyInfoDetails) o; - - return new EqualsBuilder() - .append(getVersion(), that.getVersion()) - .append(getKeyName(), that.getKeyName()) - .append(keyLocations, that.keyLocations) - .isEquals(); - } - - @Override - public int hashCode() { - return new HashCodeBuilder(21, 33) - .append(getVersion()) - .append(getKeyName()) - .append(keyLocations) - .toHashCode(); - } -} - diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/KeyLocation.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/KeyLocation.java deleted file mode 100644 index e5f46980ab1dd..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/KeyLocation.java +++ /dev/null @@ -1,89 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.rest.response; - -/** - * KeyLocation class is used used for parsing json response - * when KeyInfoDetails Call is made. - */ -public class KeyLocation { - /** - * Which container this key stored. - */ - private long containerID; - /** - * Which block this key stored inside a container. - */ - private long localID; - /** - * Data length of this key replica. - */ - private long length; - /** - * Offset of this key. - */ - private long offset; - - /** - * Empty constructor for Json serialization. - */ - public KeyLocation() { - - } - - /** - * Constructs KeyLocation. - */ - public KeyLocation(long containerID, long localID, - long length, long offset) { - this.containerID = containerID; - this.localID = localID; - this.length = length; - this.offset = offset; - } - - /** - * Returns the containerID of this Key. - */ - public long getContainerID() { - return containerID; - } - - /** - * Returns the localID of this Key. - */ - public long getLocalID() { - return localID; - } - - /** - * Returns the length of this Key. - */ - public long getLength() { - return length; - } - - /** - * Returns the offset of this Key. 
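A sketch of assembling the KeyInfoDetails and KeyLocation response objects above by hand, the way the REST layer reported replica locations; all IDs and sizes are made-up values.

    import java.util.Collections;
    import org.apache.hadoop.ozone.client.rest.response.KeyInfoDetails;
    import org.apache.hadoop.ozone.client.rest.response.KeyLocation;

    public class KeyLocationDemo {
      public static void main(String[] args) {
        // Constructor order: containerID, localID, length, offset.
        KeyLocation loc = new KeyLocation(12L, 7L, 1024L, 0L);
        KeyInfoDetails details = new KeyInfoDetails();
        details.setKeyName("key1");
        details.setKeyLocation(Collections.singletonList(loc));
        System.out.println(details.getKeyLocations().size()); // 1
      }
    }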
- */ - public long getOffset() { - return offset; - } - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/VolumeInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/VolumeInfo.java deleted file mode 100644 index f98b56a6d3223..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/VolumeInfo.java +++ /dev/null @@ -1,215 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.rest.response; - -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdds.client.OzoneQuota; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; - -/** - * VolumeInfo Class is used for parsing json response - * when VolumeInfo Call is made. - */ -@InterfaceAudience.Private -public class VolumeInfo implements Comparable { - - - private static final ObjectReader READER = - new ObjectMapper().readerFor(VolumeInfo.class); - - private VolumeOwner owner; - private OzoneQuota quota; - private String volumeName; - private String createdOn; - private String createdBy; - - - /** - * Constructor for VolumeInfo. - * - * @param volumeName - Name of the Volume - * @param createdOn _ Date String - * @param createdBy - Person who created it - */ - public VolumeInfo(String volumeName, String createdOn, - String createdBy) { - this.volumeName = volumeName; - this.createdOn = createdOn; - this.createdBy = createdBy; - } - - /** - * Constructor for VolumeInfo. - */ - public VolumeInfo() { - } - - /** - * gets the volume name. - * - * @return Volume Name - */ - public String getVolumeName() { - return volumeName; - } - - /** - * Sets the volume name. - * - * @param volumeName Volume Name - */ - public void setVolumeName(String volumeName) { - this.volumeName = volumeName; - } - - - /** - * Returns the name of the person who created this volume. - * - * @return Name of Admin who created this - */ - public String getCreatedBy() { - return createdBy; - } - - /** - * Sets the user name of the person who created this volume. - * - * @param createdBy UserName - */ - public void setCreatedBy(String createdBy) { - this.createdBy = createdBy; - } - - /** - * Gets the date on which this volume was created. - * - * @return Date String - */ - public String getCreatedOn() { - return createdOn; - } - - /** - * Sets the date string. - * - * @param createdOn Date String - */ - public void setCreatedOn(String createdOn) { - this.createdOn = createdOn; - } - - /** - * Returns the owner info. - * - * @return OwnerInfo - */ - public VolumeOwner getOwner() { - return owner; - } - - /** - * Sets the owner. 
- * - * @param owner OwnerInfo - */ - public void setOwner(VolumeOwner owner) { - this.owner = owner; - } - - /** - * Returns the quota information on a volume. - * - * @return Quota - */ - public OzoneQuota getQuota() { - return quota; - } - - /** - * Sets the quota info. - * - * @param quota Quota Info - */ - public void setQuota(OzoneQuota quota) { - this.quota = quota; - } - - /** - * Comparable Interface. - * @param o VolumeInfo Object. - * @return Result of comparison - */ - @Override - public int compareTo(VolumeInfo o) { - return this.volumeName.compareTo(o.getVolumeName()); - } - - /** - * Returns VolumeInfo class from json string. - * - * @param data Json String - * - * @return VolumeInfo - * - * @throws IOException - */ - public static VolumeInfo parse(String data) throws IOException { - return READER.readValue(data); - } - - /** - * Indicates whether some other object is "equal to" this one. - * - * @param obj the reference object with which to compare. - * - * @return {@code true} if this object is the same as the obj - * argument; {@code false} otherwise. - */ - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - VolumeInfo otherInfo = (VolumeInfo) obj; - return otherInfo.getVolumeName().equals(this.getVolumeName()); - } - - /** - * Returns a hash code value for the object. This method is - * supported for the benefit of hash tables such as those provided by - * HashMap. - * @return a hash code value for this object. - * - * @see Object#equals(Object) - * @see System#identityHashCode - */ - @Override - public int hashCode() { - return getVolumeName().hashCode(); - } - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/VolumeOwner.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/VolumeOwner.java deleted file mode 100644 index d4dbad4cbf6e9..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/VolumeOwner.java +++ /dev/null @@ -1,61 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.rest.response; - -import org.apache.hadoop.classification.InterfaceAudience; - -import com.fasterxml.jackson.annotation.JsonInclude; - -/** - * Volume Owner represents the owner of a volume. - * - * This is a class instead of a string since we might need to extend this class - * to support other forms of authentication. - */ -@InterfaceAudience.Private -public class VolumeOwner { - @JsonInclude(JsonInclude.Include.NON_NULL) - private String name; - - /** - * Constructor for VolumeOwner. 
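A sketch of parsing the VolumeInfo response class above from JSON; the sample payload covers only two of its fields and is illustrative.

    import org.apache.hadoop.ozone.client.rest.response.VolumeInfo;

    public class VolumeInfoParseDemo {
      public static void main(String[] args) throws Exception {
        String json = "{\"volumeName\":\"vol1\",\"createdBy\":\"hadoop\"}";
        VolumeInfo info = VolumeInfo.parse(json);
        System.out.println(info.getVolumeName() + " by " + info.getCreatedBy());
      }
    }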
- * - * @param name name of the User - */ - public VolumeOwner(String name) { - this.name = name; - } - - /** - * Constructs Volume Owner. - */ - public VolumeOwner() { - name = null; - } - - /** - * Returns the user name. - * - * @return Name - */ - public String getName() { - return name; - } - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/package-info.java deleted file mode 100644 index 432b029b6fb1e..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.hadoop.ozone.client.rest.response; - -/** - * This package contains class for ozone rest client library. - */ \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java deleted file mode 100644 index ffbca6a2b5d7b..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java +++ /dev/null @@ -1,269 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.freon; - -import java.io.IOException; -import java.io.PrintStream; -import java.net.InetSocketAddress; -import java.security.PrivilegedExceptionAction; -import java.util.Arrays; -import java.util.Collection; -import java.util.HashMap; -import java.util.Map; -import org.apache.hadoop.HadoopIllegalArgumentException; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.Configured; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdfs.HdfsConfiguration; -import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.security.SecurityUtil; -import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.Tool; -import org.apache.hadoop.util.ToolRunner; - -/** - * CLI utility to print out ozone related configuration. - */ -public class OzoneGetConf extends Configured implements Tool { - - private static final String DESCRIPTION = "ozone getconf is utility for " - + "getting configuration information from the config file.\n"; - - enum Command { - INCLUDE_FILE("-includeFile", - "gets the include file path that defines the datanodes " + - "that can join the cluster."), - EXCLUDE_FILE("-excludeFile", - "gets the exclude file path that defines the datanodes " + - "that need to decommissioned."), - OZONEMANAGER("-ozonemanagers", - "gets list of Ozone Manager nodes in the cluster"), - STORAGECONTAINERMANAGER("-storagecontainermanagers", - "gets list of ozone storage container manager nodes in the cluster"), - CONFKEY("-confKey [key]", "gets a specific key from the configuration"); - - private static final Map HANDLERS; - - static { - HANDLERS = new HashMap(); - HANDLERS.put(StringUtils.toLowerCase(OZONEMANAGER.getName()), - new OzoneManagersCommandHandler()); - HANDLERS.put(StringUtils.toLowerCase(STORAGECONTAINERMANAGER.getName()), - new StorageContainerManagersCommandHandler()); - HANDLERS.put(StringUtils.toLowerCase(CONFKEY.getName()), - new PrintConfKeyCommandHandler()); - } - - private final String cmd; - private final String description; - - Command(String cmd, String description) { - this.cmd = cmd; - this.description = description; - } - - public String getName() { - return cmd.split(" ")[0]; - } - - public String getUsage() { - return cmd; - } - - public String getDescription() { - return description; - } - - public static OzoneGetConf.CommandHandler getHandler(String cmd) { - return HANDLERS.get(StringUtils.toLowerCase(cmd)); - } - } - - static final String USAGE; - static { - HdfsConfiguration.init(); - - /* Initialize USAGE based on Command values */ - StringBuilder usage = new StringBuilder(DESCRIPTION); - usage.append("\nozone getconf \n"); - for (OzoneGetConf.Command cmd : OzoneGetConf.Command.values()) { - usage.append("\t[" + cmd.getUsage() + "]\t\t\t" + cmd.getDescription() - + "\n"); - } - USAGE = usage.toString(); - } - - /** - * Handler to return value for key corresponding to the - * {@link OzoneGetConf.Command}. 
- */ - static class CommandHandler { - protected String key; // Configuration key to lookup - - CommandHandler() { - this(null); - } - - CommandHandler(String key) { - this.key = key; - } - - final int doWork(OzoneGetConf tool, String[] args) { - try { - checkArgs(args); - - return doWorkInternal(tool, args); - } catch (Exception e) { - tool.printError(e.getMessage()); - } - return -1; - } - - protected void checkArgs(String[] args) { - if (args.length > 0) { - throw new HadoopIllegalArgumentException( - "Did not expect argument: " + args[0]); - } - } - - - /** Method to be overridden by sub classes for specific behavior. */ - int doWorkInternal(OzoneGetConf tool, String[] args) throws Exception { - - String value = tool.getConf().getTrimmed(key); - if (value != null) { - tool.printOut(value); - return 0; - } - tool.printError("Configuration " + key + " is missing."); - return -1; - } - } - - static class PrintConfKeyCommandHandler extends OzoneGetConf.CommandHandler { - @Override - protected void checkArgs(String[] args) { - if (args.length != 1) { - throw new HadoopIllegalArgumentException( - "usage: " + OzoneGetConf.Command.CONFKEY.getUsage()); - } - } - - @Override - int doWorkInternal(OzoneGetConf tool, String[] args) throws Exception { - this.key = args[0]; - return super.doWorkInternal(tool, args); - } - } - - private final PrintStream out; // Stream for printing command output - private final PrintStream err; // Stream for printing error - - protected OzoneGetConf(Configuration conf) { - this(conf, System.out, System.err); - } - - protected OzoneGetConf(Configuration conf, PrintStream out, PrintStream err) { - super(conf); - this.out = out; - this.err = err; - } - - void printError(String message) { - err.println(message); - } - - void printOut(String message) { - out.println(message); - } - - private void printUsage() { - printError(USAGE); - } - - /** - * Main method that runs the tool for given arguments. - * @param args arguments - * @return return status of the command - */ - private int doWork(String[] args) { - if (args.length >= 1) { - OzoneGetConf.CommandHandler handler = - OzoneGetConf.Command.getHandler(args[0]); - if (handler != null) { - return handler.doWork(this, Arrays.copyOfRange(args, 1, args.length)); - } - } - printUsage(); - return -1; - } - - @Override - public int run(final String[] args) throws Exception { - return SecurityUtil.doAsCurrentUser( - new PrivilegedExceptionAction() { - @Override - public Integer run() throws Exception { - return doWork(args); - } - }); - } - - /** - * Handler for {@link Command#STORAGECONTAINERMANAGER}. - */ - static class StorageContainerManagersCommandHandler extends CommandHandler { - - @Override - public int doWorkInternal(OzoneGetConf tool, String[] args) - throws IOException { - Collection addresses = HddsUtils - .getSCMAddresses(tool.getConf()); - - for (InetSocketAddress addr : addresses) { - tool.printOut(addr.getHostName()); - } - return 0; - } - } - - /** - * Handler for {@link Command#OZONEMANAGER}. 
- */ - static class OzoneManagersCommandHandler extends CommandHandler { - @Override - public int doWorkInternal(OzoneGetConf tool, String[] args) - throws IOException { - tool.printOut(OmUtils.getOmAddress(tool.getConf()).getHostName()); - return 0; - } - } - - public static void main(String[] args) throws Exception { - if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) { - System.exit(0); - } - - Configuration conf = new Configuration(); - conf.addResource(new OzoneConfiguration()); - int res = ToolRunner.run(new OzoneGetConf(conf), args); - System.exit(res); - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/package-info.java deleted file mode 100644 index 150c64e7d96f3..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.freon; -/** - * Classes related to Ozone tools. - */ diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java deleted file mode 100644 index b9ca2966311c9..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java +++ /dev/null @@ -1,81 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
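A sketch of driving the OzoneGetConf tool above programmatically through ToolRunner, the same path its main() takes. The demo lives in the same package because the constructor is protected, and the OM address is an illustrative value.

    package org.apache.hadoop.ozone.freon;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.util.ToolRunner;

    public class OzoneGetConfDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.addResource(new OzoneConfiguration());
        conf.set("ozone.om.address", "om.example.com:9862");
        // The -ozonemanagers handler prints the OM hostname; 0 means success.
        int rc = ToolRunner.run(new OzoneGetConf(conf),
            new String[] {"-ozonemanagers"});
        System.exit(rc);
      }
    }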
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.om; - -import org.apache.hadoop.ozone.OzoneAcl; -/** - * Ozone Manager Constants. - */ -public final class OMConfigKeys { - /** - * Never constructed. - */ - private OMConfigKeys() { - } - - - public static final String OZONE_OM_HANDLER_COUNT_KEY = - "ozone.om.handler.count.key"; - public static final int OZONE_OM_HANDLER_COUNT_DEFAULT = 20; - - public static final String OZONE_OM_ADDRESS_KEY = - "ozone.om.address"; - public static final String OZONE_OM_BIND_HOST_DEFAULT = - "0.0.0.0"; - public static final int OZONE_OM_PORT_DEFAULT = 9862; - - public static final String OZONE_OM_HTTP_ENABLED_KEY = - "ozone.om.http.enabled"; - public static final String OZONE_OM_HTTP_BIND_HOST_KEY = - "ozone.om.http-bind-host"; - public static final String OZONE_OM_HTTPS_BIND_HOST_KEY = - "ozone.om.https-bind-host"; - public static final String OZONE_OM_HTTP_ADDRESS_KEY = - "ozone.om.http-address"; - public static final String OZONE_OM_HTTPS_ADDRESS_KEY = - "ozone.om.https-address"; - public static final String OZONE_OM_KEYTAB_FILE = - "ozone.om.keytab.file"; - public static final String OZONE_OM_HTTP_BIND_HOST_DEFAULT = "0.0.0.0"; - public static final int OZONE_OM_HTTP_BIND_PORT_DEFAULT = 9874; - public static final int OZONE_OM_HTTPS_BIND_PORT_DEFAULT = 9875; - - // LevelDB cache file uses an off-heap cache in LevelDB of 128 MB. - public static final String OZONE_OM_DB_CACHE_SIZE_MB = - "ozone.om.db.cache.size.mb"; - public static final int OZONE_OM_DB_CACHE_SIZE_DEFAULT = 128; - - public static final String OZONE_OM_USER_MAX_VOLUME = - "ozone.om.user.max.volume"; - public static final int OZONE_OM_USER_MAX_VOLUME_DEFAULT = 1024; - - // OM Default user/group permissions - public static final String OZONE_OM_USER_RIGHTS = - "ozone.om.user.rights"; - public static final OzoneAcl.OzoneACLRights OZONE_OM_USER_RIGHTS_DEFAULT = - OzoneAcl.OzoneACLRights.READ_WRITE; - - public static final String OZONE_OM_GROUP_RIGHTS = - "ozone.om.group.rights"; - public static final OzoneAcl.OzoneACLRights OZONE_OM_GROUP_RIGHTS_DEFAULT = - OzoneAcl.OzoneACLRights.READ_WRITE; - - public static final String OZONE_KEY_DELETING_LIMIT_PER_TASK = - "ozone.key.deleting.limit.per.task"; - public static final int OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT = 1000; -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java deleted file mode 100644 index 1bd258e74297e..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java +++ /dev/null @@ -1,256 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
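A sketch of reading one of the OM keys defined above together with its default, as server components typically do with a Configuration object.

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.om.OMConfigKeys;

    public class OmConfigDemo {
      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        // Falls back to the compiled-in default (20) when the key is unset.
        int handlers = conf.getInt(OMConfigKeys.OZONE_OM_HANDLER_COUNT_KEY,
            OMConfigKeys.OZONE_OM_HANDLER_COUNT_DEFAULT);
        System.out.println("OM handler count: " + handlers);
      }
    }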
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.helpers; - -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.fs.StorageType; -import org.apache.hadoop.hdfs.protocolPB.PBHelperClient; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.audit.Auditable; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.BucketArgs; -import org.apache.hadoop.ozone.protocolPB.OMPBHelper; - -/** - * A class that encapsulates Bucket Arguments. - */ -public final class OmBucketArgs implements Auditable { - /** - * Name of the volume in which the bucket belongs to. - */ - private final String volumeName; - /** - * Name of the bucket. - */ - private final String bucketName; - /** - * ACL's that are to be added for the bucket. - */ - private List addAcls; - /** - * ACL's that are to be removed from the bucket. - */ - private List removeAcls; - /** - * Bucket Version flag. - */ - private Boolean isVersionEnabled; - /** - * Type of storage to be used for this bucket. - * [RAM_DISK, SSD, DISK, ARCHIVE] - */ - private StorageType storageType; - - /** - * Private constructor, constructed via builder. - * @param volumeName - Volume name. - * @param bucketName - Bucket name. - * @param addAcls - ACL's to be added. - * @param removeAcls - ACL's to be removed. - * @param isVersionEnabled - Bucket version flag. - * @param storageType - Storage type to be used. - */ - private OmBucketArgs(String volumeName, String bucketName, - List addAcls, List removeAcls, - Boolean isVersionEnabled, StorageType storageType) { - this.volumeName = volumeName; - this.bucketName = bucketName; - this.addAcls = addAcls; - this.removeAcls = removeAcls; - this.isVersionEnabled = isVersionEnabled; - this.storageType = storageType; - } - - /** - * Returns the Volume Name. - * @return String. - */ - public String getVolumeName() { - return volumeName; - } - - /** - * Returns the Bucket Name. - * @return String - */ - public String getBucketName() { - return bucketName; - } - - /** - * Returns the ACL's that are to be added. - * @return List - */ - public List getAddAcls() { - return addAcls; - } - - /** - * Returns the ACL's that are to be removed. - * @return List - */ - public List getRemoveAcls() { - return removeAcls; - } - - /** - * Returns true if bucket version is enabled, else false. - * @return isVersionEnabled - */ - public Boolean getIsVersionEnabled() { - return isVersionEnabled; - } - - /** - * Returns the type of storage to be used. - * @return StorageType - */ - public StorageType getStorageType() { - return storageType; - } - - /** - * Returns new builder class that builds a OmBucketArgs. 
- * - * @return Builder - */ - public static Builder newBuilder() { - return new Builder(); - } - - @Override - public Map toAuditMap() { - Map auditMap = new LinkedHashMap<>(); - auditMap.put(OzoneConsts.VOLUME, this.volumeName); - auditMap.put(OzoneConsts.BUCKET, this.bucketName); - if(this.addAcls != null){ - auditMap.put(OzoneConsts.ADD_ACLS, this.addAcls.toString()); - } - if(this.removeAcls != null){ - auditMap.put(OzoneConsts.REMOVE_ACLS, this.removeAcls.toString()); - } - auditMap.put(OzoneConsts.IS_VERSION_ENABLED, - String.valueOf(this.isVersionEnabled)); - if(this.storageType != null){ - auditMap.put(OzoneConsts.STORAGE_TYPE, this.storageType.name()); - } - return auditMap; - } - - /** - * Builder for OmBucketArgs. - */ - public static class Builder { - private String volumeName; - private String bucketName; - private List addAcls; - private List removeAcls; - private Boolean isVersionEnabled; - private StorageType storageType; - - public Builder setVolumeName(String volume) { - this.volumeName = volume; - return this; - } - - public Builder setBucketName(String bucket) { - this.bucketName = bucket; - return this; - } - - public Builder setAddAcls(List acls) { - this.addAcls = acls; - return this; - } - - public Builder setRemoveAcls(List acls) { - this.removeAcls = acls; - return this; - } - - public Builder setIsVersionEnabled(Boolean versionFlag) { - this.isVersionEnabled = versionFlag; - return this; - } - - public Builder setStorageType(StorageType storage) { - this.storageType = storage; - return this; - } - - /** - * Constructs the OmBucketArgs. - * @return instance of OmBucketArgs. - */ - public OmBucketArgs build() { - Preconditions.checkNotNull(volumeName); - Preconditions.checkNotNull(bucketName); - return new OmBucketArgs(volumeName, bucketName, addAcls, - removeAcls, isVersionEnabled, storageType); - } - } - - /** - * Creates BucketArgs protobuf from OmBucketArgs. - */ - public BucketArgs getProtobuf() { - BucketArgs.Builder builder = BucketArgs.newBuilder(); - builder.setVolumeName(volumeName) - .setBucketName(bucketName); - if(addAcls != null && !addAcls.isEmpty()) { - builder.addAllAddAcls(addAcls.stream().map( - OMPBHelper::convertOzoneAcl).collect(Collectors.toList())); - } - if(removeAcls != null && !removeAcls.isEmpty()) { - builder.addAllRemoveAcls(removeAcls.stream().map( - OMPBHelper::convertOzoneAcl).collect(Collectors.toList())); - } - if(isVersionEnabled != null) { - builder.setIsVersionEnabled(isVersionEnabled); - } - if(storageType != null) { - builder.setStorageType( - PBHelperClient.convertStorageType(storageType)); - } - return builder.build(); - } - - /** - * Parses BucketInfo protobuf and creates OmBucketArgs. - * @param bucketArgs - * @return instance of OmBucketArgs - */ - public static OmBucketArgs getFromProtobuf(BucketArgs bucketArgs) { - return new OmBucketArgs(bucketArgs.getVolumeName(), - bucketArgs.getBucketName(), - bucketArgs.getAddAclsList().stream().map( - OMPBHelper::convertOzoneAcl).collect(Collectors.toList()), - bucketArgs.getRemoveAclsList().stream().map( - OMPBHelper::convertOzoneAcl).collect(Collectors.toList()), - bucketArgs.hasIsVersionEnabled() ? - bucketArgs.getIsVersionEnabled() : null, - bucketArgs.hasStorageType() ? 
PBHelperClient.convertStorageType( - bucketArgs.getStorageType()) : null); - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java deleted file mode 100644 index 5199ce3b58f4d..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java +++ /dev/null @@ -1,254 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
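A sketch of constructing OmBucketArgs through the builder shown above; the volume, bucket, and settings are arbitrary example values.

    import org.apache.hadoop.fs.StorageType;
    import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;

    public class BucketArgsDemo {
      public static void main(String[] args) {
        OmBucketArgs bucketArgs = OmBucketArgs.newBuilder()
            .setVolumeName("vol1")
            .setBucketName("bucket1")
            .setIsVersionEnabled(true)
            .setStorageType(StorageType.SSD)
            .build();
        // The audit map echoes back every field that was set.
        System.out.println(bucketArgs.toAuditMap());
      }
    }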
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.helpers; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.fs.StorageType; -import org.apache.hadoop.hdfs.protocolPB.PBHelperClient; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.audit.Auditable; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.BucketInfo; -import org.apache.hadoop.ozone.protocolPB.OMPBHelper; - -import java.util.LinkedHashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - -/** - * A class that encapsulates Bucket Info. - */ -public final class OmBucketInfo implements Auditable { - /** - * Name of the volume in which the bucket belongs to. - */ - private final String volumeName; - /** - * Name of the bucket. - */ - private final String bucketName; - /** - * ACL Information. - */ - private List acls; - /** - * Bucket Version flag. - */ - private Boolean isVersionEnabled; - /** - * Type of storage to be used for this bucket. - * [RAM_DISK, SSD, DISK, ARCHIVE] - */ - private StorageType storageType; - /** - * Creation time of bucket. - */ - private final long creationTime; - - /** - * Private constructor, constructed via builder. - * @param volumeName - Volume name. - * @param bucketName - Bucket name. - * @param acls - list of ACLs. - * @param isVersionEnabled - Bucket version flag. - * @param storageType - Storage type to be used. - * @param creationTime - Bucket creation time. - */ - private OmBucketInfo(String volumeName, String bucketName, - List acls, boolean isVersionEnabled, - StorageType storageType, long creationTime) { - this.volumeName = volumeName; - this.bucketName = bucketName; - this.acls = acls; - this.isVersionEnabled = isVersionEnabled; - this.storageType = storageType; - this.creationTime = creationTime; - } - - /** - * Returns the Volume Name. - * @return String. - */ - public String getVolumeName() { - return volumeName; - } - - /** - * Returns the Bucket Name. - * @return String - */ - public String getBucketName() { - return bucketName; - } - - /** - * Returns the ACL's associated with this bucket. - * @return List - */ - public List getAcls() { - return acls; - } - - /** - * Returns true if bucket version is enabled, else false. - * @return isVersionEnabled - */ - public boolean getIsVersionEnabled() { - return isVersionEnabled; - } - - /** - * Returns the type of storage to be used. - * @return StorageType - */ - public StorageType getStorageType() { - return storageType; - } - - /** - * Returns creation time. - * - * @return long - */ - public long getCreationTime() { - return creationTime; - } - - /** - * Returns new builder class that builds a OmBucketInfo. - * - * @return Builder - */ - public static Builder newBuilder() { - return new Builder(); - } - - @Override - public Map toAuditMap() { - Map auditMap = new LinkedHashMap<>(); - auditMap.put(OzoneConsts.VOLUME, this.volumeName); - auditMap.put(OzoneConsts.BUCKET, this.bucketName); - auditMap.put(OzoneConsts.ACLS, - (this.acls != null) ? 
this.acls.toString() : null); - auditMap.put(OzoneConsts.IS_VERSION_ENABLED, - String.valueOf(this.isVersionEnabled)); - auditMap.put(OzoneConsts.STORAGE_TYPE, - (this.storageType != null) ? this.storageType.name() : null); - auditMap.put(OzoneConsts.CREATION_TIME, String.valueOf(this.creationTime)); - return auditMap; - } - - /** - * Builder for OmBucketInfo. - */ - public static class Builder { - private String volumeName; - private String bucketName; - private List acls; - private Boolean isVersionEnabled; - private StorageType storageType; - private long creationTime; - - public Builder() { - //Default values - this.acls = new LinkedList<>(); - this.isVersionEnabled = false; - this.storageType = StorageType.DISK; - } - - public Builder setVolumeName(String volume) { - this.volumeName = volume; - return this; - } - - public Builder setBucketName(String bucket) { - this.bucketName = bucket; - return this; - } - - public Builder setAcls(List listOfAcls) { - this.acls = listOfAcls; - return this; - } - - public Builder setIsVersionEnabled(Boolean versionFlag) { - this.isVersionEnabled = versionFlag; - return this; - } - - public Builder setStorageType(StorageType storage) { - this.storageType = storage; - return this; - } - - public Builder setCreationTime(long createdOn) { - this.creationTime = createdOn; - return this; - } - - /** - * Constructs the OmBucketInfo. - * @return instance of OmBucketInfo. - */ - public OmBucketInfo build() { - Preconditions.checkNotNull(volumeName); - Preconditions.checkNotNull(bucketName); - Preconditions.checkNotNull(acls); - Preconditions.checkNotNull(isVersionEnabled); - Preconditions.checkNotNull(storageType); - - return new OmBucketInfo(volumeName, bucketName, acls, - isVersionEnabled, storageType, creationTime); - } - } - - /** - * Creates BucketInfo protobuf from OmBucketInfo. - */ - public BucketInfo getProtobuf() { - return BucketInfo.newBuilder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .addAllAcls(acls.stream().map( - OMPBHelper::convertOzoneAcl).collect(Collectors.toList())) - .setIsVersionEnabled(isVersionEnabled) - .setStorageType(PBHelperClient.convertStorageType( - storageType)) - .setCreationTime(creationTime) - .build(); - } - - /** - * Parses BucketInfo protobuf and creates OmBucketInfo. - * @param bucketInfo - * @return instance of OmBucketInfo - */ - public static OmBucketInfo getFromProtobuf(BucketInfo bucketInfo) { - return new OmBucketInfo( - bucketInfo.getVolumeName(), - bucketInfo.getBucketName(), - bucketInfo.getAclsList().stream().map( - OMPBHelper::convertOzoneAcl).collect(Collectors.toList()), - bucketInfo.getIsVersionEnabled(), - PBHelperClient.convertStorageType( - bucketInfo.getStorageType()), bucketInfo.getCreationTime()); - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java deleted file mode 100644 index e56ad7f161b46..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java +++ /dev/null @@ -1,167 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
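For reference, a minimal sketch of driving the OmBucketInfo builder deleted above; the volume and bucket names are illustrative placeholders, and the round-trip through getProtobuf()/getFromProtobuf() mirrors what the OM wire protocol and om.db serialization do:

    import org.apache.hadoop.fs.StorageType;
    import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
    import org.apache.hadoop.util.Time;

    public class OmBucketInfoSketch {
      public static void main(String[] args) {
        OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
            .setVolumeName("vol1")            // required (Preconditions check)
            .setBucketName("bucket1")         // required
            .setStorageType(StorageType.SSD)  // defaults to DISK when unset
            .setIsVersionEnabled(false)
            .setCreationTime(Time.now())
            .build();
        // Serialize to the BucketInfo protobuf and parse it back.
        OmBucketInfo copy = OmBucketInfo.getFromProtobuf(bucketInfo.getProtobuf());
        assert copy.getBucketName().equals("bucket1");
      }
    }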
You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.helpers; -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.audit.Auditable; - -import java.util.ArrayList; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; - -/** - * Args for key. Client use this to specify key's attributes on key creation - * (putKey()). - */ -public final class OmKeyArgs implements Auditable { - private final String volumeName; - private final String bucketName; - private final String keyName; - private long dataSize; - private final ReplicationType type; - private final ReplicationFactor factor; - private List locationInfoList; - - private OmKeyArgs(String volumeName, String bucketName, String keyName, - long dataSize, ReplicationType type, ReplicationFactor factor, - List locationInfoList) { - this.volumeName = volumeName; - this.bucketName = bucketName; - this.keyName = keyName; - this.dataSize = dataSize; - this.type = type; - this.factor = factor; - this.locationInfoList = locationInfoList; - } - - public ReplicationType getType() { - return type; - } - - public ReplicationFactor getFactor() { - return factor; - } - - public String getVolumeName() { - return volumeName; - } - - public String getBucketName() { - return bucketName; - } - - public String getKeyName() { - return keyName; - } - - public long getDataSize() { - return dataSize; - } - - public void setDataSize(long size) { - dataSize = size; - } - - public void setLocationInfoList(List locationInfoList) { - this.locationInfoList = locationInfoList; - } - - public List getLocationInfoList() { - return locationInfoList; - } - - @Override - public Map toAuditMap() { - Map auditMap = new LinkedHashMap<>(); - auditMap.put(OzoneConsts.VOLUME, this.volumeName); - auditMap.put(OzoneConsts.BUCKET, this.bucketName); - auditMap.put(OzoneConsts.KEY, this.keyName); - auditMap.put(OzoneConsts.DATA_SIZE, String.valueOf(this.dataSize)); - auditMap.put(OzoneConsts.REPLICATION_TYPE, - (this.type != null) ? this.type.name() : null); - auditMap.put(OzoneConsts.REPLICATION_FACTOR, - (this.factor != null) ? this.factor.name() : null); - auditMap.put(OzoneConsts.KEY_LOCATION_INFO, - (this.locationInfoList != null) ? locationInfoList.toString() : null); - return auditMap; - } - - @VisibleForTesting - public void addLocationInfo(OmKeyLocationInfo locationInfo) { - if (this.locationInfoList == null) { - locationInfoList = new ArrayList<>(); - } - locationInfoList.add(locationInfo); - } - - /** - * Builder class of OmKeyArgs. 
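A short sketch of the OmKeyArgs builder described here, as a client would populate it before putKey(); the names and the 1 MB size are placeholders:

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
    import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;

    public class OmKeyArgsSketch {
      public static void main(String[] args) {
        OmKeyArgs keyArgs = new OmKeyArgs.Builder()
            .setVolumeName("vol1")
            .setBucketName("bucket1")
            .setKeyName("key1")
            .setDataSize(1024L * 1024L)
            .setType(ReplicationType.RATIS)
            .setFactor(ReplicationFactor.THREE)
            .build();
        // The audit map renders replication settings null-safely.
        System.out.println(keyArgs.toAuditMap());
      }
    }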
- */ - public static class Builder { - private String volumeName; - private String bucketName; - private String keyName; - private long dataSize; - private ReplicationType type; - private ReplicationFactor factor; - private List locationInfoList; - - public Builder setVolumeName(String volume) { - this.volumeName = volume; - return this; - } - - public Builder setBucketName(String bucket) { - this.bucketName = bucket; - return this; - } - - public Builder setKeyName(String key) { - this.keyName = key; - return this; - } - - public Builder setDataSize(long size) { - this.dataSize = size; - return this; - } - - public Builder setType(ReplicationType replicationType) { - this.type = replicationType; - return this; - } - - public Builder setFactor(ReplicationFactor replicationFactor) { - this.factor = replicationFactor; - return this; - } - - public Builder setLocationInfoList(List locationInfos) { - this.locationInfoList = locationInfos; - return this; - } - - public OmKeyArgs build() { - return new OmKeyArgs(volumeName, bucketName, keyName, dataSize, type, - factor, locationInfoList); - } - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java deleted file mode 100644 index 50f4b17508a12..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java +++ /dev/null @@ -1,303 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.helpers; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo; -import org.apache.hadoop.util.Time; - -import java.io.IOException; -import java.util.List; -import java.util.stream.Collectors; - -/** - * Args for key block. The block instance for the key requested in putKey. - * This is returned from OM to client, and client use class to talk to - * datanode. Also, this is the metadata written to om.db on server side. - */ -public final class OmKeyInfo { - private final String volumeName; - private final String bucketName; - // name of key client specified - private String keyName; - private long dataSize; - private List keyLocationVersions; - private final long creationTime; - private long modificationTime; - private HddsProtos.ReplicationType type; - private HddsProtos.ReplicationFactor factor; - - private OmKeyInfo(String volumeName, String bucketName, String keyName, - List versions, long dataSize, - long creationTime, long modificationTime, HddsProtos.ReplicationType type, - HddsProtos.ReplicationFactor factor) { - this.volumeName = volumeName; - this.bucketName = bucketName; - this.keyName = keyName; - this.dataSize = dataSize; - // it is important that the versions are ordered from old to new. - // Do this sanity check when versions got loaded on creating OmKeyInfo. - // TODO : this is not necessary, here only because versioning is still a - // work in-progress, remove this following check when versioning is - // complete and prove correctly functioning - long currentVersion = -1; - for (OmKeyLocationInfoGroup version : versions) { - Preconditions.checkArgument( - currentVersion + 1 == version.getVersion()); - currentVersion = version.getVersion(); - } - this.keyLocationVersions = versions; - this.creationTime = creationTime; - this.modificationTime = modificationTime; - this.factor = factor; - this.type = type; - } - - public String getVolumeName() { - return volumeName; - } - - public String getBucketName() { - return bucketName; - } - - public HddsProtos.ReplicationType getType() { - return type; - } - - public HddsProtos.ReplicationFactor getFactor() { - return factor; - } - - public String getKeyName() { - return keyName; - } - - public void setKeyName(String keyName) { - this.keyName = keyName; - } - - public long getDataSize() { - return dataSize; - } - - public void setDataSize(long size) { - this.dataSize = size; - } - - public synchronized OmKeyLocationInfoGroup getLatestVersionLocations() { - return keyLocationVersions.size() == 0? null : - keyLocationVersions.get(keyLocationVersions.size() - 1); - } - - public List getKeyLocationVersions() { - return keyLocationVersions; - } - - public void updateModifcationTime() { - this.modificationTime = Time.monotonicNow(); - } - - /** - * updates the length of the each block in the list given. - * This will be called when the key is being committed to OzoneManager. 
- * - * @param locationInfoList list of locationInfo - * @throws IOException - */ - public void updateLocationInfoList(List locationInfoList) { - long latestVersion = getLatestVersionLocations().getVersion(); - OmKeyLocationInfoGroup keyLocationInfoGroup = getLatestVersionLocations(); - List currentList = - keyLocationInfoGroup.getLocationList(); - List latestVersionList = - keyLocationInfoGroup.getBlocksLatestVersionOnly(); - // Updates the latest locationList in the latest version only with - // given locationInfoList here. - // TODO : The original allocated list and the updated list here may vary - // as the containers on the Datanode on which the blocks were pre allocated - // might get closed. The diff of blocks between these two lists here - // need to be garbage collected in case the ozone client dies. - currentList.removeAll(latestVersionList); - // set each of the locationInfo object to the latest version - locationInfoList.stream().forEach(omKeyLocationInfo -> omKeyLocationInfo - .setCreateVersion(latestVersion)); - currentList.addAll(locationInfoList); - } - - /** - * Append a set of blocks to the latest version. Note that these blocks are - * part of the latest version, not a new version. - * - * @param newLocationList the list of new blocks to be added. - * @throws IOException - */ - public synchronized void appendNewBlocks( - List newLocationList) throws IOException { - if (keyLocationVersions.size() == 0) { - throw new IOException("Appending new block, but no version exist"); - } - OmKeyLocationInfoGroup currentLatestVersion = - keyLocationVersions.get(keyLocationVersions.size() - 1); - currentLatestVersion.appendNewBlocks(newLocationList); - setModificationTime(Time.now()); - } - - /** - * Add a new set of blocks. The new blocks will be added as appending a new - * version to the all version list. - * - * @param newLocationList the list of new blocks to be added. - * @throws IOException - */ - public synchronized long addNewVersion( - List newLocationList) throws IOException { - long latestVersionNum; - if (keyLocationVersions.size() == 0) { - // no version exist, these blocks are the very first version. - keyLocationVersions.add(new OmKeyLocationInfoGroup(0, newLocationList)); - latestVersionNum = 0; - } else { - // it is important that the new version are always at the tail of the list - OmKeyLocationInfoGroup currentLatestVersion = - keyLocationVersions.get(keyLocationVersions.size() - 1); - // the new version is created based on the current latest version - OmKeyLocationInfoGroup newVersion = - currentLatestVersion.generateNextVersion(newLocationList); - keyLocationVersions.add(newVersion); - latestVersionNum = newVersion.getVersion(); - } - setModificationTime(Time.now()); - return latestVersionNum; - } - - public long getCreationTime() { - return creationTime; - } - - public long getModificationTime() { - return modificationTime; - } - - public void setModificationTime(long modificationTime) { - this.modificationTime = modificationTime; - } - - /** - * Builder of OmKeyInfo. 
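To illustrate the version-list invariant enforced above (versions ordered from 0 upward, new versions appended at the tail), a compilable sketch under the assumption that an empty location group is acceptable for version 0:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.Collections;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
    import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
    import org.apache.hadoop.util.Time;

    public class OmKeyInfoVersionsSketch {
      public static void main(String[] args) throws IOException {
        OmKeyInfo keyInfo = new OmKeyInfo.Builder()
            .setVolumeName("vol1").setBucketName("bucket1").setKeyName("key1")
            .setOmKeyLocationInfos(new ArrayList<>(Collections.singletonList(
                new OmKeyLocationInfoGroup(0, new ArrayList<>()))))  // version 0
            .setDataSize(0L)
            .setCreationTime(Time.now())
            .setModificationTime(Time.now())
            .setReplicationType(HddsProtos.ReplicationType.RATIS)
            .setReplicationFactor(HddsProtos.ReplicationFactor.THREE)
            .build();
        // addNewVersion() appends at the tail and returns the new version number.
        long v = keyInfo.addNewVersion(new ArrayList<>());
        assert v == 1;
      }
    }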
- */ - public static class Builder { - private String volumeName; - private String bucketName; - private String keyName; - private long dataSize; - private List omKeyLocationInfoGroups; - private long creationTime; - private long modificationTime; - private HddsProtos.ReplicationType type; - private HddsProtos.ReplicationFactor factor; - - public Builder setVolumeName(String volume) { - this.volumeName = volume; - return this; - } - - public Builder setBucketName(String bucket) { - this.bucketName = bucket; - return this; - } - - public Builder setKeyName(String key) { - this.keyName = key; - return this; - } - - public Builder setOmKeyLocationInfos( - List omKeyLocationInfoList) { - this.omKeyLocationInfoGroups = omKeyLocationInfoList; - return this; - } - - public Builder setDataSize(long size) { - this.dataSize = size; - return this; - } - - public Builder setCreationTime(long crTime) { - this.creationTime = crTime; - return this; - } - - public Builder setModificationTime(long mTime) { - this.modificationTime = mTime; - return this; - } - - public Builder setReplicationFactor(HddsProtos.ReplicationFactor factor) { - this.factor = factor; - return this; - } - - public Builder setReplicationType(HddsProtos.ReplicationType type) { - this.type = type; - return this; - } - - public OmKeyInfo build() { - return new OmKeyInfo( - volumeName, bucketName, keyName, omKeyLocationInfoGroups, - dataSize, creationTime, modificationTime, type, factor); - } - } - - public KeyInfo getProtobuf() { - long latestVersion = keyLocationVersions.size() == 0 ? -1 : - keyLocationVersions.get(keyLocationVersions.size() - 1).getVersion(); - return KeyInfo.newBuilder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setDataSize(dataSize) - .setFactor(factor) - .setType(type) - .addAllKeyLocationList(keyLocationVersions.stream() - .map(OmKeyLocationInfoGroup::getProtobuf) - .collect(Collectors.toList())) - .setLatestVersion(latestVersion) - .setCreationTime(creationTime) - .setModificationTime(modificationTime) - .build(); - } - - public static OmKeyInfo getFromProtobuf(KeyInfo keyInfo) { - return new OmKeyInfo( - keyInfo.getVolumeName(), - keyInfo.getBucketName(), - keyInfo.getKeyName(), - keyInfo.getKeyLocationListList().stream() - .map(OmKeyLocationInfoGroup::getFromProtobuf) - .collect(Collectors.toList()), - keyInfo.getDataSize(), - keyInfo.getCreationTime(), - keyInfo.getModificationTime(), - keyInfo.getType(), - keyInfo.getFactor()); - } - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java deleted file mode 100644 index 79b3c82b2dc42..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java +++ /dev/null @@ -1,143 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.om.helpers; - -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocation; - -/** - * One key can be too huge to fit in one container. In which case it gets split - * into a number of subkeys. This class represents one such subkey instance. - */ -public final class OmKeyLocationInfo { - private final BlockID blockID; - private final boolean shouldCreateContainer; - // the id of this subkey in all the subkeys. - private long length; - private final long offset; - // the version number indicating when this block was added - private long createVersion; - - private OmKeyLocationInfo(BlockID blockID, boolean shouldCreateContainer, - long length, long offset) { - this.blockID = blockID; - this.shouldCreateContainer = shouldCreateContainer; - this.length = length; - this.offset = offset; - } - - public void setCreateVersion(long version) { - createVersion = version; - } - - public long getCreateVersion() { - return createVersion; - } - - public BlockID getBlockID() { - return blockID; - } - - public long getContainerID() { - return blockID.getContainerID(); - } - - public long getLocalID() { - return blockID.getLocalID(); - } - - public boolean getShouldCreateContainer() { - return shouldCreateContainer; - } - - public long getLength() { - return length; - } - - public void setLength(long length) { - this.length = length; - } - - public long getOffset() { - return offset; - } - - /** - * Builder of OmKeyLocationInfo. 
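A sketch of assembling one such subkey descriptor; the container and local IDs are arbitrary placeholders:

    import org.apache.hadoop.hdds.client.BlockID;
    import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;

    public class OmKeyLocationInfoSketch {
      public static void main(String[] args) {
        OmKeyLocationInfo loc = new OmKeyLocationInfo.Builder()
            .setBlockID(new BlockID(1L, 100L))  // (containerID, localID)
            .setShouldCreateContainer(false)
            .setLength(4096L)
            .setOffset(0L)
            .build();
        loc.setCreateVersion(0);  // stamped when a key version adopts the block
        System.out.println(loc);
      }
    }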
- */ - public static class Builder { - private BlockID blockID; - private boolean shouldCreateContainer; - private long length; - private long offset; - - public Builder setBlockID(BlockID blockId) { - this.blockID = blockId; - return this; - } - - public Builder setShouldCreateContainer(boolean create) { - this.shouldCreateContainer = create; - return this; - } - - public Builder setLength(long len) { - this.length = len; - return this; - } - - public Builder setOffset(long off) { - this.offset = off; - return this; - } - - public OmKeyLocationInfo build() { - return new OmKeyLocationInfo(blockID, - shouldCreateContainer, length, offset); - } - } - - public KeyLocation getProtobuf() { - return KeyLocation.newBuilder() - .setBlockID(blockID.getProtobuf()) - .setShouldCreateContainer(shouldCreateContainer) - .setLength(length) - .setOffset(offset) - .setCreateVersion(createVersion) - .build(); - } - - public static OmKeyLocationInfo getFromProtobuf(KeyLocation keyLocation) { - OmKeyLocationInfo info = new OmKeyLocationInfo( - BlockID.getFromProtobuf(keyLocation.getBlockID()), - keyLocation.getShouldCreateContainer(), - keyLocation.getLength(), - keyLocation.getOffset()); - info.setCreateVersion(keyLocation.getCreateVersion()); - return info; - } - - @Override - public String toString() { - return "{blockID={containerID=" + blockID.getContainerID() + - ", localID=" + blockID.getLocalID() + "}" + - ", shouldCreateContainer=" + shouldCreateContainer + - ", length=" + length + - ", offset=" + offset + - ", createVersion=" + createVersion + '}'; - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java deleted file mode 100644 index 8bdcee3803c8b..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java +++ /dev/null @@ -1,118 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.om.helpers; - -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocationList; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.stream.Collectors; - -/** - * A list of key locations. This class represents one single version of the - * blocks of a key. - */ -public class OmKeyLocationInfoGroup { - private final long version; - private final List locationList; - - public OmKeyLocationInfoGroup(long version, - List locations) { - this.version = version; - this.locationList = locations; - } - - /** - * Return only the blocks that are created in the most recent version. - * - * @return the list of blocks that are created in the latest version. - */ - public List getBlocksLatestVersionOnly() { - List list = new ArrayList<>(); - locationList.stream().filter(x -> x.getCreateVersion() == version) - .forEach(list::add); - return list; - } - - public long getVersion() { - return version; - } - - public List getLocationList() { - return locationList; - } - - public KeyLocationList getProtobuf() { - return KeyLocationList.newBuilder() - .setVersion(version) - .addAllKeyLocations( - locationList.stream().map(OmKeyLocationInfo::getProtobuf) - .collect(Collectors.toList())) - .build(); - } - - public static OmKeyLocationInfoGroup getFromProtobuf( - KeyLocationList keyLocationList) { - return new OmKeyLocationInfoGroup( - keyLocationList.getVersion(), - keyLocationList.getKeyLocationsList().stream() - .map(OmKeyLocationInfo::getFromProtobuf) - .collect(Collectors.toList())); - } - - /** - * Given a new block location, generate a new version list based upon this - * one. - * - * @param newLocationList a list of new location to be added. - * @return - */ - OmKeyLocationInfoGroup generateNextVersion( - List newLocationList) throws IOException { - // TODO : revisit if we can do this method more efficiently - // one potential inefficiency here is that later version always include - // older ones. e.g. 
v1 has B1, then v2, v3...will all have B1 and only add - // more - List newList = new ArrayList<>(); - newList.addAll(locationList); - for (OmKeyLocationInfo newInfo : newLocationList) { - // all these new blocks will have addVersion of current version + 1 - newInfo.setCreateVersion(version + 1); - newList.add(newInfo); - } - return new OmKeyLocationInfoGroup(version + 1, newList); - } - - void appendNewBlocks(List newLocationList) - throws IOException { - for (OmKeyLocationInfo info : newLocationList) { - info.setCreateVersion(version); - locationList.add(info); - } - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append("version:").append(version).append(" "); - for (OmKeyLocationInfo kli : locationList) { - sb.append(kli.getLocalID()).append(" || "); - } - return sb.toString(); - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java deleted file mode 100644 index de75a05e9d92a..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.helpers; - -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.OzoneAclInfo; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclRights; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclType; - -import java.util.List; -import java.util.LinkedList; -import java.util.Map; -import java.util.ArrayList; -import java.util.HashMap; - -/** - * This helper class keeps a map of all user and their permissions. 
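Since the constructor of this class is package-private, external code obtains a populated map via ozoneAclGetFromProtobuf(); a sketch of the rights check follows, where the user name is a placeholder and READ_WRITE is assumed to imply READ per the hasAccess() logic in this class:

    import java.util.Collections;
    import org.apache.hadoop.ozone.om.helpers.OmOzoneAclMap;
    import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo;
    import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclRights;
    import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclType;

    public class OmOzoneAclMapSketch {
      public static void main(String[] args) {
        OzoneAclInfo grant = OzoneAclInfo.newBuilder()
            .setType(OzoneAclType.USER)
            .setName("alice")
            .setRights(OzoneAclRights.READ_WRITE)
            .build();
        OmOzoneAclMap aclMap =
            OmOzoneAclMap.ozoneAclGetFromProtobuf(Collections.singletonList(grant));
        // Probe with READ only: READ_WRITE stored rights must grant it.
        OzoneAclInfo readProbe = grant.toBuilder()
            .setRights(OzoneAclRights.READ).build();
        assert aclMap.hasAccess(readProbe);
      }
    }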
- */ -public class OmOzoneAclMap { - // per Acl Type user:rights map - private ArrayList> aclMaps; - - OmOzoneAclMap() { - aclMaps = new ArrayList<>(); - for (OzoneAclType aclType : OzoneAclType.values()) { - aclMaps.add(aclType.ordinal(), new HashMap<>()); - } - } - - private Map getMap(OzoneAclType type) { - return aclMaps.get(type.ordinal()); - } - - // For a given acl type and user, get the stored acl - private OzoneAclRights getAcl(OzoneAclType type, String user) { - return getMap(type).get(user); - } - - // Add a new acl to the map - public void addAcl(OzoneAclInfo acl) { - getMap(acl.getType()).put(acl.getName(), acl.getRights()); - } - - // for a given acl, check if the user has access rights - public boolean hasAccess(OzoneAclInfo acl) { - OzoneAclRights storedRights = getAcl(acl.getType(), acl.getName()); - if (storedRights != null) { - switch (acl.getRights()) { - case READ: - return (storedRights == OzoneAclRights.READ) - || (storedRights == OzoneAclRights.READ_WRITE); - case WRITE: - return (storedRights == OzoneAclRights.WRITE) - || (storedRights == OzoneAclRights.READ_WRITE); - case READ_WRITE: - return (storedRights == OzoneAclRights.READ_WRITE); - default: - return false; - } - } else { - return false; - } - } - - // Convert this map to OzoneAclInfo Protobuf List - public List ozoneAclGetProtobuf() { - List aclList = new LinkedList<>(); - for (OzoneAclType type: OzoneAclType.values()) { - for (Map.Entry entry : - aclMaps.get(type.ordinal()).entrySet()) { - OzoneAclInfo aclInfo = OzoneAclInfo.newBuilder() - .setName(entry.getKey()) - .setType(type) - .setRights(entry.getValue()) - .build(); - aclList.add(aclInfo); - } - } - - return aclList; - } - - // Create map from list of OzoneAclInfos - public static OmOzoneAclMap ozoneAclGetFromProtobuf( - List aclList) { - OmOzoneAclMap aclMap = new OmOzoneAclMap(); - for (OzoneAclInfo acl : aclList) { - aclMap.addAcl(acl); - } - return aclMap; - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java deleted file mode 100644 index 165d9aba783b5..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java +++ /dev/null @@ -1,237 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.helpers; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.audit.Auditable; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.OzoneAclInfo; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.VolumeInfo; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue; - -import java.io.IOException; -import java.util.HashMap; -import java.util.LinkedHashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - - -/** - * A class that encapsulates the OmVolumeArgs Args. - */ -public final class OmVolumeArgs implements Auditable{ - private final String adminName; - private final String ownerName; - private final String volume; - private final long creationTime; - private final long quotaInBytes; - private final Map keyValueMap; - private final OmOzoneAclMap aclMap; - - /** - * Private constructor, constructed via builder. - * @param adminName - Administrator's name. - * @param ownerName - Volume owner's name - * @param volume - volume name - * @param quotaInBytes - Volume Quota in bytes. - * @param keyValueMap - keyValue map. - * @param aclMap - User to access rights map. - * @param creationTime - Volume creation time. - */ - private OmVolumeArgs(String adminName, String ownerName, String volume, - long quotaInBytes, Map keyValueMap, - OmOzoneAclMap aclMap, long creationTime) { - this.adminName = adminName; - this.ownerName = ownerName; - this.volume = volume; - this.quotaInBytes = quotaInBytes; - this.keyValueMap = keyValueMap; - this.aclMap = aclMap; - this.creationTime = creationTime; - } - - /** - * Returns the Admin Name. - * @return String. - */ - public String getAdminName() { - return adminName; - } - - /** - * Returns the owner Name. - * @return String - */ - public String getOwnerName() { - return ownerName; - } - - /** - * Returns the volume Name. - * @return String - */ - public String getVolume() { - return volume; - } - - /** - * Returns creation time. - * @return long - */ - public long getCreationTime() { - return creationTime; - } - - /** - * Returns Quota in Bytes. - * @return long, Quota in bytes. - */ - public long getQuotaInBytes() { - return quotaInBytes; - } - - public Map getKeyValueMap() { - return keyValueMap; - } - - public OmOzoneAclMap getAclMap() { - return aclMap; - } - /** - * Returns new builder class that builds a OmVolumeArgs. - * - * @return Builder - */ - public static Builder newBuilder() { - return new Builder(); - } - - @Override - public Map toAuditMap() { - Map auditMap = new LinkedHashMap<>(); - auditMap.put(OzoneConsts.ADMIN, this.adminName); - auditMap.put(OzoneConsts.OWNER, this.ownerName); - auditMap.put(OzoneConsts.VOLUME, this.volume); - auditMap.put(OzoneConsts.CREATION_TIME, String.valueOf(this.creationTime)); - auditMap.put(OzoneConsts.QUOTA_IN_BYTES, String.valueOf(this.quotaInBytes)); - return auditMap; - } - - /** - * Builder for OmVolumeArgs. 
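A sketch of populating this builder; the admin/owner names and the 1 GB quota are placeholders:

    import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
    import org.apache.hadoop.util.Time;

    public class OmVolumeArgsSketch {
      public static void main(String[] args) {
        OmVolumeArgs volumeArgs = OmVolumeArgs.newBuilder()
            .setAdminName("hdfs")    // required (Preconditions check)
            .setOwnerName("alice")   // required
            .setVolume("vol1")       // required
            .setQuotaInBytes(1024L * 1024L * 1024L)
            .setCreationTime(Time.now())
            .addMetadata("owner-team", "storage")  // overwrites if present
            .build();
        System.out.println(volumeArgs.toAuditMap());
      }
    }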
- */ - public static class Builder { - private String adminName; - private String ownerName; - private String volume; - private long creationTime; - private long quotaInBytes; - private Map keyValueMap; - private OmOzoneAclMap aclMap; - - /** - * Constructs a builder. - */ - public Builder() { - keyValueMap = new HashMap<>(); - aclMap = new OmOzoneAclMap(); - } - - public Builder setAdminName(String admin) { - this.adminName = admin; - return this; - } - - public Builder setOwnerName(String owner) { - this.ownerName = owner; - return this; - } - - public Builder setVolume(String volumeName) { - this.volume = volumeName; - return this; - } - - public Builder setCreationTime(long createdOn) { - this.creationTime = createdOn; - return this; - } - - public Builder setQuotaInBytes(long quota) { - this.quotaInBytes = quota; - return this; - } - - public Builder addMetadata(String key, String value) { - keyValueMap.put(key, value); // overwrite if present. - return this; - } - - public Builder addOzoneAcls(OzoneAclInfo acl) throws IOException { - aclMap.addAcl(acl); - return this; - } - - /** - * Constructs a CreateVolumeArgument. - * @return CreateVolumeArgs. - */ - public OmVolumeArgs build() { - Preconditions.checkNotNull(adminName); - Preconditions.checkNotNull(ownerName); - Preconditions.checkNotNull(volume); - return new OmVolumeArgs(adminName, ownerName, volume, quotaInBytes, - keyValueMap, aclMap, creationTime); - } - } - - public VolumeInfo getProtobuf() { - List metadataList = new LinkedList<>(); - for (Map.Entry entry : keyValueMap.entrySet()) { - metadataList.add(KeyValue.newBuilder().setKey(entry.getKey()). - setValue(entry.getValue()).build()); - } - List aclList = aclMap.ozoneAclGetProtobuf(); - - return VolumeInfo.newBuilder() - .setAdminName(adminName) - .setOwnerName(ownerName) - .setVolume(volume) - .setQuotaInBytes(quotaInBytes) - .addAllMetadata(metadataList) - .addAllVolumeAcls(aclList) - .setCreationTime(creationTime) - .build(); - } - - public static OmVolumeArgs getFromProtobuf(VolumeInfo volInfo) { - Map kvMap = volInfo.getMetadataList().stream() - .collect(Collectors.toMap(KeyValue::getKey, - KeyValue::getValue)); - OmOzoneAclMap aclMap = - OmOzoneAclMap.ozoneAclGetFromProtobuf(volInfo.getVolumeAclsList()); - - return new OmVolumeArgs(volInfo.getAdminName(), volInfo.getOwnerName(), - volInfo.getVolume(), volInfo.getQuotaInBytes(), kvMap, aclMap, - volInfo.getCreationTime()); - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OpenKeySession.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OpenKeySession.java deleted file mode 100644 index 11ee622494ddf..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OpenKeySession.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.helpers; - -/** - * This class represents a open key "session". A session here means a key is - * opened by a specific client, the client sends the handler to server, such - * that servers can recognize this client, and thus know how to close the key. - */ -public class OpenKeySession { - private final long id; - private final OmKeyInfo keyInfo; - // the version of the key when it is being opened in this session. - // a block that has a create version equals to open version means it will - // be committed only when this open session is closed. - private long openVersion; - - public OpenKeySession(long id, OmKeyInfo info, long version) { - this.id = id; - this.keyInfo = info; - this.openVersion = version; - } - - public long getOpenVersion() { - return this.openVersion; - } - - public OmKeyInfo getKeyInfo() { - return keyInfo; - } - - public long getId() { - return id; - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java deleted file mode 100644 index 9b03aefe1a8d0..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java +++ /dev/null @@ -1,237 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.helpers; - - -import com.fasterxml.jackson.annotation.JsonIgnore; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; -import com.fasterxml.jackson.databind.ObjectWriter; -import com.google.common.base.Preconditions; -import org.apache.hadoop.ozone.client.rest.response.BucketInfo; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .ServicePort; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - -/** - * ServiceInfo holds the config details of Ozone services. - */ -public final class ServiceInfo { - - private static final ObjectReader READER = - new ObjectMapper().readerFor(ServiceInfo.class); - private static final ObjectWriter WRITER = - new ObjectMapper().writerWithDefaultPrettyPrinter(); - - /** - * Type of node/service. - */ - private NodeType nodeType; - /** - * Hostname of the node in which the service is running. - */ - private String hostname; - - /** - * List of ports the service listens to. - */ - private Map ports; - - /** - * Default constructor for JSON deserialization. - */ - public ServiceInfo() {} - - /** - * Constructs the ServiceInfo for the {@code nodeType}. - * @param nodeType type of node/service - * @param hostname hostname of the service - * @param portList list of ports the service listens to - */ - private ServiceInfo( - NodeType nodeType, String hostname, List portList) { - Preconditions.checkNotNull(nodeType); - Preconditions.checkNotNull(hostname); - this.nodeType = nodeType; - this.hostname = hostname; - this.ports = new HashMap<>(); - for (ServicePort port : portList) { - ports.put(port.getType(), port.getValue()); - } - } - - /** - * Returns the type of node/service. - * @return node type - */ - public NodeType getNodeType() { - return nodeType; - } - - /** - * Returns the hostname of the service. - * @return hostname - */ - public String getHostname() { - return hostname; - } - - /** - * Returns ServicePort.Type to port mappings. - * @return ports - */ - public Map getPorts() { - return ports; - } - - /** - * Returns the port for given type, null if the service doesn't support - * the type. - * - * @param type the type of port. - * ex: RPC, HTTP, HTTPS, etc.. - */ - @JsonIgnore - public int getPort(ServicePort.Type type) { - return ports.get(type); - } - - /** - * Converts {@link ServiceInfo} to OzoneManagerProtocolProtos.ServiceInfo. 
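A sketch of building and JSON-serializing a ServiceInfo; the hostname and the 9862 port value are placeholders, and ServicePort.Type.RPC is assumed to be among the generated enum constants:

    import java.io.IOException;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
    import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
    import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServicePort;

    public class ServiceInfoSketch {
      public static void main(String[] args) throws IOException {
        ServiceInfo info = ServiceInfo.newBuilder()
            .setNodeType(NodeType.OM)
            .setHostname("om-host.example.com")
            .addServicePort(ServicePort.newBuilder()
                .setType(ServicePort.Type.RPC)
                .setValue(9862)
                .build())
            .build();
        System.out.println(info.toJsonString());  // pretty-printed JSON
      }
    }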
- * - * @return OzoneManagerProtocolProtos.ServiceInfo - */ - @JsonIgnore - public OzoneManagerProtocolProtos.ServiceInfo getProtobuf() { - OzoneManagerProtocolProtos.ServiceInfo.Builder builder = - OzoneManagerProtocolProtos.ServiceInfo.newBuilder(); - builder.setNodeType(nodeType) - .setHostname(hostname) - .addAllServicePorts( - ports.entrySet().stream() - .map( - entry -> - ServicePort.newBuilder() - .setType(entry.getKey()) - .setValue(entry.getValue()).build()) - .collect(Collectors.toList())); - return builder.build(); - } - - /** - * Converts OzoneManagerProtocolProtos.ServiceInfo to {@link ServiceInfo}. - * - * @return {@link ServiceInfo} - */ - @JsonIgnore - public static ServiceInfo getFromProtobuf( - OzoneManagerProtocolProtos.ServiceInfo serviceInfo) { - return new ServiceInfo(serviceInfo.getNodeType(), - serviceInfo.getHostname(), - serviceInfo.getServicePortsList()); - } - - /** - * Returns a JSON string of this object. - * - * @return String - json string - * @throws IOException - */ - public String toJsonString() throws IOException { - return WRITER.writeValueAsString(this); - } - - /** - * Parse a JSON string into ServiceInfo Object. - * - * @param jsonString Json String - * @return BucketInfo - * @throws IOException - */ - public static BucketInfo parse(String jsonString) throws IOException { - return READER.readValue(jsonString); - } - - /** - * Creates a new builder to build {@link ServiceInfo}. - * @return {@link ServiceInfo.Builder} - */ - public static Builder newBuilder() { - return new Builder(); - } - - /** - * Builder used to build/construct {@link ServiceInfo}. - */ - public static class Builder { - - private NodeType node; - private String host; - private List portList = new ArrayList<>(); - - - /** - * Sets the node/service type. - * @param nodeType type of node - * @return the builder - */ - public Builder setNodeType(NodeType nodeType) { - node = nodeType; - return this; - } - - /** - * Sets the hostname of the service. - * @param hostname service hostname - * @return the builder - */ - public Builder setHostname(String hostname) { - host = hostname; - return this; - } - - /** - * Adds the service port to the service port list. - * @param servicePort RPC port - * @return the builder - */ - public Builder addServicePort(ServicePort servicePort) { - portList.add(servicePort); - return this; - } - - - /** - * Builds and returns {@link ServiceInfo} with the set values. - * @return {@link ServiceInfo} - */ - public ServiceInfo build() { - return new ServiceInfo(node, host, portList); - } - } - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/VolumeArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/VolumeArgs.java deleted file mode 100644 index 6fc7c8fcc5353..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/VolumeArgs.java +++ /dev/null @@ -1,140 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.helpers; - -import com.google.common.base.Preconditions; - -import java.util.HashMap; -import java.util.Map; - -/** - * A class that encapsulates the createVolume Args. - */ -public final class VolumeArgs { - private final String adminName; - private final String ownerName; - private final String volume; - private final long quotaInBytes; - private final Map extendedAttributes; - - /** - * Private constructor, constructed via builder. - * - * @param adminName - Administrator name. - * @param ownerName - Volume owner's name - * @param volume - volume name - * @param quotaInBytes - Volume Quota in bytes. - * @param keyValueMap - keyValue map. - */ - private VolumeArgs(String adminName, String ownerName, String volume, - long quotaInBytes, Map keyValueMap) { - this.adminName = adminName; - this.ownerName = ownerName; - this.volume = volume; - this.quotaInBytes = quotaInBytes; - this.extendedAttributes = keyValueMap; - } - - /** - * Returns the Admin Name. - * - * @return String. - */ - public String getAdminName() { - return adminName; - } - - /** - * Returns the owner Name. - * - * @return String - */ - public String getOwnerName() { - return ownerName; - } - - /** - * Returns the volume Name. - * - * @return String - */ - public String getVolume() { - return volume; - } - - /** - * Returns Quota in Bytes. - * - * @return long, Quota in bytes. - */ - public long getQuotaInBytes() { - return quotaInBytes; - } - - public Map getExtendedAttributes() { - return extendedAttributes; - } - - static class Builder { - private String adminName; - private String ownerName; - private String volume; - private long quotaInBytes; - private Map extendedAttributes; - - /** - * Constructs a builder. - */ - Builder() { - extendedAttributes = new HashMap<>(); - } - - public void setAdminName(String adminName) { - this.adminName = adminName; - } - - public void setOwnerName(String ownerName) { - this.ownerName = ownerName; - } - - public void setVolume(String volume) { - this.volume = volume; - } - - public void setQuotaInBytes(long quotaInBytes) { - this.quotaInBytes = quotaInBytes; - } - - public void addMetadata(String key, String value) { - extendedAttributes.put(key, value); // overwrite if present. - } - - /** - * Constructs a CreateVolumeArgument. - * - * @return CreateVolumeArgs. - */ - public VolumeArgs build() { - Preconditions.checkNotNull(adminName); - Preconditions.checkNotNull(ownerName); - Preconditions.checkNotNull(volume); - return new VolumeArgs(adminName, ownerName, volume, quotaInBytes, - extendedAttributes); - } - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/package-info.java deleted file mode 100644 index b1211d8cb86a9..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.helpers; \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/package-info.java deleted file mode 100644 index 1744cffc134ee..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om; -/** - This package contains client side protocol library to communicate with OM. - */ \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java deleted file mode 100644 index edb260a108417..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java +++ /dev/null @@ -1,253 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.protocol; - -import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.helpers.OpenKeySession; -import org.apache.hadoop.ozone.om.helpers.ServiceInfo; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.OzoneAclInfo; -import java.io.IOException; -import java.util.List; - -/** - * Protocol to talk to OM. - */ -public interface OzoneManagerProtocol { - - /** - * Creates a volume. - * @param args - Arguments to create Volume. - * @throws IOException - */ - void createVolume(OmVolumeArgs args) throws IOException; - - /** - * Changes the owner of a volume. - * @param volume - Name of the volume. - * @param owner - Name of the owner. - * @throws IOException - */ - void setOwner(String volume, String owner) throws IOException; - - /** - * Changes the Quota on a volume. - * @param volume - Name of the volume. - * @param quota - Quota in bytes. - * @throws IOException - */ - void setQuota(String volume, long quota) throws IOException; - - /** - * Checks if the specified user can access this volume. - * @param volume - volume - * @param userAcl - user acls which needs to be checked for access - * @return true if the user has required access for the volume, - * false otherwise - * @throws IOException - */ - boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl) - throws IOException; - - /** - * Gets the volume information. - * @param volume - Volume name. - * @return VolumeArgs or exception is thrown. - * @throws IOException - */ - OmVolumeArgs getVolumeInfo(String volume) throws IOException; - - /** - * Deletes an existing empty volume. - * @param volume - Name of the volume. - * @throws IOException - */ - void deleteVolume(String volume) throws IOException; - - /** - * Lists volume owned by a specific user. - * @param userName - user name - * @param prefix - Filter prefix -- Return only entries that match this. - * @param prevKey - Previous key -- List starts from the next from the prevkey - * @param maxKeys - Max number of keys to return. - * @return List of Volumes. - * @throws IOException - */ - List listVolumeByUser(String userName, String prefix, String - prevKey, int maxKeys) throws IOException; - - /** - * Lists volume all volumes in the cluster. - * @param prefix - Filter prefix -- Return only entries that match this. - * @param prevKey - Previous key -- List starts from the next from the prevkey - * @param maxKeys - Max number of keys to return. - * @return List of Volumes. - * @throws IOException - */ - List listAllVolumes(String prefix, String - prevKey, int maxKeys) throws IOException; - - /** - * Creates a bucket. - * @param bucketInfo - BucketInfo to create Bucket. - * @throws IOException - */ - void createBucket(OmBucketInfo bucketInfo) throws IOException; - - /** - * Gets the bucket information. - * @param volumeName - Volume name. - * @param bucketName - Bucket name. 
- * @return OmBucketInfo or exception is thrown. - * @throws IOException - */ - OmBucketInfo getBucketInfo(String volumeName, String bucketName) - throws IOException; - - /** - * Sets bucket property from args. - * @param args - BucketArgs. - * @throws IOException - */ - void setBucketProperty(OmBucketArgs args) throws IOException; - - /** - * Open the given key and return an open key session. - * - * @param args the args of the key. - * @return OpenKeySession instance that client uses to talk to container. - * @throws IOException - */ - OpenKeySession openKey(OmKeyArgs args) throws IOException; - - /** - * Commit a key. This will make the change from the client visible. The client - * is identified by the clientID. - * - * @param args the key to commit - * @param clientID the client identification - * @throws IOException - */ - void commitKey(OmKeyArgs args, long clientID) throws IOException; - - /** - * Allocate a new block, it is assumed that the client is having an open key - * session going on. This block will be appended to this open key session. - * - * @param args the key to append - * @param clientID the client identification - * @return an allocated block - * @throws IOException - */ - OmKeyLocationInfo allocateBlock(OmKeyArgs args, long clientID) - throws IOException; - - /** - * Look up for the container of an existing key. - * - * @param args the args of the key. - * @return OmKeyInfo instance that client uses to talk to container. - * @throws IOException - */ - OmKeyInfo lookupKey(OmKeyArgs args) throws IOException; - - /** - * Rename an existing key within a bucket. - * @param args the args of the key. - * @param toKeyName New name to be used for the Key - * @throws IOException - */ - void renameKey(OmKeyArgs args, String toKeyName) throws IOException; - - /** - * Deletes an existing key. - * - * @param args the args of the key. - * @throws IOException - */ - void deleteKey(OmKeyArgs args) throws IOException; - - /** - * Deletes an existing empty bucket from volume. - * @param volume - Name of the volume. - * @param bucket - Name of the bucket. - * @throws IOException - */ - void deleteBucket(String volume, String bucket) throws IOException; - - /** - * Returns a list of buckets represented by {@link OmBucketInfo} - * in the given volume. Argument volumeName is required, others - * are optional. - * - * @param volumeName - * the name of the volume. - * @param startBucketName - * the start bucket name, only the buckets whose name is - * after this value will be included in the result. - * @param bucketPrefix - * bucket name prefix, only the buckets whose name has - * this prefix will be included in the result. - * @param maxNumOfBuckets - * the maximum number of buckets to return. It ensures - * the size of the result will not exceed this limit. - * @return a list of buckets. - * @throws IOException - */ - List listBuckets(String volumeName, - String startBucketName, String bucketPrefix, int maxNumOfBuckets) - throws IOException; - - /** - * Returns a list of keys represented by {@link OmKeyInfo} - * in the given bucket. Argument volumeName, bucketName is required, - * others are optional. - * - * @param volumeName - * the name of the volume. - * @param bucketName - * the name of the bucket. - * @param startKeyName - * the start key name, only the keys whose name is - * after this value will be included in the result. - * @param keyPrefix - * key name prefix, only the keys whose name has - * this prefix will be included in the result. 
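To make the openKey()/commitKey() contract concrete, a sketch of the usual write path against this interface; "om" is assumed to be an already-wired client proxy such as the protocolPB translator deleted further below:

    import java.io.IOException;
    import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
    import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
    import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;

    public class OmWriteFlowSketch {
      static void writeKey(OzoneManagerProtocol om, OmKeyArgs keyArgs)
          throws IOException {
        OpenKeySession session = om.openKey(keyArgs);  // server allocates block(s)
        // ... client streams data to datanodes using session.getKeyInfo() ...
        om.commitKey(keyArgs, session.getId());        // key becomes visible
      }
    }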
- * @param maxKeys - * the maximum number of keys to return. It ensures - * the size of the result will not exceed this limit. - * @return a list of keys. - * @throws IOException - */ - List<OmKeyInfo> listKeys(String volumeName, - String bucketName, String startKeyName, String keyPrefix, int maxKeys) - throws IOException; - - /** - * Returns the list of Ozone services with their configuration details. - * - * @return list of Ozone services - * @throws IOException - */ - List<ServiceInfo> getServiceList() throws IOException; -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/package-info.java deleted file mode 100644 index 9c7f3888d31ff..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/package-info.java +++ /dev/null @@ -1,19 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.protocol; \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java deleted file mode 100644 index c0829fabb98d9..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java +++ /dev/null @@ -1,775 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
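The interface above is consumed in a create-volume, create-bucket, then key-I/O order. A minimal caller sketch under stated assumptions: `om` is an already-wired OzoneManagerProtocol implementation, the helper builder APIs are those of this tree, and the volume/bucket names are purely illustrative.

    // Sketch only: `om` is an assumed, already-constructed
    // OzoneManagerProtocol implementation.
    OmVolumeArgs volumeArgs = OmVolumeArgs.newBuilder()
        .setVolume("vol1")
        .setAdminName("hdfs")
        .setOwnerName("bilbo")
        .build();
    om.createVolume(volumeArgs);

    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
        .setVolumeName("vol1")
        .setBucketName("bucket1")
        .build();
    om.createBucket(bucketInfo);

    // Listing is paginated: startBucketName acts as the page token and
    // maxNumOfBuckets bounds the page size.
    List<OmBucketInfo> firstPage = om.listBuckets("vol1", null, null, 100);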
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.protocolPB; - -import com.google.common.base.Preconditions; -import com.google.common.base.Strings; -import com.google.common.collect.Lists; -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.ipc.ProtobufHelper; -import org.apache.hadoop.ipc.ProtocolTranslator; -import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.helpers.OpenKeySession; -import org.apache.hadoop.ozone.om.helpers.ServiceInfo; -import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.AllocateBlockRequest; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.AllocateBlockResponse; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.CommitKeyRequest; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.CommitKeyResponse; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.BucketArgs; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.BucketInfo; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.CreateBucketRequest; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.CreateBucketResponse; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.InfoBucketRequest; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.InfoBucketResponse; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.SetBucketPropertyRequest; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.SetBucketPropertyResponse; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.DeleteBucketRequest; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.DeleteBucketResponse; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.CreateVolumeRequest; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.CreateVolumeResponse; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.LocateKeyRequest; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.LocateKeyResponse; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.RenameKeyRequest; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.RenameKeyResponse; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.KeyArgs; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.SetVolumePropertyRequest; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.SetVolumePropertyResponse; -import 
org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.DeleteVolumeRequest; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.DeleteVolumeResponse; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.InfoVolumeRequest; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.InfoVolumeResponse; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.CheckVolumeAccessRequest; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.CheckVolumeAccessResponse; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.ListBucketsRequest; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.ListBucketsResponse; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.ListKeysRequest; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.ListKeysResponse; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.VolumeInfo; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.Status; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.OzoneAclInfo; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.ListVolumeRequest; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.ListVolumeResponse; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.ServiceListRequest; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.ServiceListResponse; - -import java.io.Closeable; -import java.io.IOException; -import java.util.List; -import java.util.ArrayList; -import java.util.stream.Collectors; - -/** - * The client side implementation of OzoneManagerProtocol. - */ - -@InterfaceAudience.Private -public final class OzoneManagerProtocolClientSideTranslatorPB - implements OzoneManagerProtocol, ProtocolTranslator, Closeable { - - /** - * RpcController is not used and hence is set to null. - */ - private static final RpcController NULL_RPC_CONTROLLER = null; - - private final OzoneManagerProtocolPB rpcProxy; - - /** - * Constructor for OzoneManager Client. - * @param rpcProxy - the RPC proxy to be wrapped - */ - public OzoneManagerProtocolClientSideTranslatorPB( - OzoneManagerProtocolPB rpcProxy) { - this.rpcProxy = rpcProxy; - } - - /** - * Closes this stream and releases any system resources associated - * with it. If the stream is already closed then invoking this - * method has no effect. - *
- * <p>
As noted in {@link AutoCloseable#close()}, cases where the - * close may fail require careful attention. It is strongly advised - * to relinquish the underlying resources and to internally - * mark the {@code Closeable} as closed, prior to throwing - * the {@code IOException}. - * - * @throws IOException if an I/O error occurs - */ - @Override - public void close() throws IOException { - - } - - /** - * Creates a volume. - * - * @param args - Arguments to create Volume. - * @throws IOException - */ - @Override - public void createVolume(OmVolumeArgs args) throws IOException { - CreateVolumeRequest.Builder req = - CreateVolumeRequest.newBuilder(); - VolumeInfo volumeInfo = args.getProtobuf(); - req.setVolumeInfo(volumeInfo); - - final CreateVolumeResponse resp; - try { - resp = rpcProxy.createVolume(NULL_RPC_CONTROLLER, - req.build()); - } catch (ServiceException e) { - throw ProtobufHelper.getRemoteException(e); - } - - if (resp.getStatus() != Status.OK) { - throw new - IOException("Volume creation failed, error:" + resp.getStatus()); - } - } - - /** - * Changes the owner of a volume. - * - * @param volume - Name of the volume. - * @param owner - Name of the owner. - * @throws IOException - */ - @Override - public void setOwner(String volume, String owner) throws IOException { - SetVolumePropertyRequest.Builder req = - SetVolumePropertyRequest.newBuilder(); - req.setVolumeName(volume).setOwnerName(owner); - final SetVolumePropertyResponse resp; - try { - resp = rpcProxy.setVolumeProperty(NULL_RPC_CONTROLLER, req.build()); - } catch (ServiceException e) { - throw ProtobufHelper.getRemoteException(e); - } - if (resp.getStatus() != Status.OK) { - throw new - IOException("Volume owner change failed, error:" + resp.getStatus()); - } - } - - /** - * Changes the Quota on a volume. - * - * @param volume - Name of the volume. - * @param quota - Quota in bytes. - * @throws IOException - */ - @Override - public void setQuota(String volume, long quota) throws IOException { - SetVolumePropertyRequest.Builder req = - SetVolumePropertyRequest.newBuilder(); - req.setVolumeName(volume).setQuotaInBytes(quota); - final SetVolumePropertyResponse resp; - try { - resp = rpcProxy.setVolumeProperty(NULL_RPC_CONTROLLER, req.build()); - } catch (ServiceException e) { - throw ProtobufHelper.getRemoteException(e); - } - if (resp.getStatus() != Status.OK) { - throw new - IOException("Volume quota change failed, error:" + resp.getStatus()); - } - } - - /** - * Checks if the specified user can access this volume. - * - * @param volume - volume - * @param userAcl - user acls which needs to be checked for access - * @return true if the user has required access for the volume, - * false otherwise - * @throws IOException - */ - @Override - public boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl) throws - IOException { - CheckVolumeAccessRequest.Builder req = - CheckVolumeAccessRequest.newBuilder(); - req.setVolumeName(volume).setUserAcl(userAcl); - final CheckVolumeAccessResponse resp; - try { - resp = rpcProxy.checkVolumeAccess(NULL_RPC_CONTROLLER, req.build()); - } catch (ServiceException e) { - throw ProtobufHelper.getRemoteException(e); - } - - if (resp.getStatus() == Status.ACCESS_DENIED) { - return false; - } else if (resp.getStatus() == Status.OK) { - return true; - } else { - throw new - IOException("Check Volume Access failed, error:" + resp.getStatus()); - } - } - - /** - * Gets the volume information. - * - * @param volume - Volume name. - * @return OmVolumeArgs or exception is thrown. 
- * @throws IOException - */ - @Override - public OmVolumeArgs getVolumeInfo(String volume) throws IOException { - InfoVolumeRequest.Builder req = InfoVolumeRequest.newBuilder(); - req.setVolumeName(volume); - final InfoVolumeResponse resp; - try { - resp = rpcProxy.infoVolume(NULL_RPC_CONTROLLER, req.build()); - } catch (ServiceException e) { - throw ProtobufHelper.getRemoteException(e); - } - if (resp.getStatus() != Status.OK) { - throw new - IOException("Info Volume failed, error:" + resp.getStatus()); - } - return OmVolumeArgs.getFromProtobuf(resp.getVolumeInfo()); - } - - /** - * Deletes an existing empty volume. - * - * @param volume - Name of the volume. - * @throws IOException - */ - @Override - public void deleteVolume(String volume) throws IOException { - DeleteVolumeRequest.Builder req = DeleteVolumeRequest.newBuilder(); - req.setVolumeName(volume); - final DeleteVolumeResponse resp; - try { - resp = rpcProxy.deleteVolume(NULL_RPC_CONTROLLER, req.build()); - } catch (ServiceException e) { - throw ProtobufHelper.getRemoteException(e); - } - if (resp.getStatus() != Status.OK) { - throw new - IOException("Delete Volume failed, error:" + resp.getStatus()); - } - } - - /** - * Lists volumes owned by a specific user. - * - * @param userName - user name - * @param prefix - Filter prefix -- Return only entries that match this. - * @param prevKey - Previous key -- List starts from the entry after the - * prevKey - * @param maxKeys - Max number of keys to return. - * @return List of Volumes. - * @throws IOException - */ - @Override - public List<OmVolumeArgs> listVolumeByUser(String userName, String prefix, - String prevKey, int maxKeys) - throws IOException { - ListVolumeRequest.Builder builder = ListVolumeRequest.newBuilder(); - if (!Strings.isNullOrEmpty(prefix)) { - builder.setPrefix(prefix); - } - if (!Strings.isNullOrEmpty(prevKey)) { - builder.setPrevKey(prevKey); - } - builder.setMaxKeys(maxKeys); - builder.setUserName(userName); - builder.setScope(ListVolumeRequest.Scope.VOLUMES_BY_USER); - return listVolume(builder.build()); - } - - /** - * Lists all volumes in the cluster. - * - * @param prefix - Filter prefix -- Return only entries that match this. - * @param prevKey - Previous key -- List starts from the entry after the - * prevKey - * @param maxKeys - Max number of keys to return. - * @return List of Volumes.
- * @throws IOException - */ - @Override - public List<OmVolumeArgs> listAllVolumes(String prefix, String prevKey, - int maxKeys) throws IOException { - ListVolumeRequest.Builder builder = ListVolumeRequest.newBuilder(); - if (!Strings.isNullOrEmpty(prefix)) { - builder.setPrefix(prefix); - } - if (!Strings.isNullOrEmpty(prevKey)) { - builder.setPrevKey(prevKey); - } - builder.setMaxKeys(maxKeys); - builder.setScope(ListVolumeRequest.Scope.VOLUMES_BY_CLUSTER); - return listVolume(builder.build()); - } - - private List<OmVolumeArgs> listVolume(ListVolumeRequest request) - throws IOException { - final ListVolumeResponse resp; - try { - resp = rpcProxy.listVolumes(NULL_RPC_CONTROLLER, request); - } catch (ServiceException e) { - throw ProtobufHelper.getRemoteException(e); - } - - if (resp.getStatus() != Status.OK) { - throw new IOException("List volume failed, error: " - + resp.getStatus()); - } - - List<OmVolumeArgs> result = Lists.newArrayList(); - for (VolumeInfo volInfo : resp.getVolumeInfoList()) { - OmVolumeArgs volArgs = OmVolumeArgs.getFromProtobuf(volInfo); - result.add(volArgs); - } - - return result; - } - - /** - * Creates a bucket. - * - * @param bucketInfo - BucketInfo to create bucket. - * @throws IOException - */ - @Override - public void createBucket(OmBucketInfo bucketInfo) throws IOException { - CreateBucketRequest.Builder req = - CreateBucketRequest.newBuilder(); - BucketInfo bucketInfoProtobuf = bucketInfo.getProtobuf(); - req.setBucketInfo(bucketInfoProtobuf); - - final CreateBucketResponse resp; - try { - resp = rpcProxy.createBucket(NULL_RPC_CONTROLLER, - req.build()); - } catch (ServiceException e) { - throw ProtobufHelper.getRemoteException(e); - } - if (resp.getStatus() != Status.OK) { - throw new IOException("Bucket creation failed, error: " - + resp.getStatus()); - } - } - - /** - * Gets the bucket information. - * - * @param volume - Volume name. - * @param bucket - Bucket name. - * @return OmBucketInfo or exception is thrown. - * @throws IOException - */ - @Override - public OmBucketInfo getBucketInfo(String volume, String bucket) - throws IOException { - InfoBucketRequest.Builder req = - InfoBucketRequest.newBuilder(); - req.setVolumeName(volume); - req.setBucketName(bucket); - - final InfoBucketResponse resp; - try { - resp = rpcProxy.infoBucket(NULL_RPC_CONTROLLER, - req.build()); - } catch (ServiceException e) { - throw ProtobufHelper.getRemoteException(e); - } - if (resp.getStatus() == Status.OK) { - return OmBucketInfo.getFromProtobuf(resp.getBucketInfo()); - } else { - throw new IOException("Info Bucket failed, error: " - + resp.getStatus()); - } - } - - /** - * Sets bucket property from args. - * @param args - BucketArgs. - * @throws IOException - */ - @Override - public void setBucketProperty(OmBucketArgs args) - throws IOException { - SetBucketPropertyRequest.Builder req = - SetBucketPropertyRequest.newBuilder(); - BucketArgs bucketArgs = args.getProtobuf(); - req.setBucketArgs(bucketArgs); - final SetBucketPropertyResponse resp; - try { - resp = rpcProxy.setBucketProperty(NULL_RPC_CONTROLLER, - req.build()); - } catch (ServiceException e) { - throw ProtobufHelper.getRemoteException(e); - } - if (resp.getStatus() != Status.OK) { - throw new IOException("Setting bucket property failed, error: " - + resp.getStatus()); - } - } - - /** - * List buckets in a volume.
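Every RPC wrapper in this class repeats the same build-call-translate-check shape. A hypothetical helper (not part of the original class) makes the shared discipline explicit:

    // Hypothetical refactoring sketch: each wrapper builds a protobuf
    // request, invokes rpcProxy, unwraps ServiceException via
    // ProtobufHelper.getRemoteException, then rejects any non-OK status.
    private static void checkStatus(Status status, String op)
        throws IOException {
      if (status != Status.OK) {
        throw new IOException(op + " failed, error:" + status);
      }
    }

With such a helper, a method like deleteVolume would reduce to the proxy call plus checkStatus(resp.getStatus(), "Delete Volume").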
- * - * @param volumeName - Name of the volume. - * @param startKey - Bucket name to start the page from. - * @param prefix - Bucket name prefix filter. - * @param count - Maximum number of buckets to return. - * @return a list of buckets. - * @throws IOException - */ - @Override - public List<OmBucketInfo> listBuckets(String volumeName, - String startKey, String prefix, int count) throws IOException { - List<OmBucketInfo> buckets = new ArrayList<>(); - ListBucketsRequest.Builder reqBuilder = ListBucketsRequest.newBuilder(); - reqBuilder.setVolumeName(volumeName); - reqBuilder.setCount(count); - if (startKey != null) { - reqBuilder.setStartKey(startKey); - } - if (prefix != null) { - reqBuilder.setPrefix(prefix); - } - ListBucketsRequest request = reqBuilder.build(); - final ListBucketsResponse resp; - try { - resp = rpcProxy.listBuckets(NULL_RPC_CONTROLLER, request); - } catch (ServiceException e) { - throw ProtobufHelper.getRemoteException(e); - } - - if (resp.getStatus() == Status.OK) { - buckets.addAll( - resp.getBucketInfoList().stream() - .map(OmBucketInfo::getFromProtobuf) - .collect(Collectors.toList())); - return buckets; - } else { - throw new IOException("List Buckets failed, error: " - + resp.getStatus()); - } - } - - /** - * Create a new open session of the key, then use the returned meta info to - * talk to data node to actually write the key. - * @param args the args for the key to be allocated - * @return an OpenKeySession handle that the client uses to write the key - * @throws IOException - */ - @Override - public OpenKeySession openKey(OmKeyArgs args) throws IOException { - LocateKeyRequest.Builder req = LocateKeyRequest.newBuilder(); - KeyArgs.Builder keyArgs = KeyArgs.newBuilder() - .setVolumeName(args.getVolumeName()) - .setBucketName(args.getBucketName()) - .setFactor(args.getFactor()) - .setType(args.getType()) - .setKeyName(args.getKeyName()); - if (args.getDataSize() > 0) { - keyArgs.setDataSize(args.getDataSize()); - } - req.setKeyArgs(keyArgs.build()); - - final LocateKeyResponse resp; - try { - resp = rpcProxy.createKey(NULL_RPC_CONTROLLER, req.build()); - } catch (ServiceException e) { - throw ProtobufHelper.getRemoteException(e); - } - if (resp.getStatus() != Status.OK) { - throw new IOException("Create key failed, error:" + resp.getStatus()); - } - return new OpenKeySession(resp.getID(), - OmKeyInfo.getFromProtobuf(resp.getKeyInfo()), resp.getOpenVersion()); - } - - @Override - public OmKeyLocationInfo allocateBlock(OmKeyArgs args, long clientID) - throws IOException { - AllocateBlockRequest.Builder req = AllocateBlockRequest.newBuilder(); - KeyArgs keyArgs = KeyArgs.newBuilder() - .setVolumeName(args.getVolumeName()) - .setBucketName(args.getBucketName()) - .setKeyName(args.getKeyName()) - .setDataSize(args.getDataSize()).build(); - req.setKeyArgs(keyArgs); - req.setClientID(clientID); - - final AllocateBlockResponse resp; - try { - resp = rpcProxy.allocateBlock(NULL_RPC_CONTROLLER, req.build()); - } catch (ServiceException e) { - throw ProtobufHelper.getRemoteException(e); - } - if (resp.getStatus() != Status.OK) { - throw new IOException("Allocate block failed, error:" + - resp.getStatus()); - } - return OmKeyLocationInfo.getFromProtobuf(resp.getKeyLocation()); - } - - @Override - public void commitKey(OmKeyArgs args, long clientID) - throws IOException { - CommitKeyRequest.Builder req = CommitKeyRequest.newBuilder(); - List<OmKeyLocationInfo> locationInfoList = args.getLocationInfoList(); - Preconditions.checkNotNull(locationInfoList); - KeyArgs keyArgs = KeyArgs.newBuilder() - .setVolumeName(args.getVolumeName()) - .setBucketName(args.getBucketName()) - .setKeyName(args.getKeyName()) - .setDataSize(args.getDataSize()) - .addAllKeyLocations( -
locationInfoList.stream().map(OmKeyLocationInfo::getProtobuf) - .collect(Collectors.toList())).build(); - req.setKeyArgs(keyArgs); - req.setClientID(clientID); - - final CommitKeyResponse resp; - try { - resp = rpcProxy.commitKey(NULL_RPC_CONTROLLER, req.build()); - } catch (ServiceException e) { - throw ProtobufHelper.getRemoteException(e); - } - if (resp.getStatus() != Status.OK) { - throw new IOException("Commit key failed, error:" + - resp.getStatus()); - } - } - - - @Override - public OmKeyInfo lookupKey(OmKeyArgs args) throws IOException { - LocateKeyRequest.Builder req = LocateKeyRequest.newBuilder(); - KeyArgs keyArgs = KeyArgs.newBuilder() - .setVolumeName(args.getVolumeName()) - .setBucketName(args.getBucketName()) - .setKeyName(args.getKeyName()) - .setDataSize(args.getDataSize()).build(); - req.setKeyArgs(keyArgs); - - final LocateKeyResponse resp; - try { - resp = rpcProxy.lookupKey(NULL_RPC_CONTROLLER, req.build()); - } catch (ServiceException e) { - throw ProtobufHelper.getRemoteException(e); - } - if (resp.getStatus() != Status.OK) { - throw new IOException("Lookup key failed, error:" + - resp.getStatus()); - } - return OmKeyInfo.getFromProtobuf(resp.getKeyInfo()); - } - - @Override - public void renameKey(OmKeyArgs args, String toKeyName) throws IOException { - RenameKeyRequest.Builder req = RenameKeyRequest.newBuilder(); - KeyArgs keyArgs = KeyArgs.newBuilder() - .setVolumeName(args.getVolumeName()) - .setBucketName(args.getBucketName()) - .setKeyName(args.getKeyName()) - .setDataSize(args.getDataSize()).build(); - req.setKeyArgs(keyArgs); - req.setToKeyName(toKeyName); - - final RenameKeyResponse resp; - try { - resp = rpcProxy.renameKey(NULL_RPC_CONTROLLER, req.build()); - } catch (ServiceException e) { - throw ProtobufHelper.getRemoteException(e); - } - if (resp.getStatus() != Status.OK) { - throw new IOException("Rename key failed, error:" + - resp.getStatus()); - } - } - - /** - * Deletes an existing key. - * - * @param args the args of the key. - * @throws IOException - */ - @Override - public void deleteKey(OmKeyArgs args) throws IOException { - LocateKeyRequest.Builder req = LocateKeyRequest.newBuilder(); - KeyArgs keyArgs = KeyArgs.newBuilder() - .setVolumeName(args.getVolumeName()) - .setBucketName(args.getBucketName()) - .setKeyName(args.getKeyName()).build(); - req.setKeyArgs(keyArgs); - - final LocateKeyResponse resp; - try { - resp = rpcProxy.deleteKey(NULL_RPC_CONTROLLER, req.build()); - } catch (ServiceException e) { - throw ProtobufHelper.getRemoteException(e); - } - if (resp.getStatus() != Status.OK) { - throw new IOException("Delete key failed, error:" + - resp.getStatus()); - } - } - - /** - * Deletes an existing empty bucket from volume. - * @param volume - Name of the volume. - * @param bucket - Name of the bucket. - * @throws IOException - */ - public void deleteBucket(String volume, String bucket) throws IOException { - DeleteBucketRequest.Builder req = DeleteBucketRequest.newBuilder(); - req.setVolumeName(volume); - req.setBucketName(bucket); - final DeleteBucketResponse resp; - try { - resp = rpcProxy.deleteBucket(NULL_RPC_CONTROLLER, req.build()); - } catch (ServiceException e) { - throw ProtobufHelper.getRemoteException(e); - } - if (resp.getStatus() != Status.OK) { - throw new - IOException("Delete Bucket failed, error:" + resp.getStatus()); - } - } - - /** - * List keys in a bucket. 
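The key write path spans three of the calls above. A sketch of the client-side order, assuming `om` is this translator and eliding the actual datanode writes; names, sizes, and the replication settings are illustrative:

    // Sketch: open a key, grow it by allocating blocks against the open
    // session, then commit so the key becomes visible.
    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
        .setVolumeName("vol1")
        .setBucketName("bucket1")
        .setKeyName("key1")
        .setDataSize(1024)
        .setType(ReplicationType.STAND_ALONE)   // illustrative choice
        .setFactor(ReplicationFactor.ONE)       // illustrative choice
        .build();
    OpenKeySession session = om.openKey(keyArgs);
    OmKeyLocationInfo block = om.allocateBlock(keyArgs, session.getId());
    // ... write the data to the returned block location ...
    // commitKey expects args whose location list reflects the blocks
    // actually written (it null-checks the list):
    // om.commitKey(argsWithLocations, session.getId());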
- */ - @Override - public List<OmKeyInfo> listKeys(String volumeName, String bucketName, - String startKey, String prefix, int maxKeys) throws IOException { - List<OmKeyInfo> keys = new ArrayList<>(); - ListKeysRequest.Builder reqBuilder = ListKeysRequest.newBuilder(); - reqBuilder.setVolumeName(volumeName); - reqBuilder.setBucketName(bucketName); - reqBuilder.setCount(maxKeys); - - if (startKey != null) { - reqBuilder.setStartKey(startKey); - } - - if (prefix != null) { - reqBuilder.setPrefix(prefix); - } - - ListKeysRequest request = reqBuilder.build(); - final ListKeysResponse resp; - try { - resp = rpcProxy.listKeys(NULL_RPC_CONTROLLER, request); - } catch (ServiceException e) { - throw ProtobufHelper.getRemoteException(e); - } - - if (resp.getStatus() == Status.OK) { - keys.addAll( - resp.getKeyInfoList().stream() - .map(OmKeyInfo::getFromProtobuf) - .collect(Collectors.toList())); - return keys; - } else { - throw new IOException("List Keys failed, error: " - + resp.getStatus()); - } - } - - @Override - public List<ServiceInfo> getServiceList() throws IOException { - ServiceListRequest request = ServiceListRequest.newBuilder().build(); - final ServiceListResponse resp; - try { - resp = rpcProxy.getServiceList(NULL_RPC_CONTROLLER, request); - } catch (ServiceException e) { - throw ProtobufHelper.getRemoteException(e); - } - - if (resp.getStatus() == Status.OK) { - return resp.getServiceInfoList().stream() - .map(ServiceInfo::getFromProtobuf) - .collect(Collectors.toList()); - } else { - throw new IOException("Getting service list failed, error: " - + resp.getStatus()); - } - } - - /** - * Return the proxy object underlying this protocol translator. - * - * @return the proxy object underlying this protocol translator. - */ - @Override - public Object getUnderlyingProxyObject() { - return rpcProxy; - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolPB.java deleted file mode 100644 index e0879d601cec1..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolPB.java +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.protocolPB; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.ipc.ProtocolInfo; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.OzoneManagerService; - -/** - * Protocol used to communicate with OM.
- */ -@ProtocolInfo(protocolName = - "org.apache.hadoop.ozone.protocol.OzoneManagerProtocol", - protocolVersion = 1) -@InterfaceAudience.Private -public interface OzoneManagerProtocolPB - extends OzoneManagerService.BlockingInterface { -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/package-info.java deleted file mode 100644 index d595edf291a5b..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/package-info.java +++ /dev/null @@ -1,19 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.protocolPB; \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/package-info.java deleted file mode 100644 index 69d94b60ac43f..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone; - -/** - * Classes related to ozone REST interface. - */ \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java deleted file mode 100644 index d57d32e0c459e..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java +++ /dev/null @@ -1,113 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.protocolPB; - -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.OzoneAclInfo; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclType; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclRights; - -/** - * Utilities for converting protobuf classes. - */ -public final class OMPBHelper { - - private OMPBHelper() { - /** Hidden constructor */ - } - - /** - * Converts OzoneAcl into protobuf's OzoneAclInfo. - * @return OzoneAclInfo - */ - public static OzoneAclInfo convertOzoneAcl(OzoneAcl acl) { - OzoneAclInfo.OzoneAclType aclType; - switch(acl.getType()) { - case USER: - aclType = OzoneAclType.USER; - break; - case GROUP: - aclType = OzoneAclType.GROUP; - break; - case WORLD: - aclType = OzoneAclType.WORLD; - break; - default: - throw new IllegalArgumentException("ACL type is not recognized"); - } - OzoneAclInfo.OzoneAclRights aclRights; - switch(acl.getRights()) { - case READ: - aclRights = OzoneAclRights.READ; - break; - case WRITE: - aclRights = OzoneAclRights.WRITE; - break; - case READ_WRITE: - aclRights = OzoneAclRights.READ_WRITE; - break; - default: - throw new IllegalArgumentException("ACL right is not recognized"); - } - - return OzoneAclInfo.newBuilder().setType(aclType) - .setName(acl.getName()) - .setRights(aclRights) - .build(); - } - - /** - * Converts protobuf's OzoneAclInfo into OzoneAcl. - * @return OzoneAcl - */ - public static OzoneAcl convertOzoneAcl(OzoneAclInfo aclInfo) { - OzoneAcl.OzoneACLType aclType; - switch(aclInfo.getType()) { - case USER: - aclType = OzoneAcl.OzoneACLType.USER; - break; - case GROUP: - aclType = OzoneAcl.OzoneACLType.GROUP; - break; - case WORLD: - aclType = OzoneAcl.OzoneACLType.WORLD; - break; - default: - throw new IllegalArgumentException("ACL type is not recognized"); - } - OzoneAcl.OzoneACLRights aclRights; - switch(aclInfo.getRights()) { - case READ: - aclRights = OzoneAcl.OzoneACLRights.READ; - break; - case WRITE: - aclRights = OzoneAcl.OzoneACLRights.WRITE; - break; - case READ_WRITE: - aclRights = OzoneAcl.OzoneACLRights.READ_WRITE; - break; - default: - throw new IllegalArgumentException("ACL right is not recognized"); - } - - return new OzoneAcl(aclType, aclInfo.getName(), aclRights); - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OzonePBHelper.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OzonePBHelper.java deleted file mode 100644 index 8361bac0d0658..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OzonePBHelper.java +++ /dev/null @@ -1,30 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.protocolPB; - -/** - * Helper class for converting protobuf objects. - */ -public final class OzonePBHelper { - - private OzonePBHelper() { - /** Hidden constructor */ - } - - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java deleted file mode 100644 index 860386d9fdcc5..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.protocolPB; - -/** - * This package contains classes for the Protocol Buffers binding of Ozone - * protocols. - */ diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java deleted file mode 100644 index d476748208405..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java +++ /dev/null @@ -1,213 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
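The two OMPBHelper.convertOzoneAcl overloads above are inverses of each other. A round-trip sketch, assuming the "type:name:rights" string form accepted by OzoneAcl.parseAcl in this tree:

    // Round-trip an ACL through its protobuf form; the parsed and
    // restored ACLs should agree on type, name and rights.
    OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rw");
    OzoneAclInfo wire = OMPBHelper.convertOzoneAcl(acl);
    OzoneAcl restored = OMPBHelper.convertOzoneAcl(wire);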
- */ - -package org.apache.hadoop.ozone.util; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.util.ClassUtil; -import org.apache.hadoop.util.ThreadUtil; -import org.apache.hadoop.utils.HddsVersionInfo; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.io.InputStream; -import java.util.Properties; - -/** - * This class returns build information about Hadoop components. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class OzoneVersionInfo { - private static final Logger LOG = LoggerFactory.getLogger(OzoneVersionInfo.class); - - private Properties info; - - protected OzoneVersionInfo(String component) { - info = new Properties(); - String versionInfoFile = component + "-version-info.properties"; - InputStream is = null; - try { - is = ThreadUtil.getResourceAsStream(OzoneVersionInfo.class.getClassLoader(), - versionInfoFile); - info.load(is); - } catch (IOException ex) { - LoggerFactory.getLogger(getClass()).warn("Could not read '" + - versionInfoFile + "', " + ex.toString(), ex); - } finally { - IOUtils.closeStream(is); - } - } - - protected String _getVersion() { - return info.getProperty("version", "Unknown"); - } - - protected String _getRelease() { - return info.getProperty("release", "Unknown"); - } - - protected String _getRevision() { - return info.getProperty("revision", "Unknown"); - } - - protected String _getBranch() { - return info.getProperty("branch", "Unknown"); - } - - protected String _getDate() { - return info.getProperty("date", "Unknown"); - } - - protected String _getUser() { - return info.getProperty("user", "Unknown"); - } - - protected String _getUrl() { - return info.getProperty("url", "Unknown"); - } - - protected String _getSrcChecksum() { - return info.getProperty("srcChecksum", "Unknown"); - } - - protected String _getBuildVersion(){ - return _getVersion() + - " from " + _getRevision() + - " by " + _getUser() + - " source checksum " + _getSrcChecksum(); - } - - protected String _getProtocVersion() { - return info.getProperty("protocVersion", "Unknown"); - } - - private static OzoneVersionInfo OZONE_VERSION_INFO = new OzoneVersionInfo("ozone"); - /** - * Get the Ozone version. - * @return the Ozone version string, eg. "0.6.3-dev" - */ - public static String getVersion() { - return OZONE_VERSION_INFO._getVersion(); - } - - /** - * Get the Ozone release name. - * @return the Ozone release string, eg. "Acadia" - */ - public static String getRelease() { - return OZONE_VERSION_INFO._getRelease(); - } - - /** - * Get the Git commit hash of the repository when compiled. - * @return the commit hash, eg. "18f64065d5db6208daf50b02c1b5ed4ee3ce547a" - */ - public static String getRevision() { - return OZONE_VERSION_INFO._getRevision(); - } - - /** - * Get the branch on which this originated. - * @return The branch name, e.g. "trunk" or "branches/branch-0.20" - */ - public static String getBranch() { - return OZONE_VERSION_INFO._getBranch(); - } - - /** - * The date that Ozone was compiled. - * @return the compilation date in unix date format - */ - public static String getDate() { - return OZONE_VERSION_INFO._getDate(); - } - - /** - * The user that compiled Ozone. - * @return the username of the user - */ - public static String getUser() { - return OZONE_VERSION_INFO._getUser(); - } - - /** - * Get the URL for the Ozone repository. 
- * @return the URL of the Ozone repository - */ - public static String getUrl() { - return OZONE_VERSION_INFO._getUrl(); - } - - /** - * Get the checksum of the source files from which Ozone was built. - * @return the checksum of the source files - */ - public static String getSrcChecksum() { - return OZONE_VERSION_INFO._getSrcChecksum(); - } - - /** - * Returns the buildVersion which includes version, - * revision, user and date. - * @return the buildVersion - */ - public static String getBuildVersion(){ - return OZONE_VERSION_INFO._getBuildVersion(); - } - - /** - * Returns the protoc version used for the build. - * @return the protoc version - */ - public static String getProtocVersion(){ - return OZONE_VERSION_INFO._getProtocVersion(); - } - - public static void main(String[] args) { - System.out.println( - " ////////////// \n" + - " //////////////////// \n" + - " //////// //////////////// \n" + - " ////// //////////////// \n" + - " ///// //////////////// / \n" + - " ///// //////// /// \n" + - " //// //////// ///// \n" + - " ///// //////////////// \n" + - " ///// //////////////// // \n" + - " //// /////////////// ///// \n" + - " ///// /////////////// //// \n" + - " ///// ////// ///// \n" + - " ////// ////// ///// \n" + - " /////////// //////// \n" + - " ////// //////////// \n" + - " /// ////////// \n" + - " / "+ getVersion() + "("+ getRelease() +")\n"); - System.out.println("Source code repository " + getUrl() + " -r " + - getRevision()); - System.out.println("Compiled by " + getUser() + " on " + getDate()); - System.out.println("Compiled with protoc " + getProtocVersion()); - System.out.println("From source with checksum " + getSrcChecksum() + "\n"); - LOG.debug("This command was run using " + - ClassUtil.findContainingJar(OzoneVersionInfo.class)); - HddsVersionInfo.main(args); - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketArgs.java deleted file mode 100644 index 0d5248d125e49..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketArgs.java +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.handlers; - -import org.apache.hadoop.fs.StorageType; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.OzoneConsts; - -import java.util.LinkedList; -import java.util.List; - -/** - * BucketArgs packages all bucket related arguments to - * file system calls. 
- */ -public class BucketArgs extends VolumeArgs { - private final String bucketName; - private List<OzoneAcl> addAcls; - private List<OzoneAcl> removeAcls; - private OzoneConsts.Versioning versioning; - private StorageType storageType; - - /** - * Constructor for BucketArgs. - * - * @param volumeName - volumeName - * @param bucketName - bucket Name - * @param userArgs - userArgs - */ - public BucketArgs(String volumeName, String bucketName, UserArgs userArgs) { - super(volumeName, userArgs); - this.bucketName = bucketName; - this.versioning = OzoneConsts.Versioning.NOT_DEFINED; - this.storageType = null; - } - - - /** - * Constructor for BucketArgs. - * - * @param bucketName - bucket Name - * @param volumeArgs - volume Args - */ - public BucketArgs(String bucketName, VolumeArgs volumeArgs) { - super(volumeArgs); - this.bucketName = bucketName; - this.versioning = OzoneConsts.Versioning.NOT_DEFINED; - this.storageType = null; - } - - /** - * Constructor for BucketArgs. - * - * @param args - Bucket Args - */ - public BucketArgs(BucketArgs args) { - this(args.getBucketName(), args); - this.setAddAcls(args.getAddAcls()); - this.setRemoveAcls(args.getRemoveAcls()); - } - - /** - * Returns the Bucket Name. - * - * @return Bucket Name - */ - public String getBucketName() { - return bucketName; - } - - /** - * Returns Additive ACLs for the Bucket if specified. - * - * @return acls - */ - public List<OzoneAcl> getAddAcls() { - return addAcls; - } - - /** - * Set Additive ACLs. - * - * @param acl - ACL - */ - public void setAddAcls(List<OzoneAcl> acl) { - this.addAcls = acl; - } - - /** - * Returns remove ACLs for the Bucket if specified. - * - * @return acls - */ - public List<OzoneAcl> getRemoveAcls() { - return removeAcls; - } - - /** - * Parses the given ACL strings and adds them to the additive ACL list. - * - * @param aclString - aclString - */ - public void addAcls(List<String> aclString) throws IllegalArgumentException { - if (aclString == null) { - throw new IllegalArgumentException("ACLs cannot be null"); - } - if (this.addAcls == null) { - this.addAcls = new LinkedList<>(); - } - for (String s : aclString) { - this.addAcls.add(OzoneAcl.parseAcl(s)); - } - } - - /** - * Parses the given ACL strings and adds them to the remove ACL list. - * - * @param aclString - aclString - */ - public void removeAcls(List<String> aclString) - throws IllegalArgumentException { - if (aclString == null) { - throw new IllegalArgumentException("ACLs cannot be null"); - } - if (this.removeAcls == null) { - this.removeAcls = new LinkedList<>(); - } - for (String s : aclString) { - this.removeAcls.add(OzoneAcl.parseAcl(s)); - } - } - - /** - * Set remove ACLs. - * - * @param acl - ACL - */ - public void setRemoveAcls(List<OzoneAcl> acl) { - this.removeAcls = acl; - } - - - /** - * Returns Versioning Info. - * - * @return versioning - */ - public OzoneConsts.Versioning getVersioning() { - return versioning; - } - - - /** - * Sets Versioning Info. - * - * @param versioning - Enum value - */ - public void setVersioning(OzoneConsts.Versioning versioning) { - this.versioning = versioning; - } - - /** - * Returns the current Storage Type. - * - * @return Storage Type - */ - public StorageType getStorageType() { - return storageType; - } - - /** - * Sets the Storage Type. - * - * @param storageType - Storage Type - */ - public void setStorageType(StorageType storageType) { - this.storageType = storageType; - } - - /** - * Returns Volume/bucketName.
- * - * @return String - */ - @Override - public String getResourceName() { - return getVolumeName() + "/" + getBucketName(); - } - - /** - * Returns User/Volume name which is the parent of this - * bucket. - * - * @return String - */ - public String getParentName() { - return getUserName() + "/" + getVolumeName(); - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyArgs.java deleted file mode 100644 index 48a4cb451364c..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyArgs.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.handlers; - -/** - * Class that packages all key Arguments. - */ -public class KeyArgs extends BucketArgs { - private String key; - private String hash; - private long size; - - /** - * Constructor for Key Args. - * - * @param volumeName - Volume Name - * @param bucketName - Bucket Name - * @param objectName - Key - */ - public KeyArgs(String volumeName, String bucketName, - String objectName, UserArgs args) { - super(volumeName, bucketName, args); - this.key = objectName; - } - - /** - * Constructor for Key Args. - * - * @param objectName - Key - * @param args - Bucket Args - */ - public KeyArgs(String objectName, BucketArgs args) { - super(args); - this.key = objectName; - } - - /** - * Get Key Name. - * - * @return String - */ - public String getKeyName() { - return this.key; - } - - /** - * Computed File hash. - * - * @return String - */ - public String getHash() { - return hash; - } - - /** - * Sets the hash String. - * - * @param hash String - */ - public void setHash(String hash) { - this.hash = hash; - } - - /** - * Returns the file size. - * - * @return long - file size - */ - public long getSize() { - return size; - } - - /** - * Set Size. - * - * @param size Size of the file - */ - public void setSize(long size) { - this.size = size; - } - - /** - * Returns the name of the resource. - * - * @return String - */ - @Override - public String getResourceName() { - return super.getResourceName() + "/" + getKeyName(); - } - - /** - * Parent name of this resource. - * - * @return String. 
- */ - @Override - public String getParentName() { - return super.getResourceName(); - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/ListArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/ListArgs.java deleted file mode 100644 index 49ca4a4f7e3a3..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/ListArgs.java +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.handlers; - -/** - * Supports listing keys with pagination. - */ -public class ListArgs<T extends UserArgs> { - private String prevKey; - private String prefix; - private int maxKeys; - private boolean rootScan; - private T args; - - /** - * Constructor for ListArgs. - * - * @param args - BucketArgs - * @param prefix Prefix to start Query from - * @param maxKeys Max result set - * @param prevKey - Page token - */ - public ListArgs(T args, String prefix, int maxKeys, - String prevKey) { - setArgs(args); - setPrefix(prefix); - setMaxKeys(maxKeys); - setPrevKey(prevKey); - } - - /** - * Copy Constructor for ListArgs. - * - * @param args - the args instance - * @param listArgs - the ListArgs to copy paging state from - */ - public ListArgs(T args, ListArgs<?> listArgs) { - this(args, listArgs.getPrefix(), listArgs.getMaxKeys(), - listArgs.getPrevKey()); - } - - /** - * Returns page token. - * - * @return String - */ - public String getPrevKey() { - return prevKey; - } - - /** - * Sets page token. - * - * @param prevKey - Page token - */ - public void setPrevKey(String prevKey) { - this.prevKey = prevKey; - } - - /** - * Gets max keys. - * - * @return int - */ - public int getMaxKeys() { - return maxKeys; - } - - /** - * Sets max keys. - * - * @param maxKeys - Maximum keys to return - */ - public void setMaxKeys(int maxKeys) { - this.maxKeys = maxKeys; - } - - /** - * Gets prefix. - * - * @return String - */ - public String getPrefix() { - return prefix; - } - - /** - * Sets prefix. - * - * @param prefix - The prefix that we are looking for - */ - public void setPrefix(String prefix) { - this.prefix = prefix; - } - - /** - * Gets args. - * @return T - */ - public T getArgs() { - return args; - } - - /** - * Sets args. - * @param args T - */ - public void setArgs(T args) { - this.args = args; - } - - /** - * Checks if we are doing a rootScan. - * @return - RootScan. - */ - public boolean isRootScan() { - return rootScan; - } - - /** - * Sets the RootScan property. - * @param rootScan - Boolean.
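ListArgs carries a page token (prevKey), so callers page by feeding each response's last key back into the next request. A loop sketch, where `bucketArgs` and `fetchPage` are assumed stand-ins for the real handler wiring (neither exists in this code):

    // Hypothetical paging loop: maxKeys bounds each page; the last key of
    // a page becomes the prevKey (page token) of the next request.
    String pageToken = null;
    do {
      ListArgs<BucketArgs> listArgs =
          new ListArgs<>(bucketArgs, "photos/", 1000, pageToken);
      pageToken = fetchPage(listArgs); // assumed: returns null at the end
    } while (pageToken != null);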
- */ - public void setRootScan(boolean rootScan) { - this.rootScan = rootScan; - } - -} \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/UserArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/UserArgs.java deleted file mode 100644 index 8a75928a1c660..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/UserArgs.java +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.handlers; - -import org.apache.hadoop.classification.InterfaceAudience; - -import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.Request; -import javax.ws.rs.core.UriInfo; -import java.util.Arrays; - -/** - * UserArgs is used to package caller info - * and pass it down to file system. - */ -@InterfaceAudience.Private -public class UserArgs { - private String userName; - private final String requestID; - private final String hostName; - private final UriInfo uri; - private final Request request; - private final HttpHeaders headers; - private String[] groups; - - - /** - * Constructs user args. - * - * @param userName - User name - * @param requestID - Request ID - * @param hostName - Host Name - * @param req - Request - * @param info - Uri Info - * @param httpHeaders - http headers - */ - public UserArgs(String userName, String requestID, String hostName, - Request req, UriInfo info, HttpHeaders httpHeaders) { - this.hostName = hostName; - this.userName = userName; - this.requestID = requestID; - this.uri = info; - this.request = req; - this.headers = httpHeaders; - } - - /** - * Constructs user args when we don't know the user name yet. - * - * @param requestID _ Request ID - * @param hostName - Host Name - * @param req - Request - * @param info - UriInfo - * @param httpHeaders - http headers - */ - public UserArgs(String requestID, String hostName, Request req, UriInfo info, - HttpHeaders httpHeaders) { - this.hostName = hostName; - this.requestID = requestID; - this.uri = info; - this.request = req; - this.headers = httpHeaders; - } - - /** - * Returns hostname. - * - * @return String - */ - public String getHostName() { - return hostName; - } - - /** - * Returns RequestID. - * - * @return Long - */ - public String getRequestID() { - return requestID; - } - - /** - * Returns User Name. - * - * @return String - */ - public String getUserName() { - return userName; - } - - /** - * Sets the user name. - * - * @param userName Name of the user - */ - public void setUserName(String userName) { - this.userName = userName; - } - - /** - * Returns list of groups. - * - * @return String[] - */ - public String[] getGroups() { - return groups != null ? 
- Arrays.copyOf(groups, groups.length) : null; - } - - /** - * Sets the group list. - * - * @param groups list of groups - */ - public void setGroups(String[] groups) { - if (groups != null) { - this.groups = Arrays.copyOf(groups, groups.length); - } - } - - /** - * Returns the resource Name. - * - * @return String Resource. - */ - public String getResourceName() { - return getUserName(); - } - - /** - * Returns Http Headers for this call. - * - * @return httpHeaders - */ - public HttpHeaders getHeaders() { - return headers; - } - - /** - * Returns Request Object. - * - * @return Request - */ - public Request getRequest() { - return request; - } - - /** - * Returns UriInfo. - * - * @return UriInfo - */ - public UriInfo getUri() { - return uri; - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeArgs.java deleted file mode 100644 index 1d67c67f598a5..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeArgs.java +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.handlers; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.ozone.web.request.OzoneQuota; - -import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.Request; -import javax.ws.rs.core.UriInfo; - -/** - * VolumeArgs is used to package all volume - * related arguments in the call to underlying - * file system. - */ -@InterfaceAudience.Private -public class VolumeArgs extends UserArgs { - private String adminName; - private final String volumeName; - private OzoneQuota quota; - - /** - * Returns Quota Information. - * - * @return Quota - */ - public OzoneQuota getQuota() { - return quota; - } - - /** - * Returns volume name. - * - * @return String - */ - public String getVolumeName() { - return volumeName; - } - - /** - * Constructs volume Args. - * - * @param userName - User name - * @param volumeName - volume Name - * @param requestID _ Request ID - * @param hostName - Host Name - * @param request - Http Request - * @param info - URI info - * @param headers - http headers - * @param groups - list of groups allowed to access the volume - */ - public VolumeArgs(String userName, String volumeName, String requestID, - String hostName, Request request, UriInfo info, - HttpHeaders headers, String[] groups) { - super(userName, requestID, hostName, request, info, headers); - super.setGroups(groups); - this.volumeName = volumeName; - } - - /** - * Constructs volume Args. 
- * - * @param volumeName - volume Name - * @param userArgs - userArgs - */ - public VolumeArgs(String volumeName, UserArgs userArgs) { - this(userArgs.getUserName(), volumeName, userArgs.getRequestID(), - userArgs.getHostName(), userArgs.getRequest(), userArgs.getUri(), - userArgs.getHeaders(), userArgs.getGroups()); - } - - /** - * Creates VolumeArgs from another VolumeArgs. - */ - public VolumeArgs(VolumeArgs volArgs) { - this(volArgs.getVolumeName(), volArgs); - } - - /** - * Sets Quota information. - * - * @param quota - Quota String - * @throws IllegalArgumentException - */ - public void setQuota(String quota) throws IllegalArgumentException { - this.quota = OzoneQuota.parseQuota(quota); - } - - /** - * Sets quota information. - * - * @param quota - OzoneQuota - */ - public void setQuota(OzoneQuota quota) { - this.quota = quota; - } - - /** - * Gets admin Name. - * - * @return - Admin Name - */ - public String getAdminName() { - return adminName; - } - - /** - * Sets Admin Name. - * - * @param adminName - Admin Name - */ - public void setAdminName(String adminName) { - this.adminName = adminName; - } - - /** - * Returns UserName/VolumeName. - * - * @return String - */ - @Override - public String getResourceName() { - return super.getResourceName() + "/" + getVolumeName(); - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/package-info.java deleted file mode 100644 index a66a773c389e8..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.handlers; - -/** - * REST handler value classes. - */ \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/request/OzoneQuota.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/request/OzoneQuota.java deleted file mode 100644 index 9619ebdb7cd2b..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/request/OzoneQuota.java +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.
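// A minimal sketch of the VolumeArgs wrapping pattern above: UserArgs carries
// the per-request context, and VolumeArgs decorates it with the volume name,
// admin, and quota. The userArgs value is assumed to come from the REST layer.
VolumeArgs createArgs = new VolumeArgs("vol-finance", userArgs);
createArgs.setAdminName("hdfs");
createArgs.setQuota("100GB");   // parsed by OzoneQuota.parseQuota, may throw
// Resource names compose as userName/volumeName:
String resource = createArgs.getResourceName();   // e.g. "alice/vol-finance"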
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.request; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.client.rest.headers.Header; -import com.fasterxml.jackson.annotation.JsonIgnore; - -/** - * represents an OzoneQuota Object that can be applied to - * a storage volume. - */ -@InterfaceAudience.Private -public class OzoneQuota { - - private Units unit; - private int size; - - /** Quota Units.*/ - public enum Units {UNDEFINED, BYTES, MB, GB, TB} - - /** - * Returns size. - * - * @return int - */ - public int getSize() { - return size; - } - - /** - * Returns Units. - * - * @return Unit in MB, GB or TB - */ - public Units getUnit() { - return unit; - } - - /** - * Constructs a default Quota object. - */ - public OzoneQuota() { - this.size = 0; - this.unit = Units.UNDEFINED; - } - - /** - * Constructor for Ozone Quota. - * - * @param size - Integer Size - * @param unit MB, GB or TB - */ - public OzoneQuota(int size, Units unit) { - this.size = size; - this.unit = unit; - } - - /** - * Formats a quota as a string. - * - * @param quota the quota to format - * @return string representation of quota - */ - public static String formatQuota(OzoneQuota quota) { - return String.valueOf(quota.size) + quota.unit; - } - - /** - * Parses a user provided string and returns the - * Quota Object. - * - * @param quotaString Quota String - * - * @return OzoneQuota object - * - * @throws IllegalArgumentException - */ - public static OzoneQuota parseQuota(String quotaString) - throws IllegalArgumentException { - - if ((quotaString == null) || (quotaString.isEmpty())) { - throw new IllegalArgumentException( - "Quota string cannot be null or empty."); - } - - if (isRemove(quotaString)) { - throw new IllegalArgumentException("Remove is invalid in this context."); - } - - String uppercase = quotaString.toUpperCase().replaceAll("\\s+", ""); - String size = ""; - int nSize; - Units currUnit = Units.MB; - Boolean found = false; - if (uppercase.endsWith(Header.OZONE_QUOTA_MB)) { - size = uppercase - .substring(0, uppercase.length() - Header.OZONE_QUOTA_MB.length()); - currUnit = Units.MB; - found = true; - } - - if (uppercase.endsWith(Header.OZONE_QUOTA_GB)) { - size = uppercase - .substring(0, uppercase.length() - Header.OZONE_QUOTA_GB.length()); - currUnit = Units.GB; - found = true; - } - - if (uppercase.endsWith(Header.OZONE_QUOTA_TB)) { - size = uppercase - .substring(0, uppercase.length() - Header.OZONE_QUOTA_TB.length()); - currUnit = Units.TB; - found = true; - } - - if (uppercase.endsWith(Header.OZONE_QUOTA_BYTES)) { - size = uppercase - .substring(0, uppercase.length() - Header.OZONE_QUOTA_BYTES.length()); - currUnit = Units.BYTES; - found = true; - } - - if (!found) { - throw new IllegalArgumentException( - "Quota unit not recognized. 
Supported values are BYTES, MB, GB and " + - "TB."); - } - - nSize = Integer.parseInt(size); - if (nSize < 0) { - throw new IllegalArgumentException("Quota cannot be negative."); - } - - return new OzoneQuota(nSize, currUnit); - } - - - /** - * Checks if Quota String is just as remove operation. - * - * @param quotaString User provided quota String - * - * @return True if it is Remove, false otherwise - */ - public static boolean isRemove(String quotaString) { - - return (quotaString != null) && - (quotaString.compareToIgnoreCase(Header.OZONE_QUOTA_REMOVE) == 0); - } - - /** - * Returns size in Bytes or -1 if there is no Quota. - */ - @JsonIgnore - public long sizeInBytes() { - switch (this.unit) { - case BYTES: - return this.getSize(); - case MB: - return this.getSize() * OzoneConsts.MB; - case GB: - return this.getSize() * OzoneConsts.GB; - case TB: - return this.getSize() * OzoneConsts.TB; - case UNDEFINED: - default: - return -1; - } - } - - /** - * Returns OzoneQuota corresponding to size in bytes. - * - * @param sizeInBytes size in bytes to be converted - * - * @return OzoneQuota object - */ - public static OzoneQuota getOzoneQuota(long sizeInBytes) { - long size; - Units unit; - if (sizeInBytes % OzoneConsts.TB == 0) { - size = sizeInBytes / OzoneConsts.TB; - unit = Units.TB; - } else if (sizeInBytes % OzoneConsts.GB == 0) { - size = sizeInBytes / OzoneConsts.GB; - unit = Units.GB; - } else if (sizeInBytes % OzoneConsts.MB == 0) { - size = sizeInBytes / OzoneConsts.MB; - unit = Units.MB; - } else { - size = sizeInBytes; - unit = Units.BYTES; - } - return new OzoneQuota((int)size, unit); - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/request/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/request/package-info.java deleted file mode 100644 index 4fbc18ff6e4f5..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/request/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/** - * Netty-based HTTP server implementation for Ozone. - */ -package org.apache.hadoop.ozone.web.request; \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/BucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/BucketInfo.java deleted file mode 100644 index e66cd204df089..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/BucketInfo.java +++ /dev/null @@ -1,325 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
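// Round-trip sketch of the quota helpers defined above: parseQuota() reads a
// size-plus-unit string, sizeInBytes() normalizes it (assuming OzoneConsts.MB,
// GB, and TB are the byte multipliers), and getOzoneQuota() picks the largest
// unit that divides the byte count evenly.
OzoneQuota q = OzoneQuota.parseQuota("100GB");
long bytes = q.sizeInBytes();                       // 100 * OzoneConsts.GB
OzoneQuota back = OzoneQuota.getOzoneQuota(bytes);  // 100 GB again
OzoneQuota odd = OzoneQuota.getOzoneQuota(1536);    // not a whole MB: 1536 BYTES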
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.response; - -import java.io.IOException; -import java.util.LinkedList; -import java.util.List; - -import org.apache.hadoop.fs.StorageType; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.web.utils.JsonUtils; - -import com.fasterxml.jackson.annotation.JsonAutoDetect; -import com.fasterxml.jackson.annotation.JsonFilter; -import com.fasterxml.jackson.annotation.PropertyAccessor; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; -import com.fasterxml.jackson.databind.ObjectWriter; -import com.fasterxml.jackson.databind.ser.FilterProvider; -import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter; -import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider; -import com.google.common.base.Preconditions; - -/** - * BucketInfo class, this is used as response class to send - * Json info about a bucket back to a client. - */ -public class BucketInfo implements Comparable { - static final String BUCKET_INFO = "BUCKET_INFO_FILTER"; - private static final ObjectReader READER = - new ObjectMapper().readerFor(BucketInfo.class); - private static final ObjectWriter WRITER; - - static { - ObjectMapper mapper = new ObjectMapper(); - String[] ignorableFieldNames = {"bytesUsed", "keyCount"}; - - FilterProvider filters = new SimpleFilterProvider().addFilter(BUCKET_INFO, - SimpleBeanPropertyFilter.serializeAllExcept(ignorableFieldNames)); - mapper.setVisibility(PropertyAccessor.FIELD, JsonAutoDetect.Visibility.ANY); - mapper.addMixIn(Object.class, MixIn.class); - - mapper.setFilterProvider(filters); - WRITER = mapper.writerWithDefaultPrettyPrinter(); - } - - private String volumeName; - private String bucketName; - private String createdOn; - private List acls; - private OzoneConsts.Versioning versioning; - private StorageType storageType; - private long bytesUsed; - private long keyCount; - - /** - * Constructor for BucketInfo. - * - * @param volumeName - * @param bucketName - */ - public BucketInfo(String volumeName, String bucketName) { - this.volumeName = volumeName; - this.bucketName = bucketName; - } - - - /** - * Default constructor for BucketInfo. - */ - public BucketInfo() { - acls = new LinkedList(); - } - - /** - * Parse a JSON string into BucketInfo Object. - * - * @param jsonString - Json String - * - * @return - BucketInfo - * - * @throws IOException - */ - public static BucketInfo parse(String jsonString) throws IOException { - return READER.readValue(jsonString); - } - - /** - * Returns a List of ACL on the Bucket. - * - * @return List of Acls - */ - public List getAcls() { - return acls; - } - - /** - * Sets ACls. 
- * - * @param acls - Acls list - */ - public void setAcls(List acls) { - this.acls = acls; - } - - /** - * Returns Storage Type info. - * - * @return Storage Type of the bucket - */ - public StorageType getStorageType() { - return storageType; - } - - /** - * Sets the Storage Type. - * - * @param storageType - Storage Type - */ - public void setStorageType(StorageType storageType) { - this.storageType = storageType; - } - - /** - * Returns versioning. - * - * @return versioning Enum - */ - public OzoneConsts.Versioning getVersioning() { - return versioning; - } - - /** - * Sets Versioning. - * - * @param versioning - */ - public void setVersioning(OzoneConsts.Versioning versioning) { - this.versioning = versioning; - } - - - /** - * Gets bucket Name. - * - * @return String - */ - public String getBucketName() { - return bucketName; - } - - /** - * Sets bucket Name. - * - * @param bucketName - Name of the bucket - */ - public void setBucketName(String bucketName) { - this.bucketName = bucketName; - } - - /** - * Sets creation time of the bucket. - * - * @param creationTime - Date String - */ - public void setCreatedOn(String creationTime) { - this.createdOn = creationTime; - } - - /** - * Returns creation time. - * - * @return creation time of bucket. - */ - public String getCreatedOn() { - return createdOn; - } - - /** - * Returns a JSON string of this object. - * After stripping out bytesUsed and keyCount - * - * @return String - */ - public String toJsonString() throws IOException { - return WRITER.writeValueAsString(this); - } - - /** - * Returns the Object as a Json String. - * - * The reason why both toJSONString exists and toDBString exists - * is because toJSONString supports an external facing contract with - * REST clients. However server internally would want to add more - * fields to this class. The distinction helps in serializing all - * fields vs. only fields that are part of REST protocol. - */ - public String toDBString() throws IOException { - return JsonUtils.toJsonString(this); - } - - /** - * Returns Volume Name. - * - * @return String volume name - */ - public String getVolumeName() { - return volumeName; - } - - /** - * Sets the Volume Name of the bucket. - * - * @param volumeName - volumeName - */ - public void setVolumeName(String volumeName) { - this.volumeName = volumeName; - } - - /** - * Compares this object with the specified object for order. Returns a - * negative integer, zero, or a positive integer as this object is less - * than, equal to, or greater than the specified object. - * - * Please note : BucketInfo compare functions are used only within the - * context of a volume, hence volume name is purposefully ignored in - * compareTo, equal and hashcode functions of this class. - */ - @Override - public int compareTo(BucketInfo o) { - Preconditions.checkState(o.getVolumeName().equals(this.getVolumeName())); - return this.bucketName.compareTo(o.getBucketName()); - } - - /** - * Checks if two bucketInfo's are equal. - * @param o Object BucketInfo - * @return True or False - */ - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof BucketInfo)) { - return false; - } - - BucketInfo that = (BucketInfo) o; - Preconditions.checkState(that.getVolumeName().equals(this.getVolumeName())); - return bucketName.equals(that.bucketName); - - } - - /** - * Hash Code for this object. 
- * @return int - */ - @Override - public int hashCode() { - return bucketName.hashCode(); - } - - /** - * Get the number of bytes used by this bucket. - * - * @return long - */ - public long getBytesUsed() { - return bytesUsed; - } - - /** - * Set bytes Used. - * - * @param bytesUsed - bytesUsed - */ - public void setBytesUsed(long bytesUsed) { - this.bytesUsed = bytesUsed; - } - - /** - * Get Key Count inside this bucket. - * - * @return - KeyCount - */ - public long getKeyCount() { - return keyCount; - } - - /** - * Set Key Count inside this bucket. - * - * @param keyCount - Sets the Key Count - */ - public void setKeyCount(long keyCount) { - this.keyCount = keyCount; - } - - /** - * This class allows us to create custom filters - * for the Json serialization. - */ - @JsonFilter(BUCKET_INFO) - class MixIn { - - } - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfo.java deleted file mode 100644 index ba47bee767995..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfo.java +++ /dev/null @@ -1,290 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.response; - -import java.io.IOException; - -import org.apache.commons.lang3.builder.EqualsBuilder; -import org.apache.commons.lang3.builder.HashCodeBuilder; -import org.apache.hadoop.ozone.web.utils.JsonUtils; - -import com.fasterxml.jackson.annotation.JsonAutoDetect; -import com.fasterxml.jackson.annotation.JsonFilter; -import com.fasterxml.jackson.annotation.PropertyAccessor; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; -import com.fasterxml.jackson.databind.ObjectWriter; -import com.fasterxml.jackson.databind.ser.FilterProvider; -import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter; -import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider; - -/** - * Represents an Ozone key Object. 
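// Sketch of the two serialization paths on BucketInfo described above: the
// REST-facing toJsonString() applies the BUCKET_INFO filter, while toDBString()
// keeps every field for server-side persistence. Both can throw IOException.
BucketInfo info = new BucketInfo("vol-finance", "reports");
info.setKeyCount(42);
info.setBytesUsed(1048576);
String restJson = info.toJsonString(); // bytesUsed and keyCount stripped
String dbJson = info.toDBString();     // all fields retained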
- */ -public class KeyInfo implements Comparable { - static final String OBJECT_INFO = "OBJECT_INFO_FILTER"; - - private static final ObjectReader READER = - new ObjectMapper().readerFor(KeyInfo.class); - private static final ObjectWriter WRITER; - - static { - ObjectMapper mapper = new ObjectMapper(); - String[] ignorableFieldNames = {"dataFileName"}; - - FilterProvider filters = new SimpleFilterProvider() - .addFilter(OBJECT_INFO, SimpleBeanPropertyFilter - .serializeAllExcept(ignorableFieldNames)); - mapper.setVisibility(PropertyAccessor.FIELD, - JsonAutoDetect.Visibility.ANY); - mapper.addMixIn(Object.class, MixIn.class); - - mapper.setFilterProvider(filters); - WRITER = mapper.writerWithDefaultPrettyPrinter(); - } - - /** - * This class allows us to create custom filters - * for the Json serialization. - */ - @JsonFilter(OBJECT_INFO) - class MixIn { - - } - private long version; - private String md5hash; - private String createdOn; - private String modifiedOn; - private long size; - private String keyName; - - private String dataFileName; - - /** - * When this key was created. - * - * @return Date String - */ - public String getCreatedOn() { - return createdOn; - } - - /** - * When this key was modified. - * - * @return Date String - */ - public String getModifiedOn() { - return modifiedOn; - } - - /** - * When this key was created. - * - * @param createdOn - Date String - */ - public void setCreatedOn(String createdOn) { - this.createdOn = createdOn; - } - - /** - * When this key was modified. - * - * @param modifiedOn - Date String - */ - public void setModifiedOn(String modifiedOn) { - this.modifiedOn = modifiedOn; - } - - /** - * Full path to where the actual data for this key is stored. - * - * @return String - */ - public String getDataFileName() { - return dataFileName; - } - - /** - * Sets up where the file path is stored. - * - * @param dataFileName - Data File Name - */ - public void setDataFileName(String dataFileName) { - this.dataFileName = dataFileName; - } - - /** - * Gets the Keyname of this object. - * - * @return String - */ - public String getKeyName() { - return keyName; - } - - /** - * Sets the Key name of this object. - * - * @param keyName - String - */ - public void setKeyName(String keyName) { - this.keyName = keyName; - } - - /** - * Returns the MD5 Hash for the data of this key. - * - * @return String MD5 - */ - public String getMd5hash() { - return md5hash; - } - - /** - * Sets the MD5 of this file. - * - * @param md5hash - Md5 of this file - */ - public void setMd5hash(String md5hash) { - this.md5hash = md5hash; - } - - /** - * Number of bytes stored in the data part of this key. - * - * @return long size of the data file - */ - public long getSize() { - return size; - } - - /** - * Sets the size of the Data part of this key. - * - * @param size - Size in long - */ - public void setSize(long size) { - this.size = size; - } - - /** - * Version of this key. - * - * @return - returns the version of this key. - */ - public long getVersion() { - return version; - } - - /** - * Sets the version of this key. - * - * @param version - Version String - */ - public void setVersion(long version) { - this.version = version; - } - - /** - * Compares this object with the specified object for order. Returns a - * negative integer, zero, or a positive integer as this object is less - * than, equal to, or greater than the specified object. - * - * @param o the object to be compared. 
- * - * @return a negative integer, zero, or a positive integer as this object - * is less than, equal to, or greater than the specified object. - * - * @throws NullPointerException if the specified object is null - * @throws ClassCastException if the specified object's type prevents it - * from being compared to this object. - */ - @Override - public int compareTo(KeyInfo o) { - if (this.keyName.compareTo(o.getKeyName()) != 0) { - return this.keyName.compareTo(o.getKeyName()); - } - - if (this.getVersion() == o.getVersion()) { - return 0; - } - if (this.getVersion() < o.getVersion()) { - return -1; - } - return 1; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - - if (o == null || getClass() != o.getClass()) { - return false; - } - - KeyInfo keyInfo = (KeyInfo) o; - - return new EqualsBuilder() - .append(version, keyInfo.version) - .append(keyName, keyInfo.keyName) - .isEquals(); - } - - @Override - public int hashCode() { - return new HashCodeBuilder(17, 37) - .append(version) - .append(keyName) - .toHashCode(); - } - - /** - * Parse a string to return a KeyInfo object. - * - * @param jsonString - Json String - * - * @return - KeyInfo - * - * @throws IOException - */ - public static KeyInfo parse(String jsonString) throws IOException { - return READER.readValue(jsonString); - } - - - /** - * Returns a JSON string of this object. - * After stripping out dataFileName - * - * @return String - */ - public String toJsonString() throws IOException { - return WRITER.writeValueAsString(this); - } - - /** - * Returns the Object as a Json String. - */ - public String toDBString() throws IOException { - return JsonUtils.toJsonString(this); - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfoDetails.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfoDetails.java deleted file mode 100644 index 7f2ba098d7915..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfoDetails.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.response; -
-import org.apache.commons.lang3.builder.EqualsBuilder; -import org.apache.commons.lang3.builder.HashCodeBuilder; - -import java.util.List; - -/** - * Represents an Ozone key object with detailed location information. - */ -public class KeyInfoDetails extends KeyInfo { - /** - * A list of KeyLocation objects that map a localID to a containerID - * to specify replica locations. - */ - private List<KeyLocation> keyLocations; - - /** - * Set details of key location.
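// The compareTo contract above orders keys by name first, then by version;
// a short sketch of the resulting order (java.util imports assumed):
KeyInfo a = new KeyInfo(); a.setKeyName("alpha"); a.setVersion(2);
KeyInfo b = new KeyInfo(); b.setKeyName("alpha"); b.setVersion(1);
KeyInfo c = new KeyInfo(); c.setKeyName("beta");  c.setVersion(0);
List<KeyInfo> keys = new ArrayList<>(Arrays.asList(c, a, b));
Collections.sort(keys);   // -> [alpha v1, alpha v2, beta v0]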
- * - * @param keyLocations - details of key location - */ - public void setKeyLocations(List<KeyLocation> keyLocations) { - this.keyLocations = keyLocations; - } - - /** - * Returns details of key location. - * - * @return keyLocations - */ - public List<KeyLocation> getKeyLocations() { - return keyLocations; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - - if (o == null || getClass() != o.getClass()) { - return false; - } - - KeyInfoDetails that = (KeyInfoDetails) o; - - return new EqualsBuilder() - .append(getVersion(), that.getVersion()) - .append(getKeyName(), that.getKeyName()) - .append(keyLocations, that.getKeyLocations()) - .isEquals(); - } - - @Override - public int hashCode() { - return new HashCodeBuilder(17, 37) - .append(getVersion()) - .append(getKeyName()) - .append(keyLocations) - .toHashCode(); - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/KeyLocation.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/KeyLocation.java deleted file mode 100644 index d03eff74753a0..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/KeyLocation.java +++ /dev/null @@ -1,82 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.response; - -/** - * KeyLocation class is used for parsing the JSON response - * when a KeyInfoDetails call is made. - */ -public class KeyLocation { - /** - * The container in which this key is stored. - */ - private final long containerID; - /** - * The block in which this key is stored inside a container. - */ - private final long localID; - /** - * Data length of this key replica. - */ - private final long length; - /** - * Offset of this key. - */ - private final long offset; - - /** - * Constructs KeyLocation. - */ - public KeyLocation(long containerID, long localID, - long length, long offset) { - this.containerID = containerID; - this.localID = localID; - this.length = length; - this.offset = offset; - } - - /** - * Returns the containerID of this Key. - */ - public long getContainerID() { - return containerID; - } - - /** - * Returns the localID of this Key. - */ - public long getLocalID() { - return localID; - } - - /** - * Returns the length of this Key. - */ - public long getLength() { - return length; - } - - /** - * Returns the offset of this Key.
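// Assembly sketch for the location-bearing variant: each KeyLocation pins one
// replica chunk (containerID, localID, length, offset) and KeyInfoDetails
// carries the list alongside the inherited KeyInfo fields. Values are made up.
KeyInfoDetails details = new KeyInfoDetails();
details.setKeyName("reports/2018/q3.parquet");
details.setVersion(0);
details.setKeyLocations(Arrays.asList(
    new KeyLocation(101L, 7L, 4096L, 0L),
    new KeyLocation(102L, 9L, 4096L, 4096L)));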
- */ - public long getOffset() { - return offset; - } - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/ListBuckets.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/ListBuckets.java deleted file mode 100644 index bc4e65be0df3e..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/ListBuckets.java +++ /dev/null @@ -1,154 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.response; - - -import java.io.IOException; -import java.util.Collections; -import java.util.LinkedList; -import java.util.List; - -import org.apache.hadoop.ozone.web.utils.JsonUtils; - -import com.fasterxml.jackson.annotation.JsonAutoDetect; -import com.fasterxml.jackson.annotation.JsonFilter; -import com.fasterxml.jackson.annotation.PropertyAccessor; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; -import com.fasterxml.jackson.databind.ObjectWriter; -import com.fasterxml.jackson.databind.ser.FilterProvider; -import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter; -import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider; - -/** - * List Bucket is the response for the ListBucket Query. - */ -public class ListBuckets { - static final String BUCKET_LIST = "BUCKET_LIST_FILTER"; - private static final ObjectReader READER = - new ObjectMapper().readerFor(ListBuckets.class); - private static final ObjectWriter WRITER; - - static { - ObjectMapper mapper = new ObjectMapper(); - String[] ignorableFieldNames = {"dataFileName"}; - - FilterProvider filters = new SimpleFilterProvider() - .addFilter(BUCKET_LIST, SimpleBeanPropertyFilter - .serializeAllExcept(ignorableFieldNames)); - mapper.setVisibility(PropertyAccessor.FIELD, - JsonAutoDetect.Visibility.ANY); - mapper.addMixIn(Object.class, MixIn.class); - - mapper.setFilterProvider(filters); - WRITER = mapper.writerWithDefaultPrettyPrinter(); - } - - private List buckets; - - /** - * Constructor for ListBuckets. - * @param buckets - List of buckets owned by this user - */ - public ListBuckets(List buckets) { - this.buckets = buckets; - - } - - /** - * Constructor for ListBuckets. - */ - public ListBuckets() { - this.buckets = new LinkedList(); - } - - /** - * Parses a String to return ListBuckets object. - * - * @param data - Json String - * - * @return - ListBuckets - * - * @throws IOException - */ - public static ListBuckets parse(String data) throws IOException { - return READER.readValue(data); - } - - /** - * Returns a list of Buckets. - * - * @return Bucket list - */ - public List getBuckets() { - return buckets; - } - - /** - * Sets the list of buckets owned by this user. 
- * - * @param buckets - List of Buckets - */ - public void setBuckets(List<BucketInfo> buckets) { - this.buckets = buckets; - } - - - /** - * Returns a JSON string of this object, - * with the BUCKET_LIST serialization filter applied. - * - * @return String - */ - public String toJsonString() throws IOException { - return WRITER.writeValueAsString(this); - } - - /** - * Returns the Object as a Json String. - */ - public String toDBString() throws IOException { - return JsonUtils.toJsonString(this); - } - - /** - * Sorts the buckets based on bucketName. - * This is useful when we return the list of buckets. - */ - public void sort() { - Collections.sort(buckets); - } - - /** - * Add a new bucket to the list of buckets. - * @param bucketInfo - bucket Info - */ - public void addBucket(BucketInfo bucketInfo) { - this.buckets.add(bucketInfo); - } - - /** - * This class allows us to create custom filters - * for the Json serialization. - */ - @JsonFilter(BUCKET_LIST) - class MixIn { - - } - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/ListKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/ListKeys.java deleted file mode 100644 index 9dc77d2c234e3..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/ListKeys.java +++ /dev/null @@ -1,209 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.response; - -import java.io.IOException; -import java.util.Collections; -import java.util.LinkedList; -import java.util.List; - -import org.apache.hadoop.ozone.web.handlers.BucketArgs; -import org.apache.hadoop.ozone.web.handlers.ListArgs; -import org.apache.hadoop.ozone.web.utils.JsonUtils; - -import com.fasterxml.jackson.annotation.JsonAutoDetect; -import com.fasterxml.jackson.annotation.JsonFilter; -import com.fasterxml.jackson.annotation.PropertyAccessor; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; -import com.fasterxml.jackson.databind.ObjectWriter; -import com.fasterxml.jackson.databind.ser.FilterProvider; -import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter; -import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider; -import com.google.common.base.Preconditions; - -/** - * This class represents the list of keys (objects) in a bucket.
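// A short sketch of building the ListBuckets response above: buckets are
// accumulated, sorted by bucket name within the volume, and serialized with
// the BUCKET_LIST filter (toJsonString may throw IOException).
ListBuckets response = new ListBuckets();
response.addBucket(new BucketInfo("vol-finance", "reports"));
response.addBucket(new BucketInfo("vol-finance", "archive"));
response.sort();                        // orders by bucketName
String json = response.toJsonString();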
- */ -public class ListKeys { - static final String OBJECT_LIST = "OBJECT_LIST_FILTER"; - - private static final ObjectReader READER = - new ObjectMapper().readerFor(ListKeys.class); - private static final ObjectWriter WRITER; - - static { - ObjectMapper mapper = new ObjectMapper(); - String[] ignorableFieldNames = {"dataFileName"}; - - FilterProvider filters = new SimpleFilterProvider() - .addFilter(OBJECT_LIST, SimpleBeanPropertyFilter - .serializeAllExcept(ignorableFieldNames)); - mapper.setVisibility(PropertyAccessor.FIELD, - JsonAutoDetect.Visibility.ANY); - mapper.addMixIn(Object.class, MixIn.class); - - mapper.setFilterProvider(filters); - WRITER = mapper.writerWithDefaultPrettyPrinter(); - } - - private String name; - private String prefix; - private long maxKeys; - private boolean truncated; - private List keyList; - - /** - * Default constructor needed for json serialization. - */ - public ListKeys() { - this.keyList = new LinkedList<>(); - } - - /** - * Constructor for ListKeys. - * - * @param args ListArgs - * @param truncated is truncated - */ - public ListKeys(ListArgs args, boolean truncated) { - Preconditions.checkState(args.getArgs() instanceof BucketArgs); - this.name = ((BucketArgs) args.getArgs()).getBucketName(); - this.prefix = args.getPrefix(); - this.maxKeys = args.getMaxKeys(); - this.truncated = truncated; - } - - /** - * Converts a Json string to POJO. - * @param jsonString - json string. - * @return ListObject - * @throws IOException - Json conversion error. - */ - public static ListKeys parse(String jsonString) throws IOException { - return READER.readValue(jsonString); - } - - /** - * Returns a list of Objects. - * - * @return List of KeyInfo Objects. - */ - public List getKeyList() { - return keyList; - } - - /** - * Sets the list of Objects. - * - * @param objectList - List of Keys - */ - public void setKeyList(List objectList) { - this.keyList = objectList; - } - - /** - * Gets the Max Key Count. - * - * @return long - */ - public long getMaxKeys() { - return maxKeys; - } - - /** - * Gets bucket Name. - * - * @return String - */ - public String getName() { - return name; - } - - /** - * Gets Prefix. - * - * @return String - */ - public String getPrefix() { - return prefix; - } - - /** - * Gets truncated Status. - * - * @return Boolean - */ - public boolean isTruncated() { - return truncated; - } - - /** - * Sets the value of truncated. - * - * @param value - Boolean - */ - public void setTruncated(boolean value) { - this.truncated = value; - } - - /** - * Returns a JSON string of this object. After stripping out bytesUsed and - * keyCount. - * - * @return String - * @throws IOException - On json Errors. - */ - public String toJsonString() throws IOException { - return WRITER.writeValueAsString(this); - } - - /** - * Returns the Object as a Json String. - * - * @return String - * @throws IOException - on json errors. - */ - public String toDBString() throws IOException { - return JsonUtils.toJsonString(this); - } - - /** - * Sorts the keys based on name and version. This is useful when we return the - * list of keys. - */ - public void sort() { - Collections.sort(keyList); - } - - /** - * Add a new key to the list of keys. - * @param keyInfo - key Info - */ - public void addKey(KeyInfo keyInfo){ - this.keyList.add(keyInfo); - } - - /** - * This class allows us to create custom filters for the Json serialization. 
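// Sketch of the truncation handshake in ListKeys above: the constructor checks
// that the wrapped args are BucketArgs, the server marks the response truncated
// when more keys remain, and a client continues from the last returned key.
// listArgs and keysForThisPage are assumed inputs from the handler.
ListKeys page = new ListKeys(listArgs, true /* truncated */);
page.setKeyList(keysForThisPage);
page.sort();                            // name-then-version order
List<KeyInfo> keys = page.getKeyList();
String nextPrevKey = keys.get(keys.size() - 1).getKeyName();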
- */ - @JsonFilter(OBJECT_LIST) - class MixIn { - - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/ListVolumes.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/ListVolumes.java deleted file mode 100644 index b918349d49372..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/ListVolumes.java +++ /dev/null @@ -1,152 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.response; - -import java.io.IOException; -import java.util.Collections; -import java.util.LinkedList; -import java.util.List; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.ozone.web.utils.JsonUtils; - -import com.fasterxml.jackson.annotation.JsonAutoDetect; -import com.fasterxml.jackson.annotation.JsonFilter; -import com.fasterxml.jackson.annotation.PropertyAccessor; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; -import com.fasterxml.jackson.databind.ObjectWriter; -import com.fasterxml.jackson.databind.ser.FilterProvider; -import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter; -import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider; - -/** - * List Volume Class is the class that is returned in JSON format to - * users when they call ListVolumes. - */ -@InterfaceAudience.Private -public class ListVolumes { - private List volumes; - - static final String VOLUME_LIST = "VOLUME_LIST_FILTER"; - private static final ObjectReader READER = - new ObjectMapper().readerFor(ListVolumes.class); - private static final ObjectWriter WRITER; - - static { - ObjectMapper mapper = new ObjectMapper(); - String[] ignorableFieldNames = {"bytesUsed", "bucketCount"}; - - FilterProvider filters = new SimpleFilterProvider() - .addFilter(VOLUME_LIST, SimpleBeanPropertyFilter - .serializeAllExcept(ignorableFieldNames)); - mapper.setVisibility(PropertyAccessor.FIELD, - JsonAutoDetect.Visibility.ANY); - mapper.addMixIn(Object.class, MixIn.class); - - mapper.setFilterProvider(filters); - WRITER = mapper.writerWithDefaultPrettyPrinter(); - } - - /** - * Used for json filtering. - */ - @JsonFilter(VOLUME_LIST) - class MixIn { - } - - /** - * Constructs ListVolume objects. - */ - public ListVolumes() { - this.volumes = new LinkedList(); - } - - /** - * Gets the list of volumes. - * - * @return List of VolumeInfo Objects - */ - public List getVolumes() { - return volumes; - } - - - /** - * Sets volume info. - * - * @param volumes - List of Volumes - */ - public void setVolumes(List volumes) { - this.volumes = volumes; - } - - /** - * Returns a JSON string of this object. 
- * After stripping out bytesUsed and bucketCount - * - * @return String - */ - public String toJsonString() throws IOException { - return WRITER.writeValueAsString(this); - } - - /** - * When we serialize a volumeInfo to our database - * we will use all fields. However the toJsonString - * will strip out bytesUsed and bucketCount from the - * volume Info - * - * @return Json String - * - * @throws IOException - */ - public String toDBString() throws IOException { - return JsonUtils.toJsonString(this); - } - - /** - * Parses a String to return ListVolumes object. - * - * @param data - Json String - * - * @return - ListVolumes - * - * @throws IOException - */ - public static ListVolumes parse(String data) throws IOException { - return READER.readValue(data); - } - - /** - * Adds a new volume info to the List. - * - * @param info - VolumeInfo - */ - public void addVolume(VolumeInfo info) { - this.volumes.add(info); - } - - /** - * Sorts the volume names based on volume name. - * This is useful when we return the list of volume names - */ - public void sort() { - Collections.sort(volumes); - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/VolumeInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/VolumeInfo.java deleted file mode 100644 index 112b27e541530..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/VolumeInfo.java +++ /dev/null @@ -1,308 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.response; - - -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.ozone.web.request.OzoneQuota; -import org.apache.hadoop.ozone.web.utils.JsonUtils; - -import com.fasterxml.jackson.annotation.JsonAutoDetect; -import com.fasterxml.jackson.annotation.JsonFilter; -import com.fasterxml.jackson.annotation.PropertyAccessor; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; -import com.fasterxml.jackson.databind.ObjectWriter; -import com.fasterxml.jackson.databind.ser.FilterProvider; -import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter; -import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider; - -/** - * VolumeInfo Class is the Java class that represents - * Json when VolumeInfo Call is made. 
- */ -@InterfaceAudience.Private -public class VolumeInfo implements Comparable { - - static final String VOLUME_INFO = "VOLUME_INFO_FILTER"; - private static final ObjectReader READER = - new ObjectMapper().readerFor(VolumeInfo.class); - private static final ObjectWriter WRITER; - - static { - ObjectMapper mapper = new ObjectMapper(); - String[] ignorableFieldNames = {"bytesUsed", "bucketCount"}; - - FilterProvider filters = new SimpleFilterProvider() - .addFilter(VOLUME_INFO, SimpleBeanPropertyFilter - .serializeAllExcept(ignorableFieldNames)); - mapper.setVisibility(PropertyAccessor.FIELD, - JsonAutoDetect.Visibility.ANY); - mapper.addMixIn(Object.class, MixIn.class); - - mapper.setFilterProvider(filters); - WRITER = mapper.writerWithDefaultPrettyPrinter(); - } - - /** - * Custom Json Filter Class. - */ - @JsonFilter(VOLUME_INFO) - class MixIn { - } - private VolumeOwner owner; - private OzoneQuota quota; - private String volumeName; - private String createdOn; - private String createdBy; - - private long bytesUsed; - private long bucketCount; - - - /** - * Constructor for VolumeInfo. - * - * @param volumeName - Name of the Volume - * @param createdOn _ Date String - * @param createdBy - Person who created it - */ - public VolumeInfo(String volumeName, String createdOn, String createdBy) { - this.createdOn = createdOn; - this.volumeName = volumeName; - this.createdBy = createdBy; - } - - /** - * Constructor for VolumeInfo. - */ - public VolumeInfo() { - } - - /** - * Returns the name of the person who created this volume. - * - * @return Name of Admin who created this - */ - public String getCreatedBy() { - return createdBy; - } - - /** - * Sets the user name of the person who created this volume. - * - * @param createdBy - UserName - */ - public void setCreatedBy(String createdBy) { - this.createdBy = createdBy; - } - - /** - * Gets the date on which this volume was created. - * - * @return - Date String - */ - public String getCreatedOn() { - return createdOn; - } - - /** - * Sets the date string. - * - * @param createdOn - Date String - */ - public void setCreatedOn(String createdOn) { - this.createdOn = createdOn; - } - - /** - * Returns the owner info. - * - * @return - OwnerInfo - */ - public VolumeOwner getOwner() { - return owner; - } - - /** - * Sets the owner. - * - * @param owner - OwnerInfo - */ - public void setOwner(VolumeOwner owner) { - this.owner = owner; - } - - /** - * Returns the quota information on a volume. - * - * @return Quota - */ - public OzoneQuota getQuota() { - return quota; - } - - /** - * Sets the quota info. - * - * @param quota - Quota Info - */ - public void setQuota(OzoneQuota quota) { - this.quota = quota; - } - - /** - * gets the volume name. - * - * @return - Volume Name - */ - public String getVolumeName() { - return volumeName; - } - - /** - * Sets the volume name. - * - * @param volumeName - Volume Name - */ - public void setVolumeName(String volumeName) { - this.volumeName = volumeName; - } - - /** - * Returns a JSON string of this object. - * After stripping out bytesUsed and bucketCount - * - * @return String - json string - * @throws IOException - */ - public String toJsonString() throws IOException { - return WRITER.writeValueAsString(this); - } - - /** - * When we serialize a volumeInfo to our database - * we will use all fields. 
However the toJsonString - * will strip out bytesUsed and bucketCount from the - * volume Info - * - * @return Json String - * - * @throws IOException - */ - public String toDBString() throws IOException { - return JsonUtils.toJsonString(this); - } - - - /** - * Comparable Interface. - * @param o VolumeInfo Object. - * @return Result of comparison - */ - @Override - public int compareTo(VolumeInfo o) { - return this.volumeName.compareTo(o.getVolumeName()); - } - - /** - * Gets the number of bytesUsed by this volume. - * - * @return long - Bytes used - */ - public long getBytesUsed() { - return bytesUsed; - } - - /** - * Sets number of bytesUsed by this volume. - * - * @param bytesUsed - Number of bytesUsed - */ - public void setBytesUsed(long bytesUsed) { - this.bytesUsed = bytesUsed; - } - - /** - * Returns VolumeInfo class from json string. - * - * @param data - Json String - * - * @return VolumeInfo - * - * @throws IOException - */ - public static VolumeInfo parse(String data) throws IOException { - return READER.readValue(data); - } - - /** - * Indicates whether some other object is "equal to" this one. - * - * @param obj the reference object with which to compare. - * - * @return {@code true} if this object is the same as the obj - * argument; {@code false} otherwise. - */ - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - VolumeInfo otherInfo = (VolumeInfo) obj; - return otherInfo.getVolumeName().equals(this.getVolumeName()); - } - - /** - * Returns a hash code value for the object. This method is - * supported for the benefit of hash tables such as those provided by - * HashMap. - * @return a hash code value for this object. - * - * @see Object#equals(Object) - * @see System#identityHashCode - */ - @Override - public int hashCode() { - return getVolumeName().hashCode(); - } - - /** - * Total number of buckets under this volume. - * - * @return - bucketCount - */ - public long getBucketCount() { - return bucketCount; - } - - /** - * Sets the buckets count. - * - * @param bucketCount - Bucket Count - */ - public void setBucketCount(long bucketCount) { - this.bucketCount = bucketCount; - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/VolumeOwner.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/VolumeOwner.java deleted file mode 100644 index afb0460538c0a..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/VolumeOwner.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
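// Sketch tying the VolumeInfo fields above to its two serialization paths:
// the REST JSON drops bytesUsed/bucketCount, the DB string keeps them, and
// parse() reverses toJsonString(). The date string is illustrative only.
VolumeInfo vol = new VolumeInfo("vol-finance",
    "Thu, 04 Oct 2018 10:00:00 GMT", "hdfs");
vol.setOwner(new VolumeOwner("finance-svc"));
vol.setBytesUsed(1073741824L);
vol.setBucketCount(3);
String restJson = vol.toJsonString();       // VOLUME_INFO filter applied
VolumeInfo parsed = VolumeInfo.parse(restJson);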
- */ - -package org.apache.hadoop.ozone.web.response; - -import com.fasterxml.jackson.annotation.JsonInclude; -import org.apache.hadoop.classification.InterfaceAudience; - -/** - * Volume Owner represents the owner of a volume. - * - * This is a class instead of a string since we might need to extend this class - * to support other forms of authentication. - */ -@InterfaceAudience.Private -public class VolumeOwner { - @JsonInclude(JsonInclude.Include.NON_NULL) - private String name; - - /** - * Constructor for VolumeOwner. - * - * @param name - name of the User - */ - public VolumeOwner(String name) { - this.name = name; - } - - /** - * Constructs Volume Owner. - */ - public VolumeOwner() { - name = null; - } - - /** - * Returns the user name. - * - * @return Name - */ - public String getName() { - return name; - } - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/package-info.java deleted file mode 100644 index 3bf66c861be57..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/** - * Netty-based HTTP server implementation for Ozone. - */ -package org.apache.hadoop.ozone.web.response; \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java deleted file mode 100644 index 22fff56d7d72d..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java +++ /dev/null @@ -1,227 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-
-package org.apache.hadoop.ozone.web.utils;
-
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-import java.nio.charset.Charset;
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.Locale;
-import java.util.TimeZone;
-import java.util.UUID;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.HddsUtils;
-import org.apache.hadoop.ozone.OzoneConsts;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Set of utility functions used in Ozone.
- */
-@InterfaceAudience.Private
-public final class OzoneUtils {
-
-  public static final String ENCODING_NAME = "UTF-8";
-  public static final Charset ENCODING = Charset.forName(ENCODING_NAME);
-
-  private OzoneUtils() {
-    // Never constructed
-  }
-
-  /**
-   * Date format used in Ozone. The format is thread-safe to use.
-   */
-  private static final ThreadLocal<SimpleDateFormat> DATE_FORMAT =
-      new ThreadLocal<SimpleDateFormat>() {
-    @Override
-    protected SimpleDateFormat initialValue() {
-      SimpleDateFormat format = new SimpleDateFormat(
-          OzoneConsts.OZONE_DATE_FORMAT, Locale.US);
-      format.setTimeZone(TimeZone.getTimeZone(OzoneConsts.OZONE_TIME_ZONE));
-
-      return format;
-    }
-  };
-
-  /**
-   * Verifies that the max key length is a valid value.
-   *
-   * @param length
-   *   The max key length to be validated
-   *
-   * @throws IllegalArgumentException
-   */
-  public static void verifyMaxKeyLength(String length)
-      throws IllegalArgumentException {
-    int maxKey = 0;
-    try {
-      maxKey = Integer.parseInt(length);
-    } catch (NumberFormatException nfe) {
-      throw new IllegalArgumentException(
-          "Invalid max key length, the value should be numeric.");
-    }
-
-    if (maxKey <= 0) {
-      throw new IllegalArgumentException(
-          "Invalid max key length, the value should be a positive number.");
-    }
-  }
-
-  /**
-   * Returns a random request ID.
-   *
-   * The request ID is returned to the client and also flows through the
-   * system, which helps in debugging why a certain request failed.
-   *
-   * @return String random request ID
-   */
-  public static String getRequestID() {
-    return UUID.randomUUID().toString();
-  }
-
-  /**
-   * Returns the host name if possible.
-   *
-   * @return Host Name or localhost
-   */
-  public static String getHostName() {
-    String host = "localhost";
-    try {
-      host = InetAddress.getLocalHost().getHostName();
-    } catch (UnknownHostException e) {
-      // Ignore the error
-    }
-    return host;
-  }
-
-  /**
-   * Gets the path for the datanode id file.
-   *
-   * @param conf - Configuration
-   * @return the path of the datanode id as a string
-   */
-  public static String getDatanodeIdFilePath(Configuration conf) {
-    return HddsUtils.getDatanodeIdFilePath(conf);
-  }
-
-  /**
-   * Converts a time in milliseconds to the human-readable format used in
-   * Ozone.
-   * @return a human readable string for the input time
-   */
-  public static String formatTime(long millis) {
-    return DATE_FORMAT.get().format(millis);
-  }
-
-  /**
-   * Converts a time in the Ozone date format to milliseconds.
-   * @return time in milliseconds
-   */
-  public static long formatDate(String date) throws ParseException {
-    Preconditions.checkNotNull(date, "Date string should not be null.");
-    return DATE_FORMAT.get().parse(date).getTime();
-  }
-
-  public static boolean isOzoneEnabled(Configuration conf) {
-    return HddsUtils.isHddsEnabled(conf);
-  }
-
-
-  /**
-   * Verifies that a bucket name / volume name is a valid DNS name.
- * - * @param resName Bucket or volume Name to be validated - * - * @throws IllegalArgumentException - */ - public static void verifyResourceName(String resName) - throws IllegalArgumentException { - - if (resName == null) { - throw new IllegalArgumentException("Bucket or Volume name is null"); - } - - if ((resName.length() < OzoneConsts.OZONE_MIN_BUCKET_NAME_LENGTH) || - (resName.length() > OzoneConsts.OZONE_MAX_BUCKET_NAME_LENGTH)) { - throw new IllegalArgumentException( - "Bucket or Volume length is illegal, " + - "valid length is 3-63 characters"); - } - - if ((resName.charAt(0) == '.') || (resName.charAt(0) == '-')) { - throw new IllegalArgumentException( - "Bucket or Volume name cannot start with a period or dash"); - } - - if ((resName.charAt(resName.length() - 1) == '.') || - (resName.charAt(resName.length() - 1) == '-')) { - throw new IllegalArgumentException( - "Bucket or Volume name cannot end with a period or dash"); - } - - boolean isIPv4 = true; - char prev = (char) 0; - - for (int index = 0; index < resName.length(); index++) { - char currChar = resName.charAt(index); - - if (currChar != '.') { - isIPv4 = ((currChar >= '0') && (currChar <= '9')) && isIPv4; - } - - if (currChar > 'A' && currChar < 'Z') { - throw new IllegalArgumentException( - "Bucket or Volume name does not support uppercase characters"); - } - - if ((currChar != '.') && (currChar != '-')) { - if ((currChar < '0') || (currChar > '9' && currChar < 'a') || - (currChar > 'z')) { - throw new IllegalArgumentException("Bucket or Volume name has an " + - "unsupported character : " + - currChar); - } - } - - if ((prev == '.') && (currChar == '.')) { - throw new IllegalArgumentException("Bucket or Volume name should not " + - "have two contiguous periods"); - } - - if ((prev == '-') && (currChar == '.')) { - throw new IllegalArgumentException( - "Bucket or Volume name should not have period after dash"); - } - - if ((prev == '.') && (currChar == '-')) { - throw new IllegalArgumentException( - "Bucket or Volume name should not have dash after period"); - } - prev = currChar; - } - - if (isIPv4) { - throw new IllegalArgumentException( - "Bucket or Volume name cannot be an IPv4 address or all numeric"); - } - } - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/package-info.java deleted file mode 100644 index 178157fcacc42..0000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
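The rules enforced above amount to a DNS-label style grammar: 3-63 characters, lowercase alphanumerics plus '.' and '-', no leading, trailing, or doubled separators, and not an IPv4-like / all-numeric name. A compact approximation of those checks, as a sketch rather than the removed implementation:

```
import java.util.regex.Pattern;

/** Sketch: approximates the verifyResourceName() rules with a regex. */
final class ResourceNameCheck {
  // 3-63 chars of lowercase letters, digits, '.' and '-', starting and
  // ending with an alphanumeric character.
  private static final Pattern NAME =
      Pattern.compile("^[a-z0-9][a-z0-9.-]{1,61}[a-z0-9]$");

  static boolean looksValid(String name) {
    return name != null
        && NAME.matcher(name).matches()
        && !name.contains("..")          // no contiguous periods
        && !name.contains(".-")          // no dash after period
        && !name.contains("-.")          // no period after dash
        && !name.matches("^[0-9.]+$");   // not IPv4-like or all numeric
  }
}
```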
- */
-package org.apache.hadoop.ozone.web.utils;
diff --git a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto
deleted file mode 100644
index 975c790f7841a..0000000000000
--- a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto
+++ /dev/null
@@ -1,481 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * These .proto interfaces are private and unstable.
- * Please see http://wiki.apache.org/hadoop/Compatibility
- * for what changes are allowed for an *unstable* .proto interface.
- */
-
-option java_package = "org.apache.hadoop.ozone.protocol.proto";
-option java_outer_classname = "OzoneManagerProtocolProtos";
-option java_generic_services = true;
-option java_generate_equals_and_hash = true;
-package hadoop.ozone;
-
-/**
-This file contains the protocol used to communicate with the
-Ozone Manager. The Ozone Manager manages the namespace for Ozone;
-it is similar to the Namenode for Ozone.
-*/
-
-import "hdfs.proto";
-import "hdds.proto";
-
-enum Status {
-    OK = 1;
-    VOLUME_NOT_UNIQUE = 2;
-    VOLUME_NOT_FOUND = 3;
-    VOLUME_NOT_EMPTY = 4;
-    VOLUME_ALREADY_EXISTS = 5;
-    USER_NOT_FOUND = 6;
-    USER_TOO_MANY_VOLUMES = 7;
-    BUCKET_NOT_FOUND = 8;
-    BUCKET_NOT_EMPTY = 9;
-    BUCKET_ALREADY_EXISTS = 10;
-    KEY_ALREADY_EXISTS = 11;
-    KEY_NOT_FOUND = 12;
-    INVALID_KEY_NAME = 13;
-    ACCESS_DENIED = 14;
-    INTERNAL_ERROR = 15;
-    KEY_ALLOCATION_ERROR = 16;
-    KEY_DELETION_ERROR = 17;
-    KEY_RENAME_ERROR = 18;
-    METADATA_ERROR = 19;
-    OM_NOT_INITIALIZED = 20;
-    SCM_VERSION_MISMATCH_ERROR = 21;
-}
-
-
-message VolumeInfo {
-    required string adminName = 1;
-    required string ownerName = 2;
-    required string volume = 3;
-    optional uint64 quotaInBytes = 4;
-    repeated hadoop.hdds.KeyValue metadata = 5;
-    repeated OzoneAclInfo volumeAcls = 6;
-    required uint64 creationTime = 7;
-}
-
-/**
-    Creates a volume
-*/
-message CreateVolumeRequest {
-    required VolumeInfo volumeInfo = 1;
-}
-
-message CreateVolumeResponse {
-
-    required Status status = 1;
-}
-
-message VolumeList {
-    repeated string volumeNames = 1;
-}
-
-/**
-    Changes the volume properties -- like ownership and quota for a volume.
-*/
-message SetVolumePropertyRequest {
-    required string volumeName = 1;
-    optional string ownerName = 2;
-    optional uint64 quotaInBytes = 3;
-}
-
-message SetVolumePropertyResponse {
-    required Status status = 1;
-}
-
-/**
- * Checks if the user has the specified permissions for the volume
- */
-message CheckVolumeAccessRequest {
-    required string volumeName = 1;
-    required OzoneAclInfo userAcl = 2;
-}
-
-message CheckVolumeAccessResponse {
-
-    required Status status = 1;
-}
-
-
-/**
-    Returns information about a volume.
-*/ - -message InfoVolumeRequest { - required string volumeName = 1; -} - -message InfoVolumeResponse { - required Status status = 1; - optional VolumeInfo volumeInfo = 2; - -} - -/** - Deletes an existing volume. -*/ -message DeleteVolumeRequest { - required string volumeName = 1; -} - -message DeleteVolumeResponse { - required Status status = 1; -} - - -/** - List Volumes -- List all volumes in the cluster or by user. -*/ - -message ListVolumeRequest { - enum Scope { - USER_VOLUMES = 1; // User volumes -- called by user - VOLUMES_BY_USER = 2; // User volumes - called by Admin - VOLUMES_BY_CLUSTER = 3; // All volumes in the cluster - } - required Scope scope = 1; - optional string userName = 2; - optional string prefix = 3; - optional string prevKey = 4; - optional uint32 maxKeys = 5; -} - -message ListVolumeResponse { - required Status status = 1; - repeated VolumeInfo volumeInfo = 2; -} - -message BucketInfo { - required string volumeName = 1; - required string bucketName = 2; - repeated OzoneAclInfo acls = 3; - required bool isVersionEnabled = 4 [default = false]; - required hadoop.hdfs.StorageTypeProto storageType = 5 [default = DISK]; - required uint64 creationTime = 6; -} - -message BucketArgs { - required string volumeName = 1; - required string bucketName = 2; - repeated OzoneAclInfo addAcls = 3; - repeated OzoneAclInfo removeAcls = 4; - optional bool isVersionEnabled = 5; - optional hadoop.hdfs.StorageTypeProto storageType = 6; -} - -message OzoneAclInfo { - enum OzoneAclType { - USER = 1; - GROUP = 2; - WORLD = 3; - } - enum OzoneAclRights { - READ = 1; - WRITE = 2; - READ_WRITE = 3; - } - required OzoneAclType type = 1; - required string name = 2; - required OzoneAclRights rights = 3; -} - -message CreateBucketRequest { - required BucketInfo bucketInfo = 1; -} - -message CreateBucketResponse { - required Status status = 1; -} - -message InfoBucketRequest { - required string volumeName = 1; - required string bucketName = 2; -} - -message InfoBucketResponse { - required Status status = 1; - optional BucketInfo bucketInfo = 2; -} - -message ListBucketsRequest { - required string volumeName = 1; - optional string startKey = 2; - optional string prefix = 3; - optional int32 count = 4; -} - -message ListBucketsResponse { - required Status status = 1; - repeated BucketInfo bucketInfo = 2; -} - -message KeyArgs { - required string volumeName = 1; - required string bucketName = 2; - required string keyName = 3; - optional uint64 dataSize = 4; - optional hadoop.hdds.ReplicationType type = 5; - optional hadoop.hdds.ReplicationFactor factor = 6; - repeated KeyLocation keyLocations = 7; -} - -message KeyLocation { - required hadoop.hdds.BlockID blockID = 1; - required bool shouldCreateContainer = 2; - required uint64 offset = 3; - required uint64 length = 4; - // indicated at which version this block gets created. 
-    optional uint64 createVersion = 5;
-}
-
-message KeyLocationList {
-    optional uint64 version = 1;
-    repeated KeyLocation keyLocations = 2;
-}
-
-message KeyInfo {
-    required string volumeName = 1;
-    required string bucketName = 2;
-    required string keyName = 3;
-    required uint64 dataSize = 4;
-    required hadoop.hdds.ReplicationType type = 5;
-    required hadoop.hdds.ReplicationFactor factor = 6;
-    repeated KeyLocationList keyLocationList = 7;
-    required uint64 creationTime = 8;
-    required uint64 modificationTime = 9;
-    optional uint64 latestVersion = 10;
-}
-
-message LocateKeyRequest {
-    required KeyArgs keyArgs = 1;
-}
-
-message LocateKeyResponse {
-    required Status status = 1;
-    optional KeyInfo keyInfo = 2;
-    // clients' follow-up requests may carry this ID for stateful operations
-    // (similar to a cookie).
-    optional uint64 ID = 3;
-    // TODO : allow specifying a particular version to read.
-    optional uint64 openVersion = 4;
-}
-
-message SetBucketPropertyRequest {
-    required BucketArgs bucketArgs = 1;
-}
-
-message SetBucketPropertyResponse {
-    required Status status = 1;
-}
-
-message RenameKeyRequest{
-    required KeyArgs keyArgs = 1;
-    required string toKeyName = 2;
-}
-
-message RenameKeyResponse{
-    required Status status = 1;
-}
-
-message DeleteBucketRequest {
-    required string volumeName = 1;
-    required string bucketName = 2;
-}
-
-message DeleteBucketResponse {
-    required Status status = 1;
-}
-
-message ListKeysRequest {
-    required string volumeName = 1;
-    required string bucketName = 2;
-    optional string startKey = 3;
-    optional string prefix = 4;
-    optional int32 count = 5;
-}
-
-message ListKeysResponse {
-    required Status status = 1;
-    repeated KeyInfo keyInfo = 2;
-}
-
-message AllocateBlockRequest {
-    required KeyArgs keyArgs = 1;
-    required uint64 clientID = 2;
-}
-
-message AllocateBlockResponse {
-    required Status status = 1;
-    optional KeyLocation keyLocation = 2;
-}
-
-message CommitKeyRequest {
-    required KeyArgs keyArgs = 1;
-    required uint64 clientID = 2;
-}
-
-message CommitKeyResponse {
-    required Status status = 1;
-}
-
-message ServiceListRequest {
-}
-
-message ServiceListResponse {
-    required Status status = 1;
-    repeated ServiceInfo serviceInfo = 2;
-}
-
-message ServicePort {
-    enum Type {
-        RPC = 1;
-        HTTP = 2;
-        HTTPS = 3;
-        RATIS = 4;
-    };
-    required Type type = 1;
-    required uint32 value = 2;
-}
-
-message ServiceInfo {
-    required hadoop.hdds.NodeType nodeType = 1;
-    required string hostname = 2;
-    repeated ServicePort servicePorts = 3;
-}
-
-/**
-  The OM service that takes care of the Ozone namespace.
-*/
-service OzoneManagerService {
-
-    /**
-        Creates a Volume.
-    */
-    rpc createVolume(CreateVolumeRequest)
-        returns(CreateVolumeResponse);
-
-    /**
-        Allows modification of volume properties.
-    */
-    rpc setVolumeProperty(SetVolumePropertyRequest)
-        returns (SetVolumePropertyResponse);
-
-    /**
-        Checks if the specified volume is accessible by the specified user.
-    */
-    rpc checkVolumeAccess(CheckVolumeAccessRequest)
-        returns (CheckVolumeAccessResponse);
-
-    /**
-        Gets Volume information.
-    */
-    rpc infoVolume(InfoVolumeRequest)
-        returns(InfoVolumeResponse);
-    /**
-        Deletes a volume if it is empty.
-    */
-    rpc deleteVolume(DeleteVolumeRequest)
-        returns (DeleteVolumeResponse);
-
-    /**
-        Lists Volumes
-    */
-    rpc listVolumes(ListVolumeRequest)
-        returns (ListVolumeResponse);
-
-    /**
-        Creates a Bucket.
-    */
-    rpc createBucket(CreateBucketRequest)
-        returns(CreateBucketResponse);
-
-    /**
-        Gets Bucket information.
- */ - rpc infoBucket(InfoBucketRequest) - returns(InfoBucketResponse); - - /** - Sets bucket properties. - */ - rpc setBucketProperty(SetBucketPropertyRequest) - returns(SetBucketPropertyResponse); - - /** - Get key. - */ - rpc createKey(LocateKeyRequest) - returns(LocateKeyResponse); - - /** - Look up for an existing key. - */ - rpc lookupKey(LocateKeyRequest) - returns(LocateKeyResponse); - - /** - Rename an existing key within a bucket. - */ - rpc renameKey(RenameKeyRequest) - returns(RenameKeyResponse); - - /** - Delete an existing key. - */ - rpc deleteKey(LocateKeyRequest) - returns(LocateKeyResponse); - - /** - Deletes a bucket from volume if it is empty. - */ - rpc deleteBucket(DeleteBucketRequest) - returns (DeleteBucketResponse); - - /** - List Buckets. - */ - rpc listBuckets(ListBucketsRequest) - returns(ListBucketsResponse); - - /** - List Keys. - */ - rpc listKeys(ListKeysRequest) - returns(ListKeysResponse); - - /** - Commit a key. - */ - rpc commitKey(CommitKeyRequest) - returns(CommitKeyResponse); - - /** - Allocate a new block for a key. - */ - rpc allocateBlock(AllocateBlockRequest) - returns(AllocateBlockResponse); - - /** - Returns list of Ozone services with its configuration details. - */ - rpc getServiceList(ServiceListRequest) - returns(ServiceListResponse); -} diff --git a/hadoop-ozone/common/src/main/resources/ozone-version-info.properties b/hadoop-ozone/common/src/main/resources/ozone-version-info.properties deleted file mode 100644 index 599f14d5eca40..0000000000000 --- a/hadoop-ozone/common/src/main/resources/ozone-version-info.properties +++ /dev/null @@ -1,27 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -version=${declared.ozone.version} -release=${ozone.release} -revision=${version-info.scm.commit} -branch=${version-info.scm.branch} -user=${user.name} -date=${version-info.build.time} -url=${version-info.scm.uri} -srcChecksum=${version-info.source.md5} -protocVersion=${protobuf.version} diff --git a/hadoop-ozone/common/src/main/shellprofile.d/hadoop-ozone.sh b/hadoop-ozone/common/src/main/shellprofile.d/hadoop-ozone.sh deleted file mode 100644 index 3fff7f5f3540f..0000000000000 --- a/hadoop-ozone/common/src/main/shellprofile.d/hadoop-ozone.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
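Because the removed file declares `java_outer_classname = "OzoneManagerProtocolProtos"`, protoc generates nested message classes with builders. An illustrative sketch of how a client would have built a createVolume request (not code from this patch; only the field names and options above are taken from the .proto):

```
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateVolumeRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeInfo;

// Build the required VolumeInfo fields declared in the message above.
VolumeInfo volume = VolumeInfo.newBuilder()
    .setAdminName("hdfs")                          // required
    .setOwnerName("bilbo")                         // required
    .setVolume("vol1")                             // required
    .setQuotaInBytes(10L * 1024 * 1024 * 1024)     // optional
    .setCreationTime(System.currentTimeMillis())   // required
    .build();

CreateVolumeRequest request = CreateVolumeRequest.newBuilder()
    .setVolumeInfo(volume)
    .build();
```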
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -if [[ "${HADOOP_SHELL_EXECNAME}" = ozone ]]; then - hadoop_add_profile ozone -fi - - diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestBucketInfo.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestBucketInfo.java deleted file mode 100644 index 2e69922594763..0000000000000 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestBucketInfo.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web; - - -import org.apache.hadoop.fs.StorageType; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.web.response.BucketInfo; -import org.apache.hadoop.ozone.OzoneConsts; -import org.junit.Test; - -import java.io.IOException; -import java.util.LinkedList; -import java.util.List; - -/** - * Test Ozone Bucket Info operation. 
- */ -public class TestBucketInfo { - @Test - public void testBucketInfoJson() throws IOException { - BucketInfo bucketInfo = new BucketInfo("volumeName", "bucketName"); - String bucketInfoString = bucketInfo.toJsonString(); - BucketInfo newBucketInfo = BucketInfo.parse(bucketInfoString); - assert(bucketInfo.equals(newBucketInfo)); - } - - @Test - public void testBucketInfoDBString() throws IOException { - BucketInfo bucketInfo = new BucketInfo("volumeName", "bucketName"); - String bucketInfoString = bucketInfo.toDBString(); - BucketInfo newBucketInfo = BucketInfo.parse(bucketInfoString); - assert(bucketInfo.equals(newBucketInfo)); - } - - @Test - public void testBucketInfoAddAcls() throws IOException { - BucketInfo bucketInfo = new BucketInfo("volumeName", "bucketName"); - String bucketInfoString = bucketInfo.toDBString(); - BucketInfo newBucketInfo = BucketInfo.parse(bucketInfoString); - assert(bucketInfo.equals(newBucketInfo)); - List aclList = new LinkedList<>(); - - aclList.add(OzoneAcl.parseAcl("user:bilbo:r")); - aclList.add(OzoneAcl.parseAcl("user:samwise:rw")); - newBucketInfo.setAcls(aclList); - - assert(newBucketInfo.getAcls() != null); - assert(newBucketInfo.getAcls().size() == 2); - } - - - @Test - public void testBucketInfoVersionAndType() throws IOException { - BucketInfo bucketInfo = new BucketInfo("volumeName", "bucketName"); - bucketInfo.setVersioning(OzoneConsts.Versioning.ENABLED); - bucketInfo.setStorageType(StorageType.DISK); - - String bucketInfoString = bucketInfo.toDBString(); - - BucketInfo newBucketInfo = BucketInfo.parse(bucketInfoString); - assert(bucketInfo.equals(newBucketInfo)); - } - -} diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestQuota.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestQuota.java deleted file mode 100644 index d777d0cd8974b..0000000000000 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestQuota.java +++ /dev/null @@ -1,116 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web; - -import org.apache.hadoop.ozone.web.request.OzoneQuota; -import org.junit.Test; - -import java.util.HashMap; -import java.util.Set; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -/** - * Test Ozone Volume Quota. 
- */
-public class TestQuota {
-  @Test
-  public void testParseQuota() {
-    HashMap<String, Boolean> testMatrix;
-    testMatrix = new HashMap<String, Boolean>();
-
-    testMatrix.put("10TB", Boolean.TRUE);
-    testMatrix.put("1 TB", Boolean.TRUE);
-    testMatrix.put("0MB", Boolean.TRUE);
-    testMatrix.put("0 TB", Boolean.TRUE);
-    testMatrix.put(" 1000MB ", Boolean.TRUE);
-
-    testMatrix.put(" 1000MBMB ", Boolean.FALSE);
-    testMatrix.put(" 1000MB00 ", Boolean.FALSE);
-    testMatrix.put("1000ZMB", Boolean.FALSE);
-    testMatrix.put("MB1000", Boolean.FALSE);
-    testMatrix.put("9999", Boolean.FALSE);
-    testMatrix.put("1", Boolean.FALSE);
-    testMatrix.put("remove", Boolean.FALSE);
-    testMatrix.put("1UNDEFINED", Boolean.FALSE);
-    testMatrix.put(null, Boolean.FALSE);
-    testMatrix.put("", Boolean.FALSE);
-    testMatrix.put("-1000MB", Boolean.FALSE);
-    testMatrix.put("1024 bytes", Boolean.TRUE);
-    testMatrix.put("1bytes", Boolean.TRUE);
-    testMatrix.put("0bytes", Boolean.TRUE);
-    testMatrix.put("10000 BYTES", Boolean.TRUE);
-    testMatrix.put("BYTESbytes", Boolean.FALSE);
-    testMatrix.put("bytes", Boolean.FALSE);
-
-    Set<String> keys = testMatrix.keySet();
-    for (String key : keys) {
-      if (testMatrix.get(key)) {
-        OzoneQuota.parseQuota(key);
-      } else {
-        try {
-          OzoneQuota.parseQuota(key);
-          // should never get here since the parse call will throw
-          fail("An exception was expected but did not happen for: " + key);
-        } catch (IllegalArgumentException e) {
-
-        }
-      }
-    }
-  }
-
-  @Test
-  public void testVerifyQuota() {
-    OzoneQuota qt = OzoneQuota.parseQuota("10TB");
-    assertEquals(qt.getSize(), 10);
-    assertEquals(qt.getUnit(), OzoneQuota.Units.TB);
-    assertEquals(qt.sizeInBytes(), 10L * (1024L * 1024L * 1024L * 1024L));
-
-    qt = OzoneQuota.parseQuota("10MB");
-    assertEquals(qt.getSize(), 10);
-    assertEquals(qt.getUnit(), OzoneQuota.Units.MB);
-    assertEquals(qt.sizeInBytes(), 10L * (1024L * 1024L));
-
-    qt = OzoneQuota.parseQuota("10GB");
-    assertEquals(qt.getSize(), 10);
-    assertEquals(qt.getUnit(), OzoneQuota.Units.GB);
-    assertEquals(qt.sizeInBytes(), 10L * (1024L * 1024L * 1024L));
-
-    qt = OzoneQuota.parseQuota("10BYTES");
-    assertEquals(qt.getSize(), 10);
-    assertEquals(qt.getUnit(), OzoneQuota.Units.BYTES);
-    assertEquals(qt.sizeInBytes(), 10L);
-
-    OzoneQuota emptyQuota = new OzoneQuota();
-    assertEquals(emptyQuota.sizeInBytes(), -1L);
-    assertEquals(emptyQuota.getSize(), 0);
-    assertEquals(emptyQuota.getUnit(), OzoneQuota.Units.UNDEFINED);
-  }
-
-  @Test
-  public void testVerifyRemove() {
-    assertTrue(OzoneQuota.isRemove("remove"));
-    assertFalse(OzoneQuota.isRemove("not remove"));
-    assertFalse(OzoneQuota.isRemove(null));
-  }
-}
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestUtils.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestUtils.java
deleted file mode 100644
index d3f8f5e659ca8..0000000000000
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestUtils.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web; - -import org.junit.Test; - -import java.util.HashMap; -import java.util.HashSet; -import java.util.Set; - -import static org.apache.hadoop.ozone.web.utils.OzoneUtils.getRequestID; -import static org.apache.hadoop.ozone.web.utils.OzoneUtils.verifyResourceName; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -/** - * Test Ozone Utility operations like verifying resource name. - */ -public class TestUtils { - - /** - * Tests if the bucket name handling is correct. - */ - @Test - public void testValidBucketNames() { - HashMap testMatrix; - // Init the Table with Strings and Expected Return values - testMatrix = new HashMap(); - - testMatrix.put("bucket-.ozone.self", Boolean.FALSE); - testMatrix.put("bucket.-ozone.self", Boolean.FALSE); - testMatrix.put(".bucket.ozone.self", Boolean.FALSE); - testMatrix.put("bucket.ozone.self.", Boolean.FALSE); - testMatrix.put("bucket..ozone.self", Boolean.FALSE); - testMatrix.put("192.1.1.1", Boolean.FALSE); - testMatrix.put("ab", Boolean.FALSE); - testMatrix.put("bucket.ozone.self.this.is.a.really.long.name.that." - + "is.more.than.sixty.three.characters.long.for.sure", Boolean.FALSE); - testMatrix.put(null, Boolean.FALSE); - testMatrix.put("bucket@$", Boolean.FALSE); - testMatrix.put("BUCKET", Boolean.FALSE); - testMatrix.put("bucket .ozone.self", Boolean.FALSE); - testMatrix.put(" bucket.ozone.self", Boolean.FALSE); - testMatrix.put("bucket.ozone.self-", Boolean.FALSE); - testMatrix.put("-bucket.ozone.self", Boolean.FALSE); - - testMatrix.put("bucket", Boolean.TRUE); - testMatrix.put("bucket.ozone.self", Boolean.TRUE); - testMatrix.put("bucket.ozone.self", Boolean.TRUE); - testMatrix.put("bucket-name.ozone.self", Boolean.TRUE); - testMatrix.put("bucket.1.ozone.self", Boolean.TRUE); - - Set keys = testMatrix.keySet(); - for (String key : keys) { - if (testMatrix.get(key)) { - - // For valid names there should be no exceptions at all - verifyResourceName(key); - } else { - try { - verifyResourceName(key); - // should never get here since the isValid call will throw - fail("An exception was expected but did not happen."); - } catch (IllegalArgumentException e) { - - } - } - } - } - - /** - * Just calls Request ID many times and assert we - * got different values, ideally this should be - * run under parallel threads. Since the function under - * test has no external dependencies it is assumed - * that this test is good enough. - */ - @Test - public void testRequestIDisRandom() { - HashSet set = new HashSet<>(); - for (int i = 0; i < 1000; i++) { - assertTrue(set.add(getRequestID())); - } - } -} diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestVolumeStructs.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestVolumeStructs.java deleted file mode 100644 index b433be6fb5104..0000000000000 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestVolumeStructs.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -package org.apache.hadoop.ozone.web; - -import org.apache.hadoop.ozone.web.response.ListVolumes; -import org.apache.hadoop.ozone.web.response.VolumeInfo; -import org.apache.hadoop.ozone.web.response.VolumeOwner; -import org.junit.Test; - -import java.io.IOException; - -import static org.junit.Assert.assertEquals; - -/** - * Test Ozone Volume info structure. - */ -public class TestVolumeStructs { - - @Test - public void testVolumeInfoParse() throws IOException { - VolumeInfo volInfo = - new VolumeInfo("testvol", "Thu, Apr 9, 2015 10:23:45 GMT", "gandalf"); - VolumeOwner owner = new VolumeOwner("bilbo"); - volInfo.setOwner(owner); - String jString = volInfo.toJsonString(); - VolumeInfo newVollInfo = VolumeInfo.parse(jString); - String one = volInfo.toJsonString(); - String two = newVollInfo.toJsonString(); - - assertEquals(volInfo.toJsonString(), newVollInfo.toJsonString()); - } - - @Test - public void testVolumeInfoValue() throws IOException { - String createdOn = "Thu, Apr 9, 2015 10:23:45 GMT"; - String createdBy = "gandalf"; - VolumeInfo volInfo = new VolumeInfo("testvol", createdOn, createdBy); - assertEquals(volInfo.getCreatedBy(), createdBy); - assertEquals(volInfo.getCreatedOn(), createdOn); - } - - - @Test - public void testVolumeListParse() throws IOException { - ListVolumes list = new ListVolumes(); - for (int x = 0; x < 100; x++) { - VolumeInfo volInfo = new VolumeInfo("testvol" + Integer.toString(x), - "Thu, Apr 9, 2015 10:23:45 GMT", "gandalf"); - list.addVolume(volInfo); - } - list.sort(); - String listString = list.toJsonString(); - ListVolumes newList = ListVolumes.parse(listString); - assertEquals(list.toJsonString(), newList.toJsonString()); - } -} diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/package-info.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/package-info.java deleted file mode 100644 index ddbc30e09f265..0000000000000 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web; -/** - * Unit tests of generic ozone web app and rest utils. - */ diff --git a/hadoop-ozone/datanode/pom.xml b/hadoop-ozone/datanode/pom.xml deleted file mode 100644 index 02995f53488f3..0000000000000 --- a/hadoop-ozone/datanode/pom.xml +++ /dev/null @@ -1,49 +0,0 @@ - - - - 4.0.0 - - org.apache.hadoop - hadoop-ozone - 0.3.0-SNAPSHOT - - hadoop-ozone-datanode - Apache Hadoop Ozone Datanode - jar - 0.3.0-SNAPSHOT - - - - org.apache.hadoop - hadoop-common - compile - - - org.apache.hadoop - hadoop-hdfs - compile - - - org.apache.hadoop - hadoop-hdds-container-service - - - org.apache.hadoop - hadoop-ozone-objectstore-service - - - diff --git a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching deleted file mode 100755 index 3f102fa01b00e..0000000000000 --- a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching +++ /dev/null @@ -1,113 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# project.build.directory -BASEDIR=$1 - -#hdds.version -HDDS_VERSION=$2 - -## @audience private -## @stability evolving -function run() -{ - declare res - - echo "\$ ${*}" - "${@}" - res=$? - if [[ ${res} != 0 ]]; then - echo - echo "Failed!" - echo - exit "${res}" - fi -} - -## @audience private -## @stability evolving -function findfileindir() -{ - declare file="$1" - declare dir="${2:-./share}" - declare count - - count=$(find "${dir}" -iname "${file}" | wc -l) - - #shellcheck disable=SC2086 - echo ${count} -} - - -# shellcheck disable=SC2164 -ROOT=$(cd "${BASEDIR}"/../../..;pwd) -echo -echo "Current directory $(pwd)" -echo - -run rm -rf "ozone-${HDDS_VERSION}" -run mkdir "ozone-${HDDS_VERSION}" -run cd "ozone-${HDDS_VERSION}" -run cp -p "${ROOT}/LICENSE.txt" . -run cp -p "${ROOT}/NOTICE.txt" . -run cp -p "${ROOT}/README.txt" . 
- -run mkdir -p ./share/hadoop/mapreduce -run mkdir -p ./share/hadoop/ozone -run mkdir -p ./share/hadoop/hdds -run mkdir -p ./share/hadoop/yarn -run mkdir -p ./share/hadoop/hdfs -run mkdir -p ./share/hadoop/common -run mkdir -p ./share/ozone/web -run mkdir -p ./bin -run mkdir -p ./sbin -run mkdir -p ./etc -run mkdir -p ./libexec - -run cp -r "${ROOT}/hadoop-common-project/hadoop-common/src/main/conf" "etc/hadoop" - -run cp "${ROOT}/hadoop-common-project/hadoop-common/src/main/bin/hadoop" "bin/" -run cp "${ROOT}/hadoop-common-project/hadoop-common/src/main/bin/hadoop.cmd" "bin/" -run cp "${ROOT}/hadoop-ozone/common/src/main/bin/ozone" "bin/" - -run cp "${ROOT}/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh" "libexec/" -run cp "${ROOT}/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.cmd" "libexec/" -run cp "${ROOT}/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh" "libexec/" -run cp "${ROOT}/hadoop-ozone/common/src/main/bin/ozone-config.sh" "libexec/" -run cp -r "${ROOT}/hadoop-ozone/common/src/main/shellprofile.d" "libexec/" - - -run cp "${ROOT}/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemons.sh" "sbin/" -run cp "${ROOT}/hadoop-common-project/hadoop-common/src/main/bin/workers.sh" "sbin/" -run cp "${ROOT}/hadoop-ozone/common/src/main/bin/start-ozone.sh" "sbin/" -run cp "${ROOT}/hadoop-ozone/common/src/main/bin/stop-ozone.sh" "sbin/" - -#shaded ozonefs -run mkdir -p "./share/hadoop/ozonefs" -run cp "${ROOT}/hadoop-ozone/ozonefs/target/hadoop-ozone-filesystem-${HDDS_VERSION}.jar" "./share/hadoop/ozonefs/hadoop-ozone-filesystem-${HDDS_VERSION}.jar" - -#shaded datanode service -run mkdir -p "./share/hadoop/ozoneplugin" -run cp "${ROOT}/hadoop-ozone/objectstore-service/target/hadoop-ozone-objectstore-service-${HDDS_VERSION}-plugin.jar" "./share/hadoop/ozoneplugin/hadoop-ozone-datanode-plugin-${HDDS_VERSION}.jar" - - -# Optional documentation, could be missing -cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" ./share/hadoop/ozone/webapps/ozoneManager/ -cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" ./share/hadoop/hdds/webapps/scm/ - -#Copy docker compose files -run cp -p -r "${ROOT}/hadoop-ozone/dist/src/main/compose" . -run cp -p -r "${ROOT}/hadoop-ozone/dist/src/main/smoketest" . diff --git a/hadoop-ozone/dist/dev-support/bin/dist-tar-stitching b/hadoop-ozone/dist/dev-support/bin/dist-tar-stitching deleted file mode 100755 index c94e7d0eb6eaf..0000000000000 --- a/hadoop-ozone/dist/dev-support/bin/dist-tar-stitching +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -# project.version -VERSION=$1 - -# project.build.directory -BASEDIR=$2 - -## @audience private -## @stability evolving -function run() -{ - declare res - - echo "\$ ${*}" - "${@}" - res=$? - if [[ ${res} != 0 ]]; then - echo - echo "Failed!" - echo - exit "${res}" - fi -} - -run tar -c -f "ozone-${VERSION}.tar" "ozone-${VERSION}" -run gzip -f "ozone-${VERSION}.tar" -echo -echo "Ozone dist tar available at: ${BASEDIR}/ozone-${VERSION}.tar.gz" -echo diff --git a/hadoop-ozone/dist/pom.xml b/hadoop-ozone/dist/pom.xml deleted file mode 100644 index 7d1137274e210..0000000000000 --- a/hadoop-ozone/dist/pom.xml +++ /dev/null @@ -1,195 +0,0 @@ - - - - 4.0.0 - - org.apache.hadoop - hadoop-ozone - 0.3.0-SNAPSHOT - - hadoop-ozone-dist - Apache Hadoop Ozone Distribution - pom - 0.3.0-SNAPSHOT - - UTF-8 - true - - - - - - org.apache.maven.plugins - maven-dependency-plugin - - - copy-classpath-files - package - - copy - - - target/ozone-${ozone.version}/share/ozone/classpath - - - - org.apache.hadoop - hadoop-hdds-server-scm - ${hdds.version} - classpath - hadoop-hdds-server-scm.classpath - - - org.apache.hadoop - hadoop-hdds-tools - ${hdds.version} - classpath - hadoop-hdds-tools.classpath - - - org.apache.hadoop - hadoop-ozone-s3gateway - ${ozone.version} - classpath - hadoop-ozone-s3gateway.classpath - - - org.apache.hadoop - hadoop-ozone-ozone-manager - ${ozone.version} - classpath - hadoop-ozone-ozone-manager.classpath - - - - org.apache.hadoop - hadoop-ozone-tools - ${ozone.version} - classpath - hadoop-ozone-tools.classpath - - - org.apache.hadoop - hadoop-ozone-common - ${ozone.version} - classpath - hadoop-ozone-common.classpath - - - org.apache.hadoop - hadoop-ozone-datanode - ${ozone.version} - classpath - hadoop-ozone-datanode.classpath - - - - - - copy-jars - package - - copy-dependencies - - - target/ozone-${ozone.version}/share/ozone/lib - - runtime - - - - - - org.codehaus.mojo - exec-maven-plugin - - - dist - prepare-package - - exec - - - ${shell-executable} - ${project.build.directory} - - - ${basedir}/dev-support/bin/dist-layout-stitching - - ${project.build.directory} - ${hdds.version} - - - - - tar-ozone - package - - exec - - - ${shell-executable} - ${project.build.directory} - - - ${basedir}/dev-support/bin/dist-tar-stitching - - ${hdds.version} - ${project.build.directory} - - - - - - - - - - - org.apache.hadoop - hadoop-hdds-tools - - - org.apache.hadoop - hadoop-hdds-server-scm - - - org.apache.hadoop - hadoop-hdds-container-service - - - org.apache.hadoop - hadoop-ozone-s3gateway - - - org.apache.hadoop - hadoop-ozone-ozone-manager - - - org.apache.hadoop - hadoop-ozone-tools - - - org.apache.hadoop - hadoop-ozone-common - - - org.apache.hadoop - hadoop-ozone-datanode - - - diff --git a/hadoop-ozone/dist/src/main/compose/README.md b/hadoop-ozone/dist/src/main/compose/README.md deleted file mode 100644 index 8189d2c169a89..0000000000000 --- a/hadoop-ozone/dist/src/main/compose/README.md +++ /dev/null @@ -1,51 +0,0 @@ - - -# Docker cluster definitions - -This directory contains multiple docker cluster definitions to start local pseudo cluster with different configuration. - -It helps to start local (multi-node like) pseudo cluster with docker and docker-compose and obviously it's not for production. 
-
-You may find more information in the specific subdirectories, but in general you can use the following commands:
-
-## Usage
-
-To start a cluster, go to a subdirectory and start the cluster:
-
-```
-docker-compose up -d
-```
-
-You can check the logs of all the components with:
-
-```
-docker-compose logs
-```
-
-In case of a problem you can destroy the cluster and delete all the local state with:
-
-```
-docker-compose down
-```
-
-(Note: a simple `docker-compose stop` may not delete all the local data.)
-
-You can scale the components up and down:
-
-```
-docker-compose scale datanode=5
-```
-
-Usually the key web UI ports are published on the Docker host.
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-hdfs/.env b/hadoop-ozone/dist/src/main/compose/ozone-hdfs/.env
deleted file mode 100644
index c437513bbd86d..0000000000000
--- a/hadoop-ozone/dist/src/main/compose/ozone-hdfs/.env
+++ /dev/null
@@ -1,17 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-HADOOP_VERSION=3.1.0
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-compose.yaml
deleted file mode 100644
index b89052dd0cc18..0000000000000
--- a/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-compose.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
- -version: "3" -services: - namenode: - image: flokkr/hadoop:${HADOOP_VERSION} - ports: - - 9870:9870 - env_file: - - ./docker-config - environment: - ENSURE_NAMENODE_DIR: "/tmp/hadoop-root/dfs/name" - command: ["hdfs", "namenode"] - datanode: - image: flokkr/hadoop:${HADOOP_VERSION} - ports: - - 9864 - volumes: - - ../..:/opt/ozone - command: ["hdfs","datanode"] - environment: - HADOOP_CLASSPATH: /opt/ozone/share/hadoop/ozoneplugin/*.jar - env_file: - - ./docker-config - ozoneManager: - image: apache/hadoop-runner - volumes: - - ../..:/opt/hadoop - ports: - - 9874:9874 - environment: - ENSURE_OM_INITIALIZED: /data/metadata/ozoneManager/current/VERSION - env_file: - - ./docker-config - command: ["ozone","om"] - scm: - image: apache/hadoop-runner - volumes: - - ../..:/opt/hadoop - ports: - - 9876:9876 - env_file: - - ./docker-config - environment: - ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION - command: ["ozone","scm"] diff --git a/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-config deleted file mode 100644 index 3b2819fbf77bc..0000000000000 --- a/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-config +++ /dev/null @@ -1,76 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -OZONE-SITE.XML_ozone.om.address=ozoneManager -OZONE-SITE.XML_ozone.om.http-address=ozoneManager:9874 -OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.enabled=true -OZONE-SITE.XML_ozone.scm.datanode.id=/data/datanode.id -OZONE-SITE.XML_ozone.scm.block.client.address=scm -OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata -OZONE-SITE.XML_ozone.scm.client.address=scm -OZONE-SITE.XML_ozone.replication=1 -OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService - -HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService -HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000 -HDFS-SITE.XML_rpc.metrics.quantile.enable=true -HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 - -LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout -LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender -LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n - -#Enable this variable to print out all hadoop rpc traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation. 
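The prefixed keys in these docker-config files are turned into Hadoop configuration files by the container images: everything after the `OZONE-SITE.XML_` prefix becomes a property in ozone-site.xml, and likewise for `HDFS-SITE.XML_` and the log4j prefixes. An illustrative mapping (this is an assumption about the launcher inside the flokkr/hadoop-runner images, which is not part of this patch):

```
# docker-config entry:
OZONE-SITE.XML_ozone.scm.names=scm

# is rendered inside the container into etc/hadoop/ozone-site.xml as:
<property>
  <name>ozone.scm.names</name>
  <value>scm</value>
</property>
```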
-#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm - -#LOG4J2.PROPERTIES_* are for Ozone Audit Logging -LOG4J2.PROPERTIES_monitorInterval=30 -LOG4J2.PROPERTIES_filter=read,write -LOG4J2.PROPERTIES_filter.read.type=MarkerFilter -LOG4J2.PROPERTIES_filter.read.marker=READ -LOG4J2.PROPERTIES_filter.read.onMatch=DENY -LOG4J2.PROPERTIES_filter.read.onMismatch=NEUTRAL -LOG4J2.PROPERTIES_filter.write.type=MarkerFilter -LOG4J2.PROPERTIES_filter.write.marker=WRITE -LOG4J2.PROPERTIES_filter.write.onMatch=NEUTRAL -LOG4J2.PROPERTIES_filter.write.onMismatch=NEUTRAL -LOG4J2.PROPERTIES_appenders=console, rolling -LOG4J2.PROPERTIES_appender.console.type=Console -LOG4J2.PROPERTIES_appender.console.name=STDOUT -LOG4J2.PROPERTIES_appender.console.layout.type=PatternLayout -LOG4J2.PROPERTIES_appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n -LOG4J2.PROPERTIES_appender.rolling.type=RollingFile -LOG4J2.PROPERTIES_appender.rolling.name=RollingFile -LOG4J2.PROPERTIES_appender.rolling.fileName =${sys:hadoop.log.dir}/om-audit-${hostName}.log -LOG4J2.PROPERTIES_appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz -LOG4J2.PROPERTIES_appender.rolling.layout.type=PatternLayout -LOG4J2.PROPERTIES_appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n -LOG4J2.PROPERTIES_appender.rolling.policies.type=Policies -LOG4J2.PROPERTIES_appender.rolling.policies.time.type=TimeBasedTriggeringPolicy -LOG4J2.PROPERTIES_appender.rolling.policies.time.interval=86400 -LOG4J2.PROPERTIES_appender.rolling.policies.size.type=SizeBasedTriggeringPolicy -LOG4J2.PROPERTIES_appender.rolling.policies.size.size=64MB -LOG4J2.PROPERTIES_loggers=audit -LOG4J2.PROPERTIES_logger.audit.type=AsyncLogger -LOG4J2.PROPERTIES_logger.audit.name=OMAudit -LOG4J2.PROPERTIES_logger.audit.level=INFO -LOG4J2.PROPERTIES_logger.audit.appenderRefs=rolling -LOG4J2.PROPERTIES_logger.audit.appenderRef.file.ref=RollingFile -LOG4J2.PROPERTIES_rootLogger.level=INFO -LOG4J2.PROPERTIES_rootLogger.appenderRefs=stdout -LOG4J2.PROPERTIES_rootLogger.appenderRef.stdout.ref=STDOUT diff --git a/hadoop-ozone/dist/src/main/compose/ozone/.env b/hadoop-ozone/dist/src/main/compose/ozone/.env deleted file mode 100644 index 67eed25884f9b..0000000000000 --- a/hadoop-ozone/dist/src/main/compose/ozone/.env +++ /dev/null @@ -1,17 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -HDDS_VERSION=${hdds.version} \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml deleted file mode 100644 index 0a6a9d802803c..0000000000000 --- a/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml +++ /dev/null @@ -1,49 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -version: "3" -services: - datanode: - image: apache/hadoop-runner - volumes: - - ../..:/opt/hadoop - ports: - - 9864 - command: ["/opt/hadoop/bin/ozone","datanode"] - env_file: - - ./docker-config - ozoneManager: - image: apache/hadoop-runner - volumes: - - ../..:/opt/hadoop - ports: - - 9874:9874 - environment: - ENSURE_OM_INITIALIZED: /data/metadata/ozoneManager/current/VERSION - env_file: - - ./docker-config - command: ["/opt/hadoop/bin/ozone","om"] - scm: - image: apache/hadoop-runner - volumes: - - ../..:/opt/hadoop - ports: - - 9876:9876 - env_file: - - ./docker-config - environment: - ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION - command: ["/opt/hadoop/bin/ozone","scm"] diff --git a/hadoop-ozone/dist/src/main/compose/ozone/docker-config b/hadoop-ozone/dist/src/main/compose/ozone/docker-config deleted file mode 100644 index f2c8db1f3bbc2..0000000000000 --- a/hadoop-ozone/dist/src/main/compose/ozone/docker-config +++ /dev/null @@ -1,75 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -OZONE-SITE.XML_ozone.om.address=ozoneManager -OZONE-SITE.XML_ozone.om.http-address=ozoneManager:9874 -OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.enabled=True -OZONE-SITE.XML_ozone.scm.datanode.id=/data/datanode.id -OZONE-SITE.XML_ozone.scm.block.client.address=scm -OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata -OZONE-SITE.XML_ozone.handler.type=distributed -OZONE-SITE.XML_ozone.scm.client.address=scm -OZONE-SITE.XML_ozone.replication=1 -HDFS-SITE.XML_rpc.metrics.quantile.enable=true -HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 -LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout -LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender -LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n -LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR -LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.conf.ConfUtils=WARN -LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR - -#Enable this variable to print out all hadoop rpc traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation. -#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm - -#LOG4J2.PROPERTIES_* are for Ozone Audit Logging -LOG4J2.PROPERTIES_monitorInterval=30 -LOG4J2.PROPERTIES_filter=read,write -LOG4J2.PROPERTIES_filter.read.type=MarkerFilter -LOG4J2.PROPERTIES_filter.read.marker=READ -LOG4J2.PROPERTIES_filter.read.onMatch=DENY -LOG4J2.PROPERTIES_filter.read.onMismatch=NEUTRAL -LOG4J2.PROPERTIES_filter.write.type=MarkerFilter -LOG4J2.PROPERTIES_filter.write.marker=WRITE -LOG4J2.PROPERTIES_filter.write.onMatch=NEUTRAL -LOG4J2.PROPERTIES_filter.write.onMismatch=NEUTRAL -LOG4J2.PROPERTIES_appenders=console, rolling -LOG4J2.PROPERTIES_appender.console.type=Console -LOG4J2.PROPERTIES_appender.console.name=STDOUT -LOG4J2.PROPERTIES_appender.console.layout.type=PatternLayout -LOG4J2.PROPERTIES_appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n -LOG4J2.PROPERTIES_appender.rolling.type=RollingFile -LOG4J2.PROPERTIES_appender.rolling.name=RollingFile -LOG4J2.PROPERTIES_appender.rolling.fileName =${sys:hadoop.log.dir}/om-audit-${hostName}.log -LOG4J2.PROPERTIES_appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz -LOG4J2.PROPERTIES_appender.rolling.layout.type=PatternLayout -LOG4J2.PROPERTIES_appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n -LOG4J2.PROPERTIES_appender.rolling.policies.type=Policies -LOG4J2.PROPERTIES_appender.rolling.policies.time.type=TimeBasedTriggeringPolicy -LOG4J2.PROPERTIES_appender.rolling.policies.time.interval=86400 -LOG4J2.PROPERTIES_appender.rolling.policies.size.type=SizeBasedTriggeringPolicy -LOG4J2.PROPERTIES_appender.rolling.policies.size.size=64MB -LOG4J2.PROPERTIES_loggers=audit -LOG4J2.PROPERTIES_logger.audit.type=AsyncLogger -LOG4J2.PROPERTIES_logger.audit.name=OMAudit -LOG4J2.PROPERTIES_logger.audit.level=INFO -LOG4J2.PROPERTIES_logger.audit.appenderRefs=rolling -LOG4J2.PROPERTIES_logger.audit.appenderRef.file.ref=RollingFile -LOG4J2.PROPERTIES_rootLogger.level=INFO -LOG4J2.PROPERTIES_rootLogger.appenderRefs=stdout -LOG4J2.PROPERTIES_rootLogger.appenderRef.stdout.ref=STDOUT diff --git a/hadoop-ozone/dist/src/main/compose/ozonefs/docker-compose.yaml 
b/hadoop-ozone/dist/src/main/compose/ozonefs/docker-compose.yaml deleted file mode 100644 index a1e874849eec4..0000000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonefs/docker-compose.yaml +++ /dev/null @@ -1,59 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -version: "3" -services: - datanode: - image: apache/hadoop-runner - volumes: - - ../..:/opt/hadoop - ports: - - 9864 - command: ["/opt/hadoop/bin/ozone","datanode"] - env_file: - - ./docker-config - ozoneManager: - image: apache/hadoop-runner - hostname: ozoneManager - volumes: - - ../..:/opt/hadoop - ports: - - 9874 - environment: - ENSURE_OM_INITIALIZED: /data/metadata/ozoneManager/current/VERSION - env_file: - - ./docker-config - command: ["/opt/hadoop/bin/ozone","om"] - scm: - image: apache/hadoop-runner - volumes: - - ../..:/opt/hadoop - ports: - - 9876 - env_file: - - ./docker-config - environment: - ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION - command: ["/opt/hadoop/bin/ozone","scm"] - hadooplast: - image: flokkr/hadoop:3.1.0 - volumes: - - ../..:/opt/ozone - env_file: - - ./docker-config - environment: - HADOOP_CLASSPATH: /opt/ozone/share/hadoop/ozonefs/*.jar - command: ["watch","-n","100000","ls"] diff --git a/hadoop-ozone/dist/src/main/compose/ozonefs/docker-config b/hadoop-ozone/dist/src/main/compose/ozonefs/docker-config deleted file mode 100644 index 3171f089e1b59..0000000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonefs/docker-config +++ /dev/null @@ -1,35 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -CORE-SITE.XML_fs.o3.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem -OZONE-SITE.XML_ozone.om.address=ozoneManager -OZONE-SITE.XML_ozone.om.http-address=ozoneManager:9874 -OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.enabled=True -OZONE-SITE.XML_ozone.scm.datanode.id=/data/datanode.id -OZONE-SITE.XML_ozone.scm.block.client.address=scm -OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata -OZONE-SITE.XML_ozone.handler.type=distributed -OZONE-SITE.XML_ozone.scm.client.address=scm -OZONE-SITE.XML_ozone.scm.heartbeat.interval=3s -HDFS-SITE.XML_rpc.metrics.quantile.enable=true -HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 -LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout -LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender -LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n -LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR -LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.conf.ConfUtils=WARN diff --git a/hadoop-ozone/dist/src/main/compose/ozoneperf/.env b/hadoop-ozone/dist/src/main/compose/ozoneperf/.env deleted file mode 100644 index cac418ae59ee5..0000000000000 --- a/hadoop-ozone/dist/src/main/compose/ozoneperf/.env +++ /dev/null @@ -1,17 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements.  See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership.  The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License.  You may obtain a copy of the License at -# -#     http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -HDDS_VERSION=${hdds.version} diff --git a/hadoop-ozone/dist/src/main/compose/ozoneperf/README.md b/hadoop-ozone/dist/src/main/compose/ozoneperf/README.md deleted file mode 100644 index 527ff418d37cb..0000000000000 --- a/hadoop-ozone/dist/src/main/compose/ozoneperf/README.md +++ /dev/null @@ -1,73 +0,0 @@ - - -# Compose files for local performance tests - -This directory contains a docker-compose definition for an Ozone cluster where -all the metrics are saved to a Prometheus instance. - - Prometheus follows a pull-based approach where the metrics are published - on an HTTP endpoint. - - Our current approach: - - 1. A Java agent activates a Prometheus metrics endpoint in every JVM instance - (use `init.sh` to download the agent) - - 2. The Java agent publishes all the JMX parameters in Prometheus format AND - registers the endpoint address with Consul. - - 3. Prometheus polls all the endpoints which are registered in Consul. - - - -## How to use - -First, download the required Java agent by running `./init.sh` - -After that you can start the cluster with docker-compose: - -``` -docker-compose up -d -``` - -After a while the cluster will be started.
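To verify that the pull pipeline described above works end to end, you can query the Prometheus HTTP API directly; a minimal sketch, assuming the port mapping from the docker-compose.yaml in this directory (Prometheus published on 9090):

```
# list the scrape targets Prometheus discovered through Consul
curl -s http://localhost:9090/api/v1/targets

# run an instant query for one of the OzoneManager counters named in the examples below
curl -s 'http://localhost:9090/api/v1/query?query=Hadoop_OzoneManager_NumKeyCommits'
```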
You can check the Ozone web UIs: - -https://localhost:9874 -https://localhost:9876 - -You can also scale up the datanodes: - -``` -docker-compose scale datanode=3 -``` - -Freon (the Ozone test generator tool) is not part of the docker-compose setup by default; -you can activate it using `compose-all.sh` instead of `docker-compose`: - -``` -compose-all.sh up -d -``` - -Now Freon is running. Let's check the metrics in the local Prometheus: - -http://localhost:9090/graph - -Example queries: - -``` -Hadoop_OzoneManager_NumKeyCommits -rate(Hadoop_OzoneManager_NumKeyCommits[10m]) -rate(Hadoop_Ozone_BYTES_WRITTEN[10m]) -``` diff --git a/hadoop-ozone/dist/src/main/compose/ozoneperf/compose-all.sh b/hadoop-ozone/dist/src/main/compose/ozoneperf/compose-all.sh deleted file mode 100755 index 82ab8b3101def..0000000000000 --- a/hadoop-ozone/dist/src/main/compose/ozoneperf/compose-all.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements.  See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership.  The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License.  You may obtain a copy of the License at -# -#     http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -docker-compose -f docker-compose.yaml -f docker-compose-freon.yaml "$@" diff --git a/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-compose-freon.yaml b/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-compose-freon.yaml deleted file mode 100644 index 60bdc4a503f4c..0000000000000 --- a/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-compose-freon.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements.  See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership.  The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License.  You may obtain a copy of the License at -# -#     http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -version: "3" -services: -   freon: -      image: apache/hadoop-runner -      volumes: -         - ../../ozone:/opt/hadoop -         - ./jmxpromo.jar:/opt/jmxpromo.jar -      env_file: -         - ./docker-config -      command: ["/opt/hadoop/bin/ozone","freon"] diff --git a/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-compose.yaml deleted file mode 100644 index 12b28bbcf0d68..0000000000000 --- a/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-compose.yaml +++ /dev/null @@ -1,64 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements.
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -version: "3" -services: - datanode: - image: apache/hadoop-runner - volumes: - - ../..:/opt/hadoop - - ./jmxpromo.jar:/opt/jmxpromo.jar - ports: - - 9864 - command: ["/opt/hadoop/bin/ozone","datanode"] - env_file: - - ./docker-config - ozoneManager: - image: apache/hadoop-runner - volumes: - - ../..:/opt/hadoop - - ./jmxpromo.jar:/opt/jmxpromo.jar - ports: - - 9874:9874 - environment: - ENSURE_OM_INITIALIZED: /data/metadata/ozoneManager/current/VERSION - env_file: - - ./docker-config - command: ["/opt/hadoop/bin/ozone","om"] - scm: - image: apache/hadoop-runner - volumes: - - ../..:/opt/hadoop - - ./jmxpromo.jar:/opt/jmxpromo.jar - ports: - - 9876:9876 - env_file: - - ./docker-config - environment: - ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION - command: ["/opt/hadoop/bin/ozone","scm"] - consul: - image: consul - command: ["agent", "-dev", "-ui", "-client", "0.0.0.0"] - ports: - - 8500:8500 - prometheus: - image: prom/prometheus - volumes: - - "./prometheus.yml:/etc/prometheus.yml" - command: ["--config.file","/etc/prometheus.yml"] - ports: - - 9090:9090 diff --git a/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-config b/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-config deleted file mode 100644 index 1ed116992c2a6..0000000000000 --- a/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-config +++ /dev/null @@ -1,35 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -OZONE-SITE.XML_ozone.om.address=ozoneManager -OZONE-SITE.XML_ozone.om.http-address=ozoneManager:9874 -OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.enabled=True -OZONE-SITE.XML_ozone.scm.datanode.id=/data/datanode.id -OZONE-SITE.XML_ozone.scm.block.client.address=scm -OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata -OZONE-SITE.XML_ozone.handler.type=distributed -OZONE-SITE.XML_ozone.scm.client.address=scm -HDFS-SITE.XML_rpc.metrics.quantile.enable=true -HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 -LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout -LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender -LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n -HADOOP_OPTS=-javaagent:/opt/jmxpromo.jar=port=0:consulHost=consul:consulMode=node -LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR -LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.conf.ConfUtils=WARN -LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR diff --git a/hadoop-ozone/dist/src/main/compose/ozoneperf/init.sh b/hadoop-ozone/dist/src/main/compose/ozoneperf/init.sh deleted file mode 100755 index cf25398bc20ce..0000000000000 --- a/hadoop-ozone/dist/src/main/compose/ozoneperf/init.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -EXPORTER_FILE="$DIR/jmxpromo.jar" -if [ ! -f "$EXPORTER_FILE" ]; then - wget https://github.com/flokkr/jmxpromo/releases/download/0.11/jmx_prometheus_javaagent-0.11.jar -O $EXPORTER_FILE -fi diff --git a/hadoop-ozone/dist/src/main/compose/ozoneperf/prometheus.yml b/hadoop-ozone/dist/src/main/compose/ozoneperf/prometheus.yml deleted file mode 100644 index 80aa5203a20ed..0000000000000 --- a/hadoop-ozone/dist/src/main/compose/ozoneperf/prometheus.yml +++ /dev/null @@ -1,24 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -global: - scrape_interval: 15s # By default, scrape targets every 15 seconds. - -scrape_configs: - - job_name: jmxexporter - consul_sd_configs: - - server: consul:8500 - services: - - jmxexporter diff --git a/hadoop-ozone/dist/src/main/compose/ozones3/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozones3/docker-compose.yaml deleted file mode 100644 index 42d7d1d1a67a1..0000000000000 --- a/hadoop-ozone/dist/src/main/compose/ozones3/docker-compose.yaml +++ /dev/null @@ -1,58 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -version: "3" -services: - datanode: - image: apache/hadoop-runner - volumes: - - ../..:/opt/hadoop - ports: - - 9864 - command: ["ozone","datanode"] - env_file: - - ./docker-config - ozoneManager: - image: apache/hadoop-runner - volumes: - - ../..:/opt/hadoop - ports: - - 9874:9874 - environment: - ENSURE_OM_INITIALIZED: /data/metadata/ozoneManager/current/VERSION - env_file: - - ./docker-config - command: ["ozone","om"] - scm: - image: apache/hadoop-runner - volumes: - - ../..:/opt/hadoop - ports: - - 9876:9876 - env_file: - - ./docker-config - environment: - ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION - command: ["ozone","scm"] - s3g: - image: apache/hadoop-runner - volumes: - - ../..:/opt/hadoop - ports: - - 9878:9878 - env_file: - - ./docker-config - command: ["ozone","s3g"] diff --git a/hadoop-ozone/dist/src/main/compose/ozones3/docker-config b/hadoop-ozone/dist/src/main/compose/ozones3/docker-config deleted file mode 100644 index 2b22874b021d2..0000000000000 --- a/hadoop-ozone/dist/src/main/compose/ozones3/docker-config +++ /dev/null @@ -1,73 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -OZONE-SITE.XML_ozone.om.address=ozoneManager -OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.enabled=true -OZONE-SITE.XML_ozone.scm.datanode.id=/data/datanode.id -OZONE-SITE.XML_ozone.scm.block.client.address=scm -OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata -OZONE-SITE.XML_ozone.scm.client.address=scm -OZONE-SITE.XML_ozone.replication=1 -HDFS-SITE.XML_rpc.metrics.quantile.enable=true -HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 -LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout -LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender -LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n -LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR -LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.conf.ConfUtils=WARN -LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR - -#Enable this variable to print out all hadoop rpc traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation. -#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm - -#LOG4J2.PROPERTIES_* are for Ozone Audit Logging -LOG4J2.PROPERTIES_monitorInterval=30 -LOG4J2.PROPERTIES_filter=read,write -LOG4J2.PROPERTIES_filter.read.type=MarkerFilter -LOG4J2.PROPERTIES_filter.read.marker=READ -LOG4J2.PROPERTIES_filter.read.onMatch=DENY -LOG4J2.PROPERTIES_filter.read.onMismatch=NEUTRAL -LOG4J2.PROPERTIES_filter.write.type=MarkerFilter -LOG4J2.PROPERTIES_filter.write.marker=WRITE -LOG4J2.PROPERTIES_filter.write.onMatch=NEUTRAL -LOG4J2.PROPERTIES_filter.write.onMismatch=NEUTRAL -LOG4J2.PROPERTIES_appenders=console, rolling -LOG4J2.PROPERTIES_appender.console.type=Console -LOG4J2.PROPERTIES_appender.console.name=STDOUT -LOG4J2.PROPERTIES_appender.console.layout.type=PatternLayout -LOG4J2.PROPERTIES_appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n -LOG4J2.PROPERTIES_appender.rolling.type=RollingFile -LOG4J2.PROPERTIES_appender.rolling.name=RollingFile -LOG4J2.PROPERTIES_appender.rolling.fileName =${sys:hadoop.log.dir}/om-audit-${hostName}.log -LOG4J2.PROPERTIES_appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz -LOG4J2.PROPERTIES_appender.rolling.layout.type=PatternLayout -LOG4J2.PROPERTIES_appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n -LOG4J2.PROPERTIES_appender.rolling.policies.type=Policies -LOG4J2.PROPERTIES_appender.rolling.policies.time.type=TimeBasedTriggeringPolicy -LOG4J2.PROPERTIES_appender.rolling.policies.time.interval=86400 -LOG4J2.PROPERTIES_appender.rolling.policies.size.type=SizeBasedTriggeringPolicy -LOG4J2.PROPERTIES_appender.rolling.policies.size.size=64MB -LOG4J2.PROPERTIES_loggers=audit -LOG4J2.PROPERTIES_logger.audit.type=AsyncLogger -LOG4J2.PROPERTIES_logger.audit.name=OMAudit -LOG4J2.PROPERTIES_logger.audit.level=INFO -LOG4J2.PROPERTIES_logger.audit.appenderRefs=rolling -LOG4J2.PROPERTIES_logger.audit.appenderRef.file.ref=RollingFile -LOG4J2.PROPERTIES_rootLogger.level=INFO -LOG4J2.PROPERTIES_rootLogger.appenderRefs=stdout -LOG4J2.PROPERTIES_rootLogger.appenderRef.stdout.ref=STDOUT diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/authorized_keys b/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/authorized_keys deleted file mode 100644 index 
ae390529c7eb2..0000000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/authorized_keys +++ /dev/null @@ -1,16 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDgEmLpYm4BrWtq1KG9hhZXCZgGrETntu0eNTo21U3VKc9nH9/ot7M6lAawsFcT9uXu4b58PTlnfvwH/TATlCFjC8n0Z7SOx+FU6L3Sn8URh9HaX4L0tF8u87oCAD4dBrUGhhB36eiuH9dBBWly6RKffYJvrjatbc7GxBO/e5OSUMtqk/DSVKksmBhZxutrKivCNjDish9ViGIf8b5yS/MlEGmaVKApik1fJ5iOlloM/GgpB60YV/hbqfCecbWgeiM1gK92gdOcA/Wx1C7fj8BSI5iDSE6eZeF80gM3421lvyPDWyVhFaGbka4rXBX/fb9QSRBA9RTqhRKAEmAIf49H hadoop@cdae967fa87a diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/config b/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/config deleted file mode 100644 index 6506916ded0f3..0000000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/config +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -Host * - UserKnownHostsFile /dev/null - StrictHostKeyChecking no diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/environment b/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/environment deleted file mode 100644 index 5685453be12e6..0000000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/environment +++ /dev/null @@ -1,16 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-amd64/ diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/id_rsa b/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/id_rsa deleted file mode 100644 index 6632ce51c54a7..0000000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/id_rsa +++ /dev/null @@ -1,42 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEA4BJi6WJuAa1ratShvYYWVwmYBqxE57btHjU6NtVN1SnPZx/f -6LezOpQGsLBXE/bl7uG+fD05Z378B/0wE5QhYwvJ9Ge0jsfhVOi90p/FEYfR2l+C -9LRfLvO6AgA+HQa1BoYQd+norh/XQQVpcukSn32Cb642rW3OxsQTv3uTklDLapPw -0lSpLJgYWcbrayorwjYw4rIfVYhiH/G+ckvzJRBpmlSgKYpNXyeYjpZaDPxoKQet -GFf4W6nwnnG1oHojNYCvdoHTnAP1sdQu34/AUiOYg0hOnmXhfNIDN+NtZb8jw1sl -YRWhm5GuK1wV/32/UEkQQPUU6oUSgBJgCH+PRwIDAQABAoIBAQDI1TH6ZNKchkck -9XgSWsBjOqIcOQN5fCeDT8nho8WjLVpL3/Hcr+ngsxRcAXHK3xyvw33r9SQic1qJ -/pC8u6RBFivo95qJ7vU0GXcp9TG4yLd6tui1U4WMm784U+dYNM7EDh1snSaECt3v -1V3yNJ0QfnoOh2NShn0zAkOA+M4H8Nx2SudMCsjcbK9+fYxzW3hX+sJpMKdjG1HW -DUz+I7cW7t0EGaVrgVSV+eR58LiXu+14YDNMrySiejB4nD2sKrx93XgiCBECCsBN -GLQGJCztaXoAY+5Kf/aJ9EEf2wBF3GecRk+XIAd87PeDmeahLQAVkAJ/rD1vsKFs -8kWi6CrhAoGBAP7leG/dMBhlfvROpBddIfWm2i8B+oZiAlSjdYGz+/ZhUaByXk18 -pupMGiiMQR1ziPnEg0gNgR2ZkH54qrXPn5WcQa4rlSEtUsZEp5v5WblhfX2QwKzY -G/uhA+mB7wXpQkSmXo0LclfPF2teROQrG1OyfWkWbxFH4i3+em7sL95jAoGBAOEK -v+wscqkMLW7Q8ONbWMCCBlmMHr6baB3VDCYZx25lr+GIF5zmJJFTmF2rq2VSAlts -qx1AGmaUSo78kC5FuJvSNTL6a1Us5ucdthQZM3N8pAz+OAE+QEU+BsdA27yAh3tO -yKDsMFNHKtXcgy5LeB5gzENLlNyw2jgkRv2Ef77NAoGAVH8DHqoHEH9Mx3XuRWR1 -JnaqKx0PzE5fEWmiQV3Fr5XxNivTgQJKXq7dmQVtbHLpPErdbhwz6fkHAjXD+UMb -VsAWscL2y6m3n8wQd87/5EkiDWbXyDRXimGE53pQHviFJDa2bzEVNXCMBeaZFb4I -cAViN1zdcrAOlUqfkXewIpsCgYB8wsXl/DpRB+RENGfn0+OfTjaQ/IKq72NIbq1+ -jfondQ6N/TICFQEe5HZrL9okoNOXteYjoD9CsWGoZdLVJGgVUvOVYImSvgMBDFK+ -T75bfzU/0sxfvBBLkviVDJsFpUf3D5VgybB86s6Po+HCD6r3RHjZshRESXOhflMx -B3z+3QKBgE2Lwo0DuwUGkm6k8psyn3x8EiXNsiNw12cojicFTyKUYLHxMBeVbCLW -3j3pxSggJgRuBLLzixUHbHp91r2ARTy28naK7R/la8yKVqK6ojcikN2mQsCHYtwB -nuFwXr42ytn6G+9Wn4xT64tGjRCqyZn0/v0XsPjVCyrZ6G7EtNHP ------END RSA PRIVATE KEY----- diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/id_rsa.pub b/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/id_rsa.pub deleted file mode 100644 index ae390529c7eb2..0000000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/id_rsa.pub +++ /dev/null @@ -1,16 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License.  You may obtain a copy of the License at -# -#     http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDgEmLpYm4BrWtq1KG9hhZXCZgGrETntu0eNTo21U3VKc9nH9/ot7M6lAawsFcT9uXu4b58PTlnfvwH/TATlCFjC8n0Z7SOx+FU6L3Sn8URh9HaX4L0tF8u87oCAD4dBrUGhhB36eiuH9dBBWly6RKffYJvrjatbc7GxBO/e5OSUMtqk/DSVKksmBhZxutrKivCNjDish9ViGIf8b5yS/MlEGmaVKApik1fJ5iOlloM/GgpB60YV/hbqfCecbWgeiM1gK92gdOcA/Wx1C7fj8BSI5iDSE6eZeF80gM3421lvyPDWyVhFaGbka4rXBX/fb9QSRBA9RTqhRKAEmAIf49H hadoop@cdae967fa87a diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/Dockerfile b/hadoop-ozone/dist/src/main/compose/ozonescripts/Dockerfile deleted file mode 100644 index 3bdcb0cc9827d..0000000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonescripts/Dockerfile +++ /dev/null @@ -1,33 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements.  See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership.  The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License.  You may obtain a copy of the License at -# -#     http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -FROM apache/hadoop-runner -RUN sudo apt-get update && sudo apt-get install -y openssh-server - -RUN sudo mkdir -p /run/sshd -RUN sudo sed -i "s/.*UsePrivilegeSeparation.*/UsePrivilegeSeparation no/g" /etc/ssh/sshd_config -RUN sudo sed -i "s/.*PermitUserEnvironment.*/PermitUserEnvironment yes/g" /etc/ssh/sshd_config -RUN sudo sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd - -#/opt/hadoop is mounted, so we can't use it as the home directory -RUN sudo usermod -d /opt hadoop -ADD .ssh /opt/.ssh -RUN sudo chown -R hadoop /opt/.ssh -RUN sudo chown hadoop /opt -RUN sudo chmod 600 /opt/.ssh/* -RUN sudo chmod 700 /opt/.ssh - -RUN sudo sh -c 'echo "export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-amd64/" >> /etc/profile' -CMD ["sudo","/usr/sbin/sshd","-D"] diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/README.md b/hadoop-ozone/dist/src/main/compose/ozonescripts/README.md deleted file mode 100644 index 2531fa43660f0..0000000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonescripts/README.md +++ /dev/null @@ -1,38 +0,0 @@ - - -# start-ozone environment - -This is an example environment to use/test the `./sbin/start-ozone.sh` and `./sbin/stop-ozone.sh` scripts. - -There are SSH connections between the containers, so the start/stop scripts can handle the start/stop process -much as they would on a real cluster.
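Because the scripts drive every daemon over SSH, a quick connectivity check can save debugging time before starting anything; a minimal sketch, assuming the service names from the docker-compose.yaml below (scm, om, datanode) and the passwordless keys baked into the image (a hypothetical check, not part of the original scripts):

```
# from the scm container, confirm passwordless SSH to a datanode
docker-compose exec scm ssh -o ConnectTimeout=5 datanode true && echo "ssh ok"
```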
- -To use it, first start the cluster: - -``` -docker-compose up -d -``` - -After a successful startup (which starts only the SSH daemons) you can start Ozone: - -``` -./start.sh -``` - -Check that the Java processes are started: - -``` -./ps.sh -``` \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-compose.yaml deleted file mode 100644 index 62f116368f4bb..0000000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-compose.yaml +++ /dev/null @@ -1,42 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements.  See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership.  The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License.  You may obtain a copy of the License at -# -#     http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -version: "3" -services: -   datanode: -      build: . -      volumes: -         - ../..:/opt/hadoop -      ports: -         - 9864 -      env_file: -         - ./docker-config -   om: -      build: . -      volumes: -         - ../..:/opt/hadoop -      ports: -         - 9874:9874 -      env_file: -         - ./docker-config -   scm: -      build: . -      volumes: -         - ../..:/opt/hadoop -      ports: -         - 9876:9876 -      env_file: -         - ./docker-config diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-config b/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-config deleted file mode 100644 index 1afec73e0edeb..0000000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-config +++ /dev/null @@ -1,38 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements.  See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership.  The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License.  You may obtain a copy of the License at -# -#     http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.
- -JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-amd64/ -CORE-SITE.XML_fs.defaultFS=hdfs://namenode:9000 -OZONE-SITE.XML_ozone.ksm.address=ksm -OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.enabled=true -OZONE-SITE.XML_ozone.scm.datanode.id=/data/datanode.id -OZONE-SITE.XML_ozone.om.address=om -OZONE-SITE.XML_ozone.om.http-address=om:9874 -OZONE-SITE.XML_ozone.scm.block.client.address=scm -OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata -OZONE-SITE.XML_ozone.scm.client.address=scm -OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService -HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000 -HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode -HDFS-SITE.XML_rpc.metrics.quantile.enable=true -HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 -HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService -LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout -LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR, stdout -LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender -LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/ps.sh b/hadoop-ozone/dist/src/main/compose/ozonescripts/ps.sh deleted file mode 100755 index d5e2c38675244..0000000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonescripts/ps.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -docker-compose ps -q | xargs -n1 -I CONTAINER docker exec CONTAINER ps xa \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/start.sh b/hadoop-ozone/dist/src/main/compose/ozonescripts/start.sh deleted file mode 100755 index 3358b07c4e9dc..0000000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonescripts/start.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -docker-compose ps | grep datanode | awk '{print $1}' | xargs -n1  docker inspect --format '{{ .Config.Hostname }}' > ../../etc/hadoop/workers -docker-compose exec scm /opt/hadoop/bin/ozone scm -init -docker-compose exec scm /opt/hadoop/sbin/start-ozone.sh -#We need a running SCM for OM object store creation -#TODO create a utility to wait for the startup -sleep 10 -docker-compose exec om /opt/hadoop/bin/ozone om -createObjectStore -docker-compose exec scm /opt/hadoop/sbin/start-ozone.sh diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/stop.sh b/hadoop-ozone/dist/src/main/compose/ozonescripts/stop.sh deleted file mode 100755 index a3ce08af57315..0000000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonescripts/stop.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements.  See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership.  The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License.  You may obtain a copy of the License at -# -#     http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -docker-compose exec scm /opt/hadoop/sbin/stop-ozone.sh diff --git a/hadoop-ozone/dist/src/main/ozone/README.txt b/hadoop-ozone/dist/src/main/ozone/README.txt deleted file mode 100644 index 6bbd83ffd04d2..0000000000000 --- a/hadoop-ozone/dist/src/main/ozone/README.txt +++ /dev/null @@ -1,51 +0,0 @@ - - -This is the distribution of Apache Hadoop Ozone. - -Ozone is a submodule of Hadoop with a separate release cycle. For more information, check - -   http://ozone.hadoop.apache.org - - and - -   https://cwiki.apache.org/confluence/display/HADOOP/Ozone+Contributor+Guide - -For more information about Hadoop, check: - -   http://hadoop.apache.org - -This distribution includes cryptographic software.  The country in -which you currently reside may have restrictions on the import, -possession, use, and/or re-export to another country, of -encryption software.  BEFORE using any encryption software, please -check your country's laws, regulations and policies concerning the -import, possession, or use, and re-export of encryption software, to -see if this is permitted.  See <http://www.wassenaar.org/> for more -information. - -The U.S. Government Department of Commerce, Bureau of Industry and -Security (BIS), has classified this software as Export Commodity -Control Number (ECCN) 5D002.C.1, which includes information security -software using or performing cryptographic functions with asymmetric -algorithms.  The form and manner of this Apache Software Foundation -distribution makes it eligible for export under the License Exception -ENC Technology Software Unrestricted (TSU) exception (see the BIS -Export Administration Regulations, Section 740.13) for both object -code and source code. - -The following provides more details on the included cryptographic -software: -  Hadoop Core uses the SSL libraries from the Jetty project written -by mortbay.org.
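The `start.sh` script above sleeps a fixed 10 seconds waiting for SCM before creating the OM object store, and its TODO asks for a proper wait utility; a minimal polling sketch that could replace the sleep, assuming the SCM web UI port 9876 used throughout these compose files:

```
# poll the SCM port inside the container instead of sleeping a fixed interval
for i in $(seq 1 30); do
  if docker-compose exec scm bash -c 'exec 3<>/dev/tcp/localhost/9876' 2>/dev/null; then
    echo "SCM is up"
    break
  fi
  sleep 2
done
```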
diff --git a/hadoop-ozone/dist/src/main/smoketest/README.md b/hadoop-ozone/dist/src/main/smoketest/README.md deleted file mode 100644 index c521a54beb20b..0000000000000 --- a/hadoop-ozone/dist/src/main/smoketest/README.md +++ /dev/null @@ -1,30 +0,0 @@ - - -## Ozone Acceptance Tests - -This directory contains a [robotframework](http://robotframework.org/)-based test suite for Ozone to make it easier to check the current state of the package. - -You can run it in any environment after [installing robotframework](https://github.com/robotframework/robotframework/blob/master/INSTALL.rst): - -``` -cd $DIRECTORY_OF_OZONE -robot smoketest/basic -``` - -The argument of `robot` can be any robot file or directory. - -The current configuration in the robot files (hostnames, ports) is adjusted for the docker-based setup, but you can easily modify it for any environment (see the variable-override sketch below). - -The `./test.sh` script in this directory can start multiple types of clusters (standalone Ozone, or Ozone + HDFS) and execute the test framework against all of them. diff --git a/hadoop-ozone/dist/src/main/smoketest/basic/basic.robot b/hadoop-ozone/dist/src/main/smoketest/basic/basic.robot deleted file mode 100644 index a69450dbba859..0000000000000 --- a/hadoop-ozone/dist/src/main/smoketest/basic/basic.robot +++ /dev/null @@ -1,47 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements.  See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License.  You may obtain a copy of the License at -# -#     http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.
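The variable-override mechanism referenced in the README above: hostnames and ports are ordinary Robot Framework variables (for example `${DATANODE_HOST}` in `basic.robot` below), so they can be redefined from the command line; a minimal sketch, assuming a locally reachable datanode:

```
robot --variable DATANODE_HOST:127.0.0.1 smoketest/basic/basic.robot
```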
- -*** Settings *** -Documentation       Smoketest ozone cluster startup -Library             OperatingSystem -Resource            ../commonlib.robot - -*** Variables *** -${COMMON_RESTHEADER} -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE root" -${DATANODE_HOST}        localhost - - -*** Test Cases *** - -Test rest interface -    ${result} =     Execute             curl -i -X POST ${COMMON_RESTHEADER} "http://${DATANODE_HOST}:9880/volume1" -                    Should contain      ${result}       201 Created -    ${result} =     Execute             curl -i -X POST ${COMMON_RESTHEADER} "http://${DATANODE_HOST}:9880/volume1/bucket1" -                    Should contain      ${result}       201 Created -    ${result} =     Execute             curl -i -X DELETE ${COMMON_RESTHEADER} "http://${DATANODE_HOST}:9880/volume1/bucket1" -                    Should contain      ${result}       200 OK -    ${result} =     Execute             curl -i -X DELETE ${COMMON_RESTHEADER} "http://${DATANODE_HOST}:9880/volume1" -                    Should contain      ${result}       200 OK - -Check webui static resources -    ${result} =        Execute                curl -s -I http://scm:9876/static/bootstrap-3.3.7/js/bootstrap.min.js -                       Should contain         ${result}    200 -    ${result} =        Execute                curl -s -I http://ozoneManager:9874/static/bootstrap-3.3.7/js/bootstrap.min.js -                       Should contain         ${result}    200 - -Start freon testing -    ${result} =        Execute              ozone freon randomkeys --numOfVolumes 5 --numOfBuckets 5 --numOfKeys 5 --numOfThreads 10 -                       Wait Until Keyword Succeeds    3min    10sec     Should contain   ${result}   Number of Keys added: 125 -                       Should Not Contain               ${result}  ERROR diff --git a/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot b/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot deleted file mode 100644 index 14a576170d75b..0000000000000 --- a/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot +++ /dev/null @@ -1,82 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements.  See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License.  You may obtain a copy of the License at -# -#     http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.
- -*** Settings *** -Documentation Test ozone shell CLI usage -Library OperatingSystem -Resource ../commonlib.robot -Test Timeout 2 minute - -*** Variables *** - -*** Test Cases *** -RestClient without http port - Test ozone shell http:// ozoneManager restwoport - -RestClient with http port - Test ozone shell http:// ozoneManager:9874 restwport - -RestClient without host name - Test ozone shell http:// ${EMPTY} restwohost - -RpcClient with port - Test ozone shell o3:// ozoneManager:9862 rpcwoport - -RpcClient without host - Test ozone shell o3:// ${EMPTY} rpcwport - -RpcClient without scheme - Test ozone shell ${EMPTY} ${EMPTY} rpcwoscheme - - -*** Keywords *** -Test ozone shell - [arguments] ${protocol} ${server} ${volume} - ${result} = Execute ozone sh volume create ${protocol}${server}/${volume} --user bilbo --quota 100TB --root - Should not contain ${result} Failed - Should contain ${result} Creating Volume: ${volume} - ${result} = Execute ozone sh volume list ${protocol}${server}/ --user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="${volume}")' - Should contain ${result} createdOn - ${result} = Execute ozone sh volume list --user bilbo | grep -Ev 'Removed|DEBUG|ERROR|INFO|TRACE|WARN' | jq -r '.[] | select(.volumeName=="${volume}")' - Should contain ${result} createdOn - Execute ozone sh volume update ${protocol}${server}/${volume} --user bill --quota 10TB - ${result} = Execute ozone sh volume info ${protocol}${server}/${volume} | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="${volume}") | .owner | .name' - Should Be Equal ${result} bill - ${result} = Execute ozone sh volume info ${protocol}${server}/${volume} | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="${volume}") | .quota | .size' - Should Be Equal ${result} 10 - Execute ozone sh bucket create ${protocol}${server}/${volume}/bb1 - ${result} = Execute ozone sh bucket info ${protocol}${server}/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .storageType' - Should Be Equal ${result} DISK - ${result} = Execute ozone sh bucket update ${protocol}${server}/${volume}/bb1 --addAcl user:frodo:rw,group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="samwise") | .type' - Should Be Equal ${result} GROUP - ${result} = Execute ozone sh bucket update ${protocol}${server}/${volume}/bb1 --removeAcl group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. 
| select(.bucketName=="bb1") | .acls | .[] | select(.name=="frodo") | .type' - Should Be Equal ${result} USER - ${result} = Execute ozone sh bucket list ${protocol}${server}/${volume}/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName' - Should Be Equal ${result} ${volume} - Run Keyword Test key handling ${protocol} ${server} ${volume} - Execute ozone sh bucket delete ${protocol}${server}/${volume}/bb1 - Execute ozone sh volume delete ${protocol}${server}/${volume} --user bilbo - -Test key handling - [arguments] ${protocol} ${server} ${volume} - Execute ozone sh key put ${protocol}${server}/${volume}/bb1/key1 /opt/hadoop/NOTICE.txt - Execute rm -f NOTICE.txt.1 - Execute ozone sh key get ${protocol}${server}/${volume}/bb1/key1 NOTICE.txt.1 - Execute ls -l NOTICE.txt.1 - ${result} = Execute ozone sh key info ${protocol}${server}/${volume}/bb1/key1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.keyName=="key1")' - Should contain ${result} createdOn - ${result} = Execute ozone sh key list ${protocol}${server}/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.keyName=="key1") | .keyName' - Should Be Equal ${result} key1 - Execute ozone sh key delete ${protocol}${server}/${volume}/bb1/key1 diff --git a/hadoop-ozone/dist/src/main/smoketest/commonlib.robot b/hadoop-ozone/dist/src/main/smoketest/commonlib.robot deleted file mode 100644 index e2620fa4340b5..0000000000000 --- a/hadoop-ozone/dist/src/main/smoketest/commonlib.robot +++ /dev/null @@ -1,24 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -*** Keywords *** - - -Execute - [arguments] ${command} - ${rc} ${output} = Run And Return Rc And Output ${command} - Log ${output} - Should Be Equal As Integers ${rc} 0 - [return] ${output} diff --git a/hadoop-ozone/dist/src/main/smoketest/ozonefs/ozonefs.robot b/hadoop-ozone/dist/src/main/smoketest/ozonefs/ozonefs.robot deleted file mode 100644 index fb7b98cec6335..0000000000000 --- a/hadoop-ozone/dist/src/main/smoketest/ozonefs/ozonefs.robot +++ /dev/null @@ -1,35 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -*** Settings *** -Documentation Ozonefs test -Library OperatingSystem -Resource ../commonlib.robot - -*** Variables *** - - -*** Test Cases *** -Create volume and bucket - Execute ozone sh volume create http://ozoneManager/fstest --user bilbo --quota 100TB --root - Execute ozone sh bucket create http://ozoneManager/fstest/bucket1 - -Check volume from ozonefs - ${result} = Execute ozone fs -ls o3://bucket1.fstest/ - -Create directory from ozonefs - Execute ozone fs -mkdir -p o3://bucket1.fstest/testdir/deep - ${result} = Execute ozone sh key list o3://ozoneManager/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName' - Should contain ${result} testdir/deep diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/awscli.robot b/hadoop-ozone/dist/src/main/smoketest/s3/awscli.robot deleted file mode 100644 index b26ad91d4469c..0000000000000 --- a/hadoop-ozone/dist/src/main/smoketest/s3/awscli.robot +++ /dev/null @@ -1,63 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -*** Settings *** -Documentation S3 gateway test with aws cli -Library OperatingSystem -Library String -Resource ../commonlib.robot - -*** Variables *** -${ENDPOINT_URL} http://s3g:9878 - -*** Keywords *** -Execute AWSCli - [Arguments] ${command} - ${output} = Execute aws s3 --endpoint-url ${ENDPOINT_URL}/${VOLUME} ${command} - [return] ${output} - -*** Test Cases *** - -Create volume and bucket for the tests - ${postfix} = Generate Random String 5 [NUMBERS] - Set Suite Variable ${BUCKET} bucket-${postfix} - Set Suite Variable ${VOLUME} vol-${postfix} - Log Testing s3 commands in /${VOLUME}/${BUCKET} - ${result} = Execute ozone sh volume create /${VOLUME} --user hadoop - ${result} = Execute ozone sh bucket create /${VOLUME}/${BUCKET} - -Install aws s3 cli - Execute sudo apt-get install -y awscli - Set Environment Variable AWS_ACCESS_KEY_ID ANYID - Set Environment Variable AWS_SECRET_ACCESS_KEY ANYKEY - -File upload and directory list - Execute date > /tmp/testfile - ${result} = Execute AWSCli cp /tmp/testfile s3://${BUCKET} - Should contain ${result} upload - ${result} = Execute AWSCli cp /tmp/testfile s3://${BUCKET}/dir1/dir2/file - Should contain ${result} upload - ${result} = Execute AWSCli ls s3://${BUCKET} - Should contain ${result} testfile - Should contain ${result} dir1 - Should not contain ${result} dir2 - ${result} = Execute AWSCli ls s3://${BUCKET}/dir1/ - Should not contain ${result} testfile - Should not contain ${result} dir1 - Should contain ${result} dir2 - ${result} = Execute AWSCli ls s3://${BUCKET}/dir1/dir2/ - Should not contain ${result} testfile - Should not contain ${result} dir1 - Should contain ${result} file diff --git a/hadoop-ozone/dist/src/main/smoketest/test.sh b/hadoop-ozone/dist/src/main/smoketest/test.sh deleted file mode 100755 index b32955a2a52ab..0000000000000 --- a/hadoop-ozone/dist/src/main/smoketest/test.sh +++ /dev/null @@ -1,102 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -set -e -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )" - -execute_tests(){ - COMPOSE_FILE=$DIR/../compose/$1/docker-compose.yaml - TESTS=$2 - echo "Executing test ${TESTS[*]} with $COMPOSE_FILE" - docker-compose -f "$COMPOSE_FILE" down - docker-compose -f "$COMPOSE_FILE" up -d - docker-compose -f "$COMPOSE_FILE" exec datanode sudo apt-get update - docker-compose -f "$COMPOSE_FILE" exec datanode sudo apt-get install -y python-pip - docker-compose -f "$COMPOSE_FILE" exec datanode sudo pip install robotframework - for TEST in "${TESTS[@]}"; do - set +e - docker-compose -f "$COMPOSE_FILE" exec datanode python -m robot "smoketest/$TEST" - set -e - done - if [ "$KEEP_RUNNING" = false ]; then - docker-compose -f "$COMPOSE_FILE" down - fi -} -RUN_ALL=true -KEEP_RUNNING=false -POSITIONAL=() -while [[ $# -gt 0 ]] -do -key="$1" - -case $key in - --env) - DOCKERENV="$2" - RUN_ALL=false - shift # past argument - shift # past value - ;; - --keep) - KEEP_RUNNING=true - shift # past argument - ;; - --help|-h|-help) - cat << EOF - - Acceptance test executor for ozone. - - This is a lightweight test executor for ozone. - - You can run it with - - ./test.sh - - Which executes all the tests in all the available environments. - - Or you can run manually one test with - - ./test.sh --keep --env ozone-hdfs basic - - --keep means that docker cluster won't be stopped after the test (optional) - --env defines the subdirectory under the compose dir - The remaining parameters define the test suites under smoketest dir. - Could be any directory or robot file relative to the smoketest dir. -EOF - exit 0 - ;; - *) - POSITIONAL+=("$1") # save it in an array for later - shift # past argument - ;; -esac -done - -if [ "$RUN_ALL" = true ]; then -# -# This is the definition of the ozone acceptance test suite -# -# We select the test suites and execute them on multiple type of clusters -# - DEFAULT_TESTS=("basic") - execute_tests ozone "${DEFAULT_TESTS[@]}" - TESTS=("ozonefs") - execute_tests ozonefs "${TESTS[@]}" - TESTS=("s3") - execute_tests ozones3 "${TESTS[@]}" -else - execute_tests "$DOCKERENV" "${POSITIONAL[@]}" -fi diff --git a/hadoop-ozone/docs/README.md b/hadoop-ozone/docs/README.md deleted file mode 100644 index 85817a79d0665..0000000000000 --- a/hadoop-ozone/docs/README.md +++ /dev/null @@ -1,55 +0,0 @@ - -# Hadoop Ozone/HDDS docs - -This subproject contains the inline documentation for Ozone/HDDS components. - -You can create a new page with: - -``` -hugo new content/title.md -``` - -You can check the rendering with: - -``` -hugo serve -``` - -This maven project will create the rendered HTML page during the build (ONLY if hugo is available). -And the dist project will include the documentation. - -You can adjust the menu hierarchy with adjusting the header of the markdown file: - -To show it in the main header add the menu entry: - -``` ---- -menu: main ---- -``` - -To show it as a subpage, you can set the parent. (The value could be the title of the parent page, -our you can defined an `id: ...` in the parent markdown and use that in the parent reference. 
- -``` ---- -menu: - main: - parent: "Getting started" ---- -``` diff --git a/hadoop-ozone/docs/archetypes/default.md b/hadoop-ozone/docs/archetypes/default.md deleted file mode 100644 index f4cc9998dc60f..0000000000000 --- a/hadoop-ozone/docs/archetypes/default.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: "{{ replace .Name "-" " " | title }}" -menu: main ---- - diff --git a/hadoop-ozone/docs/config.yaml b/hadoop-ozone/docs/config.yaml deleted file mode 100644 index e86b59970cd15..0000000000000 --- a/hadoop-ozone/docs/config.yaml +++ /dev/null @@ -1,41 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -languageCode: "en-us" -DefaultContentLanguage: "en" -title: "Ozone" -theme: "ozonedoc" -pygmentsCodeFences: true -uglyurls: true -relativeURLs: true - -menu: - main: - - identifier: Starting - name: "Getting Started" - title: "Getting Started" - url: runningviadocker.html - weight: 1 - - identifier: Client - name: Client - title: Client - url: commandshell.html - weight: 2 - - identifier: Tools - name: Tools - title: Tools - url: dozone.html - weight: 3 diff --git a/hadoop-ozone/docs/content/BucketCommands.md b/hadoop-ozone/docs/content/BucketCommands.md deleted file mode 100644 index 3ab35053028ce..0000000000000 --- a/hadoop-ozone/docs/content/BucketCommands.md +++ /dev/null @@ -1,122 +0,0 @@ ---- -title: Bucket Commands -menu: - main: - parent: Client - weight: 3 ---- - - -Ozone shell supports the following bucket commands. - - * [create](#create) - * [delete](#delete) - * [info](#info) - * [list](#list) - * [update](#update) - -### Create - -The bucket create command allows a user to create a bucket. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the bucket in **/volume/bucket** format. - -{{< highlight bash >}} -ozone sh bucket create /hive/jan -{{< /highlight >}} - -The above command will create a bucket called _jan_ in the _hive_ volume. -Since no scheme was specified this command defaults to O3 (RPC) protocol. - -### Delete - -The bucket delete commands allows an user to delete a volume. If the -bucket is not empty then this command will fail. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the bucket - -{{< highlight bash >}} -ozone sh volume delete /hive/jan -{{< /highlight >}} - -The above command will delete _jan_ bucket if it is empty. - -### Info - -The bucket info commands returns the information about the bucket. -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the bucket. 
- -{{< highlight bash >}} -ozone sh bucket info /hive/jan -{{< /highlight >}} - -The above command will print out the information about _jan_ bucket. - -### List - -The bucket list commands allows uset to list the buckets in a volume. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| -l, --length | Maximum number of results to return. Default: 100 -| -p, --prefix | Optional, Only buckets that match this prefix will be returned. -| -s, --start | The listing will start from key after the start key. -| Uri | The name of the _volume_. - -{{< highlight bash >}} -ozone sh bucket list /hive -{{< /highlight >}} - -This command will list all buckets on the volume _hive_. - - - -### Update - -The bucket update command allows changing access permissions on bucket. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| --addAcl | Optional, Comma separated ACLs that will added to bucket. -| --removeAcl | Optional, Comma separated list of acl to remove. -| Uri | The name of the bucket. - -{{< highlight bash >}} -ozone sh bucket update --addAcl=user:bilbo:rw /hive/jan -{{< /highlight >}} - -The above command gives user bilbo read/write permission to the bucket. - -You can try out these commands from the docker instance of the [Alpha -Cluster](runningviadocker.html). diff --git a/hadoop-ozone/docs/content/BuildingSources.md b/hadoop-ozone/docs/content/BuildingSources.md deleted file mode 100644 index 1953f47f59b0d..0000000000000 --- a/hadoop-ozone/docs/content/BuildingSources.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: Building from Sources -weight: 1 -menu: - main: - parent: Starting - weight: 5 ---- - - -***This is a guide on how to build the ozone sources. If you are not -planning to build sources yourself, you can safely skip this page.*** - -If you are a Hadoop ninja, and wise in the ways of Apache, you already know -that a real Apache release is a source release. - -If you want to build from sources, Please untar the source tarball and run -the ozone build command. This instruction assumes that you have all the -dependencies to build Hadoop on your build machine. If you need instructions -on how to build Hadoop, please look at the Apache Hadoop Website. - -{{< highlight bash >}} -mvn clean package -DskipTests=true -Dmaven.javadoc.skip=true -Phdds -Pdist -Dtar -DskipShade -{{< /highlight >}} - - -This will build an ozone-\.tar.gz in your target directory. - -You can copy this tarball and use this instead of binary artifacts that are -provided along with the official release. - -## How to test the build -You can run the acceptance tests in the hadoop-ozone directory to make sure -that your build is functional. To launch the acceptance tests, please follow - the instructions in the **README.md** in the - ```$hadoop_src/hadoop-ozone/acceptance-test``` directory. Acceptance tests - will start a small ozone cluster and verify that ozone shell and ozone file - system is fully functional. diff --git a/hadoop-ozone/docs/content/CommandShell.md b/hadoop-ozone/docs/content/CommandShell.md deleted file mode 100644 index 74072a5218d79..0000000000000 --- a/hadoop-ozone/docs/content/CommandShell.md +++ /dev/null @@ -1,109 +0,0 @@ ---- -title: Ozone CLI -menu: - main: - parent: Client - weight: 1 ---- - - -Ozone has a set of command line tools that can be used to manage ozone. - -All these commands are invoked via the ```ozone``` script. 
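As a quick orientation before walking through the sub-commands, the script can be exercised directly. The sketch below only uses sub-commands documented on this page and assumes `ozone` is on your PATH:

{{< highlight bash >}}
# Print the version of Ozone and HDDS.
ozone version

# Display the computed Hadoop environment variables.
ozone envvars

# Show the actions available for volumes in the ozone shell.
ozone sh volume --help
{{< /highlight >}}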
- -The commands supported by ozone are: - - * **classpath** - Prints the class path needed to get the hadoop jar and the - required libraries. - * **fs** - Runs a command on ozone file system. - * **datanode** - Via daemon command, the HDDS data nodes can be started or - stopped. - * **envvars** - Display computed Hadoop environment variables. - * **freon** - Runs the ozone load generator. - * **genesis** - Developer Only, Ozone micro-benchmark application. - * **getozoneconf** - Reads ozone config values from configuration. - * **jmxget** - Get JMX exported values from NameNode or DataNode. - * **om** - Ozone Manager, via daemon command can be started or stopped. - * **sh** - Primary command line interface for ozone. - * **scm** - Storage Container Manager service, via daemon can be - stated or stopped. - * **scmcli** - Developer only, Command Line Interface for the Storage - Container Manager. - * **version** - Prints the version of Ozone and HDDS. - * **genconf** - Generate minimally required ozone configs and output to - ozone-site.xml. - -## Understanding Ozone command shell -The most used command when working with Ozone is the Ozone command shell. -Ozone command shell gives a command shell interface to work against -Ozone. - -The Ozone shell commands take the following format. - -> _ozone sh object action url_ - -**ozone** script is used to invoke all Ozone sub-commands. The ozone shell is -invoked via ```sh``` command. - -The object can be a volume, bucket or a key. The action is various verbs like - create, list, delete etc. - - -Ozone URL can point to a volume, bucket or keys in the following format: - -_\[scheme\]\[server:port\]/volume/bucket/key_ - - -Where, - -1. Scheme - Can be one of the following - * o3 - Ozone's native RPC protocol. If you specify this scheme, the - native RPC protocol is used while communicating with Ozone Manager and - data nodes. - * http/https - If an HTTP protocol is specified, then Ozone shell assumes - that you are interested in using the Ozone Rest protocol and falls back - to using the REST protocol instead of RPC. - If no protocol is specified, the Ozone shell defaults to the native RPC - protocol. - -2. Server:Port - This is the address of the Ozone Manager. This can be server - only, in that case, the default port is used. If this value is omitted -then the defaults specified in the ozone-site.xml will be used for Ozone -Manager address. - -Depending on the call, the volume/bucket/key names will be part of the URL. -Please see volume commands, bucket commands, and key commands section for more -detail. - -## Invoking help - -Ozone shell help can be invoked at _object_ level or at _action_ level. -For example: - -{{< highlight bash >}} -ozone sh volume --help -{{< /highlight >}} - -This will show all possible actions for volumes. - -or it can be invoked to explain a specific action like -{{< highlight bash >}} -ozone sh volume create --help -{{< /highlight >}} -This command will give you command line options of the create command. - diff --git a/hadoop-ozone/docs/content/Concepts.md b/hadoop-ozone/docs/content/Concepts.md deleted file mode 100644 index 7f7dd3b48995e..0000000000000 --- a/hadoop-ozone/docs/content/Concepts.md +++ /dev/null @@ -1,108 +0,0 @@ ---- -title: Architecture -date: "2017-10-10" -menu: main ---- - - - -Ozone is a redundant, distributed object store build by -leveraging primitives present in HDFS. The primary design point of ozone is scalability, and it aims to scale to billions of objects. 
 - -Ozone consists of volumes, buckets, and keys. A volume is similar to a home directory in the ozone world. Only an administrator can create it. Volumes are used to store buckets. Once a volume is created, users can create as many buckets as needed. Ozone stores data as keys which live inside these buckets. - -The Ozone namespace is composed of many storage volumes. Storage volumes are also used as the basis for storage accounting. - -To access a key, an Ozone URL has the following format: - -``` -http://servername:port/volume/bucket/key -``` - -Where the server name is the name of a data node, the port is the data node HTTP port. The volume represents the name of the ozone volume; bucket is an ozone bucket created by the user and key represents the file. - -Please look at the [command line interface]({{< ref "CommandShell.md#shell" >}}) for more info. - -Ozone supports both REST and RPC protocols. Clients can choose either of these protocols to communicate with Ozone. Please see the [client documentation]({{< ref "JavaApi.md" >}}) for more details. - -Ozone separates namespace management and block space management; this helps -ozone to scale much better. The namespace is managed by a daemon called -[Ozone Manager]({{< ref "OzoneManager.md" >}}) (OM), and block space is -managed by [Storage Container Manager]({{< ref "Hdds.md" >}}) (SCM). - -The data nodes provide replication and the ability to store blocks; these blocks are stored in groups to reduce the metadata pressure on SCM. These groups of blocks are called storage containers. Hence the block manager is called storage container -manager. - -Ozone Overview -------------- 
The following diagram is a high-level overview of the core components of Ozone.

 - -![Architecture diagram](../../OzoneOverview.svg) - -The main elements of Ozone are: - -### Ozone Manager
 - -[Ozone Manager]({{< ref "OzoneManager.md" >}}) (OM) takes care of Ozone's namespace. -All Ozone objects like volumes, buckets, and keys are managed by OM. In short, OM is the metadata manager for Ozone. -OM talks to the block manager (SCM) to get blocks and passes them on to the Ozone -client. The Ozone client writes data to these blocks. -OM will eventually be replicated via Apache Ratis for High Availability.
 - -### Storage Container Manager - -[Storage Container Manager]({{< ref "Hdds.md" >}}) (SCM) is the block and cluster manager for Ozone. -SCM, together with the data nodes, offers a service called 'storage containers'. -A storage container is a group of unrelated blocks that are managed together as a single entity. - -SCM offers the following abstractions.

 - -![SCM Abstractions](../../SCMBlockDiagram.png) - -### Blocks -Blocks are similar to blocks in HDFS. They are replicated stores of data. Clients write data to blocks. - -### Containers -A collection of blocks that are replicated and managed together. - -### Pipelines -SCM allows each storage container to choose its method of replication. -For example, a storage container might decide that it needs only one copy of a block -and might choose a stand-alone pipeline. Another storage container might want a very high level of reliability and pick a Ratis-based pipeline. In other words, SCM allows different kinds of replication strategies to co-exist. The client, while writing data, chooses a storage container with the required properties. - -### Pools -A group of data nodes is called a pool. For scaling purposes, -we define a pool as a set of machines. This makes management of data nodes easier. - -### Nodes -The data node where data is stored. SCM monitors these nodes via heartbeats. - -### Clients -Ozone ships with a set of clients. Ozone [CLI]({{< ref "CommandShell.md#shell" >}}) is the command line interface, similar to the 'hdfs' command.
 [Freon]({{< ref "Freon.md" >}}) is a load generation tool for Ozone.
 - -### REST Handler -Ozone provides an RPC (Remote Procedure Call) as well as a REST (Representational State Transfer) interface. This allows clients to be written in many languages quickly. Ozone strives to maintain API compatibility between REST and RPC. -For most purposes, a client can make a one-line change to switch from REST to RPC or vice versa.
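To make the REST/RPC symmetry concrete, here is a sketch of the same volume read issued both ways. The port, headers, and URL shapes are taken from the REST API and CLI pages of these docs; the host names are placeholders:

```
# RPC: read volume info through the native o3 protocol.
ozone sh volume info o3://ozoneManager/volume-of-bilbo

# REST: the same read expressed as an HTTP call against the REST port.
curl -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" \
     -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" \
     "http://localhost:9880/volume-of-bilbo?info=volume"
```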
 - -### Ozone File System -Ozone file system (TODO: Add documentation) is a Hadoop compatible file system. This allows Hadoop services and applications like Hive and Spark to run against -Ozone without any change. - -### Ozone Client -This is similar to DFSClient in HDFS. This is the standard client to talk to Ozone. All other components that we have discussed so far rely on Ozone client. Ozone client supports both RPC and REST protocols. diff --git a/hadoop-ozone/docs/content/Dozone.md b/hadoop-ozone/docs/content/Dozone.md deleted file mode 100644 index 7906cf3fd4e8b..0000000000000 --- a/hadoop-ozone/docs/content/Dozone.md +++ /dev/null @@ -1,110 +0,0 @@ ---- -title: "Dozone & Dev Tools" -date: 2017-08-10 -menu: - main: - parent: Tools ---- - - - -Dozone stands for docker for ozone. Ozone supports docker to make it easy to develop and test ozone. Starting a docker based ozone container is simple. - -In the `compose/ozone` directory there are two files that define the docker and ozone settings. - -Developers can - -{{< highlight bash >}} -cd compose/ozone -{{< /highlight >}} - -and simply run - -{{< highlight bash >}} -docker-compose up -d -{{< /highlight >}} - -to run a ozone cluster on docker. - -This command will launch a Namenode, OM, SCM and a data node. - -To access the OM UI, one can run 'http://localhost:9874'. - -_Please note_: dozone does not map the data node ports to the 9864. Instead, it maps to the ephemeral port range. So many examples in the command shell will not work if you run those commands from the host machine. To find out where the data node port is listening, you can run the `docker ps` command or always ssh into a container before running ozone commands. - -To shutdown a running docker based ozone cluster, please run - -{{< highlight bash >}} -docker-compose down -{{< /highlight >}} - - -Adding more config settings ---------------------------- -The file called `docker-config` contains all ozone specific config settings. This file is processed to create the ozone-site.xml. - -Useful Docker & Ozone Commands ------------------------------- - -If you make any modifications to ozone, the simplest way to test it is to run freon and unit tests. - -Here are the instructions to run corona in a docker based cluster. - -{{< highlight bash >}} -docker-compose exec datanode bash -{{< /highlight >}} - -This will open a bash shell on the data node container. -Now we can execute corona for load generation. - -{{< highlight bash >}} -ozone freon randomkeys --numOfVolumes=10 --numOfBuckets 10 --numOfKeys 10 -{{< /highlight >}} - -Here is a set helpful commands while working with docker for ozone. 
-To check the status of the components: - -{{< highlight bash >}} -docker-compose ps -{{< /highlight >}} - -To get logs from a specific node/service: - -{{< highlight bash >}} -docker-compose logs scm -{{< /highlight >}} - - -As the WebUI ports are forwarded to the external machine, you can check the web UI: - -* For the Storage Container Manager: http://localhost:9876 -* For the Ozone Managerr: http://localhost:9874 -* For the Datanode: check the port with docker ps (as there could be multiple data node ports are mapped to the ephemeral port range) -* For the Namenode: http://localhost:9870 - -You can start multiple data nodes with: - -{{< highlight bash >}} -docker-compose scale datanode=3 -{{< /highlight >}} - -You can test the commands from the [Ozone CLI]({{< ref "CommandShell.md#shell" >}}) after opening a new bash shell in one of the containers: - -{{< highlight bash >}} -docker-compose exec datanode bash -{{< /highlight >}} diff --git a/hadoop-ozone/docs/content/Freon.md b/hadoop-ozone/docs/content/Freon.md deleted file mode 100644 index 6ef0280717e66..0000000000000 --- a/hadoop-ozone/docs/content/Freon.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -title: Freon -date: "2017-09-02T23:58:17-07:00" -menu: - main: - parent: Tools ---- - - -Overview --------- - -Freon is a load-generator for Ozone. This tool is used for testing the functionality of ozone. - -### Random keys - -In randomkeys mode, the data written into ozone cluster is randomly generated. -Each key will be of size 10 KB. - -The number of volumes/buckets/keys can be configured. The replication type and -factor (eg. replicate with ratis to 3 nodes) Also can be configured. - -For more information use - -`bin/ozone freon --help` - -### Example - -{{< highlight bash >}} -ozone freon randomkeys --numOfVolumes=10 --numOfBuckets 10 --numOfKeys 10 --replicationType=RATIS --factor=THREE -{{< /highlight >}} - -{{< highlight bash >}} -*************************************************** -Status: Success -Git Base Revision: 48aae081e5afacbb3240657556b26c29e61830c3 -Number of Volumes created: 10 -Number of Buckets created: 100 -Number of Keys added: 1000 -Ratis replication factor: THREE -Ratis replication type: RATIS -Average Time spent in volume creation: 00:00:00,035 -Average Time spent in bucket creation: 00:00:00,319 -Average Time spent in key creation: 00:00:03,659 -Average Time spent in key write: 00:00:10,894 -Total bytes written: 10240000 -Total Execution time: 00:00:16,898 -*********************** -{{< /highlight >}} diff --git a/hadoop-ozone/docs/content/Hdds.md b/hadoop-ozone/docs/content/Hdds.md deleted file mode 100644 index 9978c26e846a8..0000000000000 --- a/hadoop-ozone/docs/content/Hdds.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -title: "Hadoop Distributed Data Store" -date: "2017-09-14" -menu: - main: - parent: Architecture -weight: 10 ---- - - -SCM Overview ------------- - -Storage Container Manager or SCM is a very important component of ozone. SCM -offers block and container-based services to Ozone Manager. A container is a -collection of unrelated blocks under ozone. SCM and data nodes work together -to maintain the replication levels needed by the cluster. - -It is easier to look at a putKey operation to understand the role that SCM plays. - -To put a key, a client makes a call to KSM with the following arguments. - --- putKey(keyName, data, pipeline type, replication count) - -1. keyName - refers to the file name. -2. data - The data that the client wants to write. -3. 
pipeline type - Allows the client to select the pipeline type. A pipeline - refers to the replication strategy used for replicating a block. Ozone - currently supports Stand Alone and Ratis as two different pipeline types. -4. replication count - This specifies how many copies of the block replica should be maintained. - -In most cases, the client does not specify the pipeline type and replication - count. The default pipeline type and replication count are used. - - -Ozone Manager when it receives the putKey call, makes a call to SCM asking -for a pipeline instance with the specified property. So if the client asked -for RATIS replication strategy and a replication count of three, then OM -requests SCM to return a set of data nodes that meet this capability. - -If SCM can find this a pipeline ( that is a set of data nodes) that can meet -the requirement from the client, then those nodes are returned to OM. OM will -persist this info and return a tuple consisting of {BlockID, ContainerName, and Pipeline}. - -If SCM is not able to find a pipeline, then SCM creates a logical pipeline and then returns it. - - -SCM manages blocks, containers, and pipelines. To return healthy pipelines, -SCM also needs to understand the node health. So SCM listens to heartbeats -from data nodes and acts as the node manager too. diff --git a/hadoop-ozone/docs/content/JavaApi.md b/hadoop-ozone/docs/content/JavaApi.md deleted file mode 100644 index 1d32bed1ecf57..0000000000000 --- a/hadoop-ozone/docs/content/JavaApi.md +++ /dev/null @@ -1,172 +0,0 @@ ---- -title: "Java API" -date: "2017-09-14" -menu: - main: - parent: "Client" ---- - - -Introduction -------------- - -Ozone ships with it own client library, that supports both RPC(Remote -Procedure call) and REST(Representational State Transfer). This library is -the primary user interface to ozone. - -It is trivial to switch from RPC to REST or vice versa, by setting the -property _ozone.client.protocol_ in the configuration or by calling the -appropriate factory method. - -## Creating an Ozone client -The Ozone client factory creates the ozone client. It allows the user to -specify the protocol of communication. For example, to get an REST client, we -can use - -{{< highlight java >}} -OzoneClient ozClient = OzoneClientFactory.getRestClient(); -{{< /highlight >}} - -And to get a a RPC client we can call - -{{< highlight java >}} -OzoneClient ozClient = OzoneClientFactory.getRpcClient(); -{{< /highlight >}} - -If the user want to create a client based on the configuration, then they can -call - -{{< highlight java >}} -OzoneClient ozClient = OzoneClientFactory.getClient(); -{{< /highlight >}} - -and an appropriate client based on configuration will be returned. - -## Writing data using Ozone Client - -The hierarchy of data inside ozone is a volume, bucket and a key. A volume -is a collection of buckets. A bucket is a collection of keys. To write data -to the ozone, you need a volume, bucket and a key. - -### Creating a Volume - -Once we have a client, we need to get a reference to the ObjectStore. This -is done via - -{{< highlight java >}} -ObjectStore objectStore = ozClient.getObjectStore(); -{{< /highlight >}} - -An object store represents an active cluster against which the client is working. - -{{< highlight java >}} -// Let us create a volume to store our game assets. -// This uses default arguments for creating that volume. -objectStore.createVolume(“assets”); - -// Let us verify that the volume got created. 
-OzoneVolume assets = objectStore.getVolume(“assets”); -{{< /highlight >}} - - -It is possible to pass an array of arguments to the createVolume by creating volume arguments. - -### Creating a Bucket - -Once you have a volume, you can create buckets inside the volume. - -{{< highlight bash >}} -// Let us create a bucket called videos. -assets.createBucket(“videos”); -Ozonebucket video = assets.getBucket(“videos”); -{{< /highlight >}} - -At this point we have a usable volume and a bucket. Our volume is called assets and bucket is called videos. - -Now we can create a Key. - -### Reading and Writing a Key - -With a bucket object the users can now read and write keys. The following code reads a video called intro.mp4 from the local disk and stores in the video bucket that we just created. - -{{< highlight bash >}} -// read data from the file, this is a user provided function. -byte [] vidoeData = readFile(“into.mp4”); - -// Create an output stream and write data. -OzoneOutputStream videoStream = video.createKey(“intro.mp4”, 1048576); -videoStream.write(videoData); - -// Close the stream when it is done. - videoStream.close(); - - -// We can use the same bucket to read the file that we just wrote, by creating an input Stream. -// Let us allocate a byte array to hold the video first. -byte[] data = new byte[(int)1048576]; -OzoneInputStream introStream = video.readKey(“intro.mp4”); -// read intro.mp4 into the data buffer -introStream.read(data); -introStream.close(); -{{< /highlight >}} - - -Here is a complete example of the code that we just wrote. Please note the close functions being called in this program. - -{{< highlight java >}} -// Let us create a client -OzoneClient ozClient = OzoneClientFactory.getClient(); - -// Get a reference to the ObjectStore using the client -ObjectStore objectStore = ozClient.getObjectStore(); - -// Let us create a volume to store our game assets. -// This default arguments for creating that volume. -objectStore.createVolume(“assets”); - -// Let us verify that the volume got created. -OzoneVolume assets = objectStore.getVolume(“assets”); - -// Let us create a bucket called videos. -assets.createBucket(“videos”); -Ozonebucket video = assets.getBucket(“videos”); - -// read data from the file, this is assumed to be a user provided function. -byte [] vidoeData = readFile(“into.mp4”); - -// Create an output stream and write data. -OzoneOutputStream videoStream = video.createKey(“intro.mp4”, 1048576); -videoStream.write(videoData); - -// Close the stream when it is done. - videoStream.close(); - - -// We can use the same bucket to read the file that we just wrote, by creating an input Stream. -// Let us allocate a byte array to hold the video first. - -byte[] data = new byte[(int)1048576]; -OzoneInputStream introStream = video.readKey(“into.mp4”); -introStream.read(data); - -// Close the stream when it is done. -introStream.close(); - -// Close the client. -ozClient.close(); -{{< /highlight >}} diff --git a/hadoop-ozone/docs/content/KeyCommands.md b/hadoop-ozone/docs/content/KeyCommands.md deleted file mode 100644 index 0139a288c9fb7..0000000000000 --- a/hadoop-ozone/docs/content/KeyCommands.md +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: Key Commands -menu: - main: - parent: Client - weight: 3 ---- - - -Ozone shell supports the following key commands. - - * [get](#get) - * [put](#put) - * [delete](#delete) - * [info](#info) - * [list](#list) - - -### Get - -The key get command downloads a key from Ozone cluster to local file system. 
- -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the key in **/volume/bucket/key** format. -| FileName | Local file to download the key to. - - -{{< highlight bash >}} -ozone sh key get /hive/jan/sales.orc sales.orc -{{< /highlight >}} -Downloads the file sales.orc from the _/hive/jan_ bucket and writes to the -local file sales.orc. - -### Put - -Uploads a file from the local file system to the specified bucket. - -***Params:*** - - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the key in **/volume/bucket/key** format. -| FileName | Local file to upload. -| -r, --replication | Optional, Number of copies, ONE or THREE are the options. Picks up the default from cluster configuration. - -{{< highlight bash >}} -ozone sh key put /hive/jan/corrected-sales.orc sales.orc -{{< /highlight >}} -The above command will put the sales.orc as a new key into _/hive/jan/corrected-sales.orc_. - -### Delete - -The delete key command removes the key from the bucket. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the key. - -{{< highlight bash >}} -ozone sh key delete /hive/jan/corrected-sales.orc -{{< /highlight >}} - -The above command deletes the key _/hive/jan/corrected-sales.orc_. - - -### Info - -The key info commands returns the information about the key. -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the key. - -{{< highlight bash >}} -ozone sh key info /hive/jan/sales.orc -{{< /highlight >}} - -The above command will print out the information about _/hive/jan/sales.orc_ -key. - -### List - -The key list commands allows user to list all keys in a bucket. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| -l, --length | Maximum number of results to return. Default: 1000 -| -p, --prefix | Optional, Only buckets that match this prefix will be returned. -| -s, --start | The listing will start from key after the start key. -| Uri | The name of the _volume_. - -{{< highlight bash >}} -ozone sh key list /hive/jan -{{< /highlight >}} - -This command will list all key in the bucket _/hive/jan_. - - - - - -You can try out these commands from the docker instance of the [Alpha -Cluster](runningviadocker.html). diff --git a/hadoop-ozone/docs/content/OzoneFS.md b/hadoop-ozone/docs/content/OzoneFS.md deleted file mode 100644 index d0621bee6dd3c..0000000000000 --- a/hadoop-ozone/docs/content/OzoneFS.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -title: Ozone File System -date: 2017-09-14 -menu: main -menu: - main: - parent: Client ---- - - -There are many Hadoop compatible files systems under Hadoop. Hadoop compatible file systems ensures that storage backends like Ozone can easily be integrated into Hadoop eco-system. - -## Setting up the Ozone file system - -To create an ozone file system, we have to choose a bucket where the file system would live. This bucket will be used as the backend store for OzoneFileSystem. All the files and directories will be stored as keys in this bucket. - -Please run the following commands to create a volume and bucket, if you don't have them already. 
- -{{< highlight bash >}} -ozone sh volume create /volume -ozone sh bucket create /volume/bucket -{{< /highlight >}} - -Once this is created, please make sure that bucket exists via the listVolume or listBucket commands. - -Please add the following entry to the core-site.xml. - -{{< highlight xml >}} - - fs.o3.impl - org.apache.hadoop.fs.ozone.OzoneFileSystem - - - fs.default.name - o3://localhost:9864/volume/bucket - -{{< /highlight >}} - -This will make this bucket to be the default file system for HDFS dfs commands and register the o3 file system type.. - -You also need to add the ozone-filesystem.jar file to the classpath: - -{{< highlight bash >}} -export HADOOP_CLASSPATH=/opt/ozone/share/hadoop/ozonefs/hadoop-ozone-filesystem.jar:$HADOOP_CLASSPATH -{{< /highlight >}} - - - - -Once the default Filesystem has been setup, users can run commands like ls, put, mkdir, etc. -For example, - -{{< highlight bash >}} -hdfs dfs -ls / -{{< /highlight >}} - -or - -{{< highlight bash >}} -hdfs dfs -mkdir /users -{{< /highlight >}} - - -Or put command etc. In other words, all programs like Hive, Spark, and Distcp will work against this file system. -Please note that any keys created/deleted in the bucket using methods apart from OzoneFileSystem will show up as diectories and files in the Ozone File System. diff --git a/hadoop-ozone/docs/content/OzoneManager.md b/hadoop-ozone/docs/content/OzoneManager.md deleted file mode 100644 index 560f827a58d3e..0000000000000 --- a/hadoop-ozone/docs/content/OzoneManager.md +++ /dev/null @@ -1,77 +0,0 @@ ---- -title: "Ozone Manager" -date: "2017-09-14" -menu: - main: - parent: Architecture -weight: 11 ---- - - -OM Overview -------------- - -Ozone Manager or OM is the namespace manager for Ozone. The clients (RPC clients, Rest proxy, Ozone file system, etc.) communicate with OM to create and delete various ozone objects. - -Each ozone volume is the root of a namespace under OM. This is very different from HDFS which provides a single rooted file system. - -Ozone's namespace is a collection of volumes or is a forest instead of a -single rooted tree as in HDFS. This property makes it easy to deploy multiple - OMs for scaling, this feature is under development. - -OM Metadata ------------------ - -Conceptually, OM maintains a list of volumes, buckets, and keys. For each user, it maintains a list of volumes. For each volume, the list of buckets and for each bucket the list of keys. - -Right now, OM is a single instance service. Ozone already relies on Apache Ratis (A Replicated State Machine based on Raft protocol). OM will be extended to replicate all its metadata via Ratis. With that, OM will be highly available. - -OM UI ------------- - -OM supports a simple UI for the time being. The default port of OM is 9874. To access the OM UI, the user can connect to http://OM:port or for a concrete example, -``` -http://omserver:9874/ -``` -OM UI primarily tries to measure load and latency of OM. The first section of OM UI relates to the number of operations seen by the cluster broken down by the object, operation and whether the operation was successful. - -The latter part of the UI is focused on latency and number of operations that OM is performing. - -One of the hardest problems in HDFS world is discovering the numerous settings offered to tune HDFS. Ozone solves that problem by tagging the configs. To discover settings, click on "Common Tools"->Config. This will take you to the ozone config UI. 
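If you prefer the command line to the browser, the same information can usually be scraped over HTTP. The sketch below assumes the standard Hadoop `/jmx` and `/conf` servlets are exposed on the OM web port; verify that on your build before relying on it.

```
# Metrics backing the OM UI, as JSON (assumes the Hadoop /jmx servlet).
curl -s http://omserver:9874/jmx | head -n 20

# Effective configuration, including the tags shown in the config UI
# (assumes the Hadoop /conf servlet).
curl -s http://omserver:9874/conf | head -n 20
```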
- -Config UI ------------- - -The ozone config UI is a matrix with row representing the tags, and columns representing All, OM and SCM. - -Suppose a user wanted to discover the required settings for ozone. Then the user can tick the checkbox that says "Required." -This will filter out all "Required" settings along with the description of what each setting does. - -The user can combine different checkboxes and UI will combine the results. That is, If you have more than one row selected, then all keys for those chosen tags are displayed together. - -We are hopeful that this leads to a more straightforward way of discovering settings that manage ozone. - - -OM and SCM -------------------- -[Storage container manager]({{< ref "Hdds.md" >}}) or (SCM) is the block manager - for ozone. When a client requests OM for a set of data nodes to write data, OM talk to SCM and gets a block. - -A block returned by SCM contains a pipeline, which is a set of nodes that we participate in that block replication. - -So OM is dependent on SCM for reading and writing of Keys. However, OM is independent of SCM while doing metadata operations like ozone volume or bucket operations. diff --git a/hadoop-ozone/docs/content/RealCluster.md b/hadoop-ozone/docs/content/RealCluster.md deleted file mode 100644 index 9d86c8458a395..0000000000000 --- a/hadoop-ozone/docs/content/RealCluster.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: Starting an Ozone Cluster -weight: 1 -menu: - main: - parent: Starting - weight: 3 ---- - - -Before we boot up the Ozone cluster, we need to initialize both SCM and Ozone Manager. - -{{< highlight bash >}} -ozone scm -init -{{< /highlight >}} -This allows SCM to create the cluster Identity and initialize its state. -The ```init``` command is similar to Namenode format. Init command is executed only once, that allows SCM to create all the required on-disk structures to work correctly. -{{< highlight bash >}} -ozone --daemon start scm -{{< /highlight >}} - -Once we know SCM is up and running, we can create an Object Store for our use. This is done by running the following command. - -{{< highlight bash >}} -ozone om -createObjectStore -{{< /highlight >}} - - -Once Ozone manager has created the Object Store, we are ready to run the name -services. - -{{< highlight bash >}} -ozone --daemon start om -{{< /highlight >}} - -At this point Ozone's name services, the Ozone manager, and the block service SCM is both running. -**Please note**: If SCM is not running -```createObjectStore``` command will fail. SCM start will fail if on-disk data structures are missing. So please make sure you have done both ```init``` and ```createObjectStore``` commands. - -Now we need to start the data nodes. Please run the following command on each datanode. -{{< highlight bash >}} -ozone --daemon start datanode -{{< /highlight >}} - -At this point SCM, Ozone Manager and data nodes are up and running. - -***Congratulations!, You have set up a functional ozone cluster.*** - -------- -If you want to make your life simpler, you can just run -{{< highlight bash >}} -ozone scm -init -ozone om -createObjectStore -start-ozone.sh -{{< /highlight >}} -This assumes that you have set up the slaves file correctly and ssh -configuration that allows ssh-ing to all data nodes. This is the same as the -HDFS configuration, so please refer to HDFS documentation on how to set this -up. 
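A short verification sketch can save debugging time after the daemons are started. It assumes the default web ports used elsewhere in these docs (9876 for SCM, 9874 for OM) and a JDK whose `jps` is on the PATH; the process names printed by `jps` may differ between releases.

{{< highlight bash >}}
# List the running Java daemons; exact class names vary by release.
jps

# Probe the web UIs documented for SCM and OM.
curl -sf http://localhost:9876/ > /dev/null && echo "SCM UI reachable"
curl -sf http://localhost:9874/ > /dev/null && echo "OM UI reachable"
{{< /highlight >}}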
diff --git a/hadoop-ozone/docs/content/Rest.md b/hadoop-ozone/docs/content/Rest.md deleted file mode 100644 index a25d3ab0ae67f..0000000000000 --- a/hadoop-ozone/docs/content/Rest.md +++ /dev/null @@ -1,544 +0,0 @@ ---- -title: REST API -menu: - main: - parent: Client ---- - - -The Ozone REST API's allows user to access ozone via REST protocol. - -## Authentication and Authorization - -For time being, The default authentication mode of REST API is insecure access -mode, which is *Simple* mode. Under this mode, ozone server trusts the user -name specified by client and it does not perform any authentication. - -User name can be specified in HTTP header by - -* `x-ozone-user: {USER_NAME}` - -for example if add following header *x-ozone-user: bilbo* in the HTTP request, -then operation will be executed as *bilbo* user. -In *Simple* mode, there is no real authorization either. Client can be -authorized to obtain administrator privilege by using HTTP header - -* `Authorization: {AUTH_METHOD} {SIGNATURE}` - -for example set following header *Authorization: OZONE root* in the HTTP request, -then ozone will authorize the client with administrator privilege. - -## Common REST Headers - -The following HTTP headers must be set for each REST call. - -| Property | Description | -|:---- |:---- -| Authorization | The authorization field determines which authentication method is used by ozone. Currently only *simple* mode is supported, the corresponding value is *OZONE*. Optionally an user name can be set as *OZONE {USER_NAME}* to authorize as a particular user. | -| Date | Standard HTTP header that represents dates. The format is - day of the week, month, day, year and time (military time format) in GMT. Any other time zone will be rejected by ozone server. Eg. *Date : Mon, Apr 4, 2016 06:22:00 GMT*. This field is required. | -| x-ozone-version | A required HTTP header to indicate which version of API this call will be communicating to. E.g *x-ozone-version: v1*. Currently ozone only publishes v1 version API. | - -## Common Reply Headers - -The common reply headers are part of all Ozone server replies. - -| Property | Description | -|:---- |:---- -| Date | This is the HTTP date header and it is set to server’s local time expressed in GMT. | -| x-ozone-request-id | This is a UUID string that represents an unique request ID. This ID is used to track the request through the ozone system and is useful for debugging purposes. | -| x-ozone-server-name | Fully qualified domain name of the sever which handled the request. | - -## Volume APIs - -### Create a Volume - -This API allows admins to create a new storage volume. - -Schema: - -- `POST /{volume}?quota=` - -Query Parameter: - -| Query Parameter | Value | Description | -|:---- |:---- |:---- -| quota | long | Optional. Quota size in BYTEs, MBs, GBs or TBs | - -Sample HTTP POST request: - - curl -i -X POST -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE root" "http://localhost:9880/volume-to-create" - -this request creates a volume as user *bilbo*, the authorization field is set to *OZONE root* because this call requires administration privilege. The client receives a response with zero content length. 
- - HTTP/1.1 201 Created - x-ozone-server-name: localhost - x-ozone-request-id: 2173deb5-bbb7-4f0a-8236-f354784e3bae - Date: Tue, 27 Jun 2017 07:42:04 GMT - Content-Type: application/octet-stream - Content-Length: 0 - Connection: keep-alive - -### Update Volume - -This API allows administrators to update volume info such as ownership and quota. This API requires administration privilege. - -Schema: - -- `PUT /{volume}?quota=` - -Query Parameter: - -| Query Parameter | Value | Description | -|:---- |:---- |:---- -| quota | long \| remove | Optional. Quota size in BYTEs, MBs, GBs or TBs. Or use string value *remove* to remove an existing quota for a volume. | - -Sample HTTP PUT request: - - curl -X PUT -H "Authorization:OZONE root" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "x-ozone-version: v1" -H "x-ozone-user: john" http://localhost:9880/volume-to-update - -this request modifies the owner of */volume-to-update* to *john*. - -### Delete Volume - -This API allows user to delete a volume owned by themselves if the volume is not empty. Administrators can delete volumes owned by any user. - -Schema: - -- `DELETE /{volume}` - -Sample HTTP DELETE request: - - curl -i -X DELETE -H "Authorization:OZONE root" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "x-ozone-version: v1" -H "x-ozone-user: bilbo" http://localhost:9880/volume-to-delete - -this request deletes an empty volume */volume-to-delete*. The client receives a zero length content. - - HTTP/1.1 200 OK - x-ozone-server-name: localhost - x-ozone-request-id: 6af14c64-e3a9-40fe-9634-df60b7cbbc6a - Date: Tue, 27 Jun 2017 08:49:52 GMT - Content-Type: application/octet-stream - Content-Length: 0 - Connection: keep-alive - -### Info Volume - -This API allows user to read the info of a volume owned by themselves. Administrators can read volume info owned by any user. - -Schema: - -- `GET /{volume}?info=volume` - -Query Parameter: - -| Query Parameter | Value | Description | -|:---- |:---- |:---- -| info | "volume" | Required and enforced with this value. | - -Sample HTTP GET request: - - curl -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" "http://localhost:9880/volume-of-bilbo?info=volume" - -this request gets the info of volume */volume-of-bilbo*, the client receives a response with a JSON object of volume info. - - HTTP/1.1 200 OK - x-ozone-server-name: localhost - x-ozone-request-id: a2224806-beaf-42dd-a68e-533cd7508f74 - Date: Tue, 27 Jun 2017 07:55:35 GMT - Content-Type: application/octet-stream - Content-Length: 171 - Connection: keep-alive - - { - "owner" : { "name" : "bilbo" }, - "quota" : { "unit" : "TB", "size" : 1048576 }, - "volumeName" : "volume-of-bilbo", - "createdOn" : "Tue, 27 Jun 2017 07:42:04 GMT", - "createdBy" : "root" - } - -### List Volumes - -This API allows user to list all volumes owned by themselves. Administrators can list all volumes owned by any user. - -Schema: - -- `GET /?prefix=&max-keys=&prev-key=` - -Query Parameter: - -| Query Parameter | Value | Description | -|:---- |:---- |:---- -| prefix | string | Optional. Only volumes with this prefix are included in the result. | -| max-keys | int | Optional. Maximum number of volumes included in the result. Default is 1024 if not specified. | -| prev-key | string | Optional. Volume name from where listing should start, this key is excluded in the result. It must be a valid volume name. | -| root-scan | bool | Optional. List all volumes in the cluster if this is set to true. Default false. 
| - -Sample HTTP GET request: - - curl -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" "http://localhost:9880/?max-keys=100&prefix=Jan" - -this request gets all volumes owned by *bilbo* and each volume's name contains prefix *Jan*, the result at most contains *100* entries. The client receives a list of SON objects, each of them describes the info of a volume. - - HTTP/1.1 200 OK - x-ozone-server-name: localhost - x-ozone-request-id: 7fa0dce1-a8bd-4387-bc3c-1dac4b710bb1 - Date: Tue, 27 Jun 2017 08:07:04 GMT - Content-Type: application/octet-stream - Content-Length: 602 - Connection: keep-alive - - { - "volumes" : [ - { - "owner" : { "name" : "bilbo"}, - "quota" : { "unit" : "TB", "size" : 2 }, - "volumeName" : "Jan-vol1", - "createdOn" : "Tue, 27 Jun 2017 07:42:04 GMT", - "createdBy" : root - }, - ... - ] - } - -## Bucket APIs - -### Create Bucket - -This API allows an user to create a bucket in a volume. - -Schema: - -- `POST /{volume}/{bucket}` - -Additional HTTP Headers: - -| HTTP Header | Value | Description | -|:---- |:---- |:---- -| x-ozone-acl | ozone ACLs | Optional. Ozone acls. | -| x-ozone-storage-class | | Optional. Storage type for a volume. | -| x-ozone-bucket-versioning | enabled/disabled | Optional. Do enable bucket versioning or not. | - -Sample HTTP POST request: - - curl -i -X POST -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" http://localhost:9880/volume-of-bilbo/bucket-0 - -this request creates a bucket *bucket-0* under volume *volume-of-bilbo*. - - HTTP/1.1 201 Created - x-ozone-server-name: localhost - x-ozone-request-id: 49acfeec-4c85-470a-872b-2eaebd8d751e - Date: Tue, 27 Jun 2017 08:55:25 GMT - Content-Type: application/octet-stream - Content-Length: 0 - Connection: keep-alive - -### Update Bucket - -Updates bucket meta-data, like ACLs. - -Schema: - -- `PUT /{volume}/{bucket}` - -Additional HTTP Headers: - -| HTTP Header | Value | Description | -|:---- |:---- |:---- -| x-ozone-acl | ozone ACLs | Optional. Ozone acls. | -| x-ozone-bucket-versioning | enabled/disabled | Optional. Do enable bucket versioning or not. | - -Sample HTTP PUT request: - - curl -i -X PUT -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" -H "x-ozone-acl: ADD user:peregrin:rw" http://localhost:9880/volume-of-bilbo/bucket-to-update - -this request adds an ACL policy specified by HTTP header *x-ozone-acl* to bucket */volume-of-bilbo/bucket-to-update*, the ACL field *ADD user:peregrin:rw* gives add additional read/write permission to user *peregrin* to this bucket. - - HTTP/1.1 200 OK - x-ozone-server-name: localhost - x-ozone-request-id: b061a295-5faf-4b98-94b9-8b3e87c8eb5e - Date: Tue, 27 Jun 2017 09:02:37 GMT - Content-Type: application/octet-stream - Content-Length: 0 - Connection: keep-alive - -### Delete Bucket - -Deletes a bucket if it is empty. An user can only delete bucket owned by themselves, and administrators can delete buckets owned by any user, as long as it is empty. - -Schema: - -- `DELETE /{volume}/{bucket}` - -Sample HTTP DELETE request: - - curl -i -X DELETE -H "Authorization:OZONE root" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "x-ozone-version: v1" -H "x-ozone-user:bilbo" "http://localhost:9880/volume-of-bilbo/bucket-0" - -this request deletes bucket */volume-of-bilbo/bucket-0*. The client receives a zero length content response. 
- - HTTP/1.1 200 OK - x-ozone-server-name: localhost - x-ozone-request-id: f57acd7a-2116-4c2f-aa2f-5a483db81c9c - Date: Tue, 27 Jun 2017 09:16:52 GMT - Content-Type: application/octet-stream - Content-Length: 0 - Connection: keep-alive - - -### Info Bucket - -This API returns information about a given bucket. - -Schema: - -- `GET /{volume}/{bucket}?info=bucket` - -Query Parameters: - -| Query Parameter | Value | Description | -|:---- |:---- |:---- -| info | "bucket" | Required and enforced with this value. | - -Sample HTTP GET request: - - curl -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" "http://localhost:9880/volume-of-bilbo/bucket-0?info=bucket" - -this request gets the info of bucket */volume-of-bilbo/bucket-0*. The client receives a response of JSON object contains bucket info. - - HTTP/1.1 200 OK - x-ozone-server-name: localhost - x-ozone-request-id: f125485b-8cae-4c7f-a2d6-5b1fefd6f193 - Date: Tue, 27 Jun 2017 09:08:31 GMT - Content-Type: application/json - Content-Length: 138 - Connection: keep-alive - - { - "volumeName" : "volume-of-bilbo", - "bucketName" : "bucket-0", - "createdOn" : "Tue, 27 Jun 2017 08:55:25 GMT", - "acls" : [ ], - "versioning" : "DISABLED", - "storageType" : "DISK" - } - -### List Buckets - -List buckets in a given volume. - -Schema: - -- `GET /{volume}?prefix=&max-keys=&prev-key=` - -Query Parameters: - -| Query Parameter | Value | Description | -|:---- |:---- |:---- -| prefix | string | Optional. Only buckets with this prefix are included in the result. | -| max-keys | int | Optional. Maximum number of buckets included in the result. Default is 1024 if not specified. | -| prev-key | string | Optional. Bucket name from where listing should start, this key is excluded in the result. It must be a valid bucket name. | - -Sample HTTP GET request: - - curl -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" "http://localhost:9880/volume-of-bilbo?max-keys=10" - -this request lists all the buckets under volume *volume-of-bilbo*, and the result at most contains 10 entries. The client receives response of a array of JSON objects, each of them represents for a bucket info. - - HTTP/1.1 200 OK - x-ozone-server-name: localhost - x-ozone-request-id: e048c3d5-169c-470f-9903-632d9f9e32d5 - Date: Tue, 27 Jun 2017 09:12:18 GMT - Content-Type: application/octet-stream - Content-Length: 207 - Connection: keep-alive - - { - "buckets" : [ { - "volumeName" : "volume-of-bilbo", - "bucketName" : "bucket-0", - "createdOn" : "Tue, 27 Jun 2017 08:55:25 GMT", - "acls" : [ ], - "versioning" : null, - "storageType" : "DISK", - "bytesUsed" : 0, - "keyCount" : 0 - }, - ... - ] - } - -## Key APIs - -### Put Key - -This API allows user to create or overwrite keys inside of a bucket. - -Schema: - -- `PUT /{volume}/{bucket}/{key}` - -Additional HTTP headers: - -| HTTP Header | Value | Description | -|:---- |:---- |:---- -| Content-MD5 | MD5 digest | Standard HTTP header, file hash. | - -Sample PUT HTTP request: - - curl -X PUT -T /path/to/localfile -H "Authorization:OZONE" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "x-ozone-version: v1" -H "x-ozone-user:bilbo" "http://localhost:9880/volume-of-bilbo/bucket-0/file-0" - -this request uploads a local file */path/to/localfile* specified by option *-T* to ozone as user *bilbo*, mapped to ozone key */volume-of-bilbo/bucket-0/file-0*. The client receives a zero length content response. 
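The *Content-MD5* header listed above is not used in the sample request; the following sketch shows one way to supply it. Per RFC 1864 the value is the base64 encoding of the binary MD5 digest; whether the server validates the digest is not specified here, so treat this as an optional integrity hint.

    # Compute the base64-encoded binary MD5 digest of the local file.
    MD5=$(openssl dgst -md5 -binary /path/to/localfile | base64)
    curl -X PUT -T /path/to/localfile -H "Authorization:OZONE" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "x-ozone-version: v1" -H "x-ozone-user:bilbo" -H "Content-MD5: $MD5" "http://localhost:9880/volume-of-bilbo/bucket-0/file-0"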
-
-### Get Key
-
-This API allows a user to get or download a key from an ozone bucket.
-
-Schema:
-
-- `GET /{volume}/{bucket}/{key}`
-
-Sample HTTP GET request:
-
-    curl -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" "http://localhost:9880/volume-of-bilbo/bucket-0/file-0"
-
-this request reads the content of key */volume-of-bilbo/bucket-0/file-0*. If the content of the file is plain text, it can be dumped directly onto stdout.
-
-    HTTP/1.1 200 OK
-    Content-Type: application/octet-stream
-    x-ozone-server-name: localhost
-    x-ozone-request-id: 1bcd7de7-d8e3-46bb-afee-bdc933d383b8
-    Date: Tue, 27 Jun 2017 09:35:29 GMT
-    Content-Length: 6
-    Connection: keep-alive
-
-    Hello Ozone!
-
-if the file is not plain text, pass the *-O* option to curl; the file *file-0* is then downloaded into the current working directory, with the file name matching the key. A sample request looks like the following:
-
-    curl -O -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" "http://localhost:9880/volume-of-bilbo/bucket-0/file-1"
-
-the response looks like the following:
-
-      % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
-                                     Dload  Upload   Total   Spent    Left  Speed
-    100 6148k  100 6148k    0     0  24.0M      0 --:--:-- --:--:-- --:--:-- 24.1M
-
-### Delete Key
-
-This API allows a user to delete a key from a bucket.
-
-Schema:
-
-- `DELETE /{volume}/{bucket}/{key}`
-
-Sample HTTP DELETE request:
-
-    curl -i -X DELETE -H "Authorization:OZONE root" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "x-ozone-version: v1" -H "x-ozone-user:bilbo" "http://localhost:9880/volume-of-bilbo/bucket-0/file-0"
-
-this request deletes key */volume-of-bilbo/bucket-0/file-0*. The client receives a zero length content result:
-
-    HTTP/1.1 200 OK
-    x-ozone-server-name: localhost
-    x-ozone-request-id: f8c4a373-dd5f-4e3a-b6c4-ddf7e191fe91
-    Date: Tue, 27 Jun 2017 14:19:48 GMT
-    Content-Type: application/octet-stream
-    Content-Length: 0
-    Connection: keep-alive
-
-### Info Key
-
-This API returns information about a given key.
-
-Schema:
-
-- `GET /{volume}/{bucket}/{key}?info=key`
-
-Query Parameter:
-
-| Query Parameter | Value | Description |
-|:---- |:---- |:----
-| info | String, "key" | Required and enforced with this value. |
-
-Sample HTTP GET request:
-
-    curl -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" "http://localhost:9880/volume-of-bilbo/bucket-0/file-0?info=key"
-
-this request returns information about the key */volume-of-bilbo/bucket-0/file-0*. The client receives a JSON object listing the attributes of the key.
-
-    HTTP/1.1 200 OK
-    x-ozone-server-name: localhost
-    x-ozone-request-id: c674343c-a0f2-49e4-bbd6-daa73e7dc131
-    Date: Mon, 03 Jul 2017 14:28:45 GMT
-    Content-Type: application/octet-stream
-    Content-Length: 73
-    Connection: keep-alive
-
-    {
-      "version" : 0,
-      "md5hash" : null,
-      "createdOn" : "Mon, 26 Jun 2017 04:23:30 GMT",
-      "modifiedOn" : "Mon, 26 Jun 2017 04:23:30 GMT",
-      "size" : 0,
-      "keyName" : "file-0"
-    }
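-
-Because the response is JSON, single attributes are easy to extract in scripts; for example, reading the size of a key (this assumes ```jq``` is installed on the client):
-
-    curl -s -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" "http://localhost:9880/volume-of-bilbo/bucket-0/file-0?info=key" | jq '.size'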
-
-### List Keys
-
-This API allows a user to list the keys in a bucket.
-
-Schema:
-
-- `GET /{volume}/{bucket}?prefix=&max-keys=&prev-key=`
-
-Query Parameters:
-
-| Query Parameter | Value | Description |
-|:---- |:---- |:----
-| prefix | string | Optional. Only keys with this prefix are included in the result. |
-| max-keys | int | Optional. Maximum number of keys included in the result. Default is 1024 if not specified. |
-| prev-key | string | Optional. Key name from where the listing should start; this key is excluded from the result. It must be a valid key name. |
-
-Sample HTTP GET request:
-
-    curl -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" "http://localhost:9880/volume-of-bilbo/bucket-0/?max-keys=100&prefix=file"
-
-this request lists the keys under bucket */volume-of-bilbo/bucket-0*, with the listing result filtered by the prefix *file*. The client receives an array of JSON objects, each of which represents the info of a matched key.
-
-    HTTP/1.1 200 OK
-    x-ozone-server-name: localhost
-    x-ozone-request-id: 7f9fc970-9904-4c56-b671-83a086c6f555
-    Date: Tue, 27 Jun 2017 09:48:59 GMT
-    Content-Type: application/json
-    Content-Length: 209
-    Connection: keep-alive
-
-    {
-      "name" : null,
-      "prefix" : "file",
-      "maxKeys" : 0,
-      "truncated" : false,
-      "keyList" : [ {
-        "version" : 0,
-        "md5hash" : null,
-        "createdOn" : "Mon, 26 Jun 2017 04:23:30 GMT",
-        "modifiedOn" : "Mon, 26 Jun 2017 04:23:30 GMT",
-        "size" : 0,
-        "keyName" : "file-0"
-      },
-      ...
-      ]
-    }
diff --git a/hadoop-ozone/docs/content/RunningViaDocker.md b/hadoop-ozone/docs/content/RunningViaDocker.md
deleted file mode 100644
index 0b8fece5fb1a9..0000000000000
--- a/hadoop-ozone/docs/content/RunningViaDocker.md
+++ /dev/null
@@ -1,73 +0,0 @@
----
-title: Alpha Cluster
-weight: 1
-menu:
-   main:
-      parent: Starting
-      weight: 1
----
-
-***This is an alpha release of Ozone. Please don't use this release in
-production.*** Please check the road map page for features under
-development.
-
-The easiest way to run ozone is to download the release tarball and launch
-ozone via Docker. Docker will create a small ozone cluster on your machine,
-including the data nodes and ozone services.
-
-## Running Ozone via Docker
-
-**This assumes that you have Docker installed on the machine.**
-
-* Download the Ozone tarball and untar it.
-
-* Go to the directory where the docker compose files exist and tell
-`docker-compose` to start Ozone in the background. This will start a small
-ozone instance on your machine.
-
-{{< highlight bash >}}
-cd ozone-0.2.1-SNAPSHOT/compose/ozone/
-
-docker-compose up -d
-{{< /highlight >}}
-
-To verify that ozone is working as expected, let us log into a data node and
-run _freon_, the load generator for Ozone. The ```exec datanode bash``` command
-will open a bash shell on the datanode. The ozone freon command is executed
-within the datanode container. You can quit freon via CTRL-C at any time. The
-```rk``` profile instructs freon to generate random keys.
-
-{{< highlight bash >}}
-docker-compose exec datanode bash
-ozone freon rk
-{{< /highlight >}}
-
-You can check out the **OzoneManager UI** at http://localhost:9874/ to see the
-activity generated by freon.
-While you are there, please don't forget to check out the ozone configuration explorer.
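-
-Freon aside, the same shell can be used for a quick end-to-end check by hand.
-A minimal sketch, assuming the bucket and key subcommands follow the same
-pattern as the volume commands documented elsewhere in these pages:
-
-{{< highlight bash >}}
-ozone sh volume create --user=hadoop /smoke
-ozone sh bucket create /smoke/test
-ozone sh key put /smoke/test/hello.txt /etc/hostname
-{{< /highlight >}}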
-
-***Congratulations, you have just run your first ozone cluster.***
-
-To shut down the cluster, please run
-{{< highlight bash >}}
-docker-compose down
-{{< /highlight >}}
\ No newline at end of file
diff --git a/hadoop-ozone/docs/content/RunningWithHDFS.md b/hadoop-ozone/docs/content/RunningWithHDFS.md
deleted file mode 100644
index 2fd2bd6ace2df..0000000000000
--- a/hadoop-ozone/docs/content/RunningWithHDFS.md
+++ /dev/null
@@ -1,77 +0,0 @@
----
-title: Running concurrently with HDFS
-weight: 1
-menu:
-   main:
-      parent: Starting
-      weight: 4
----
-
-Ozone is designed to work with HDFS, so it is easy to deploy ozone in an
-existing HDFS cluster.
-
-Ozone does *not* support security today. It is a work in progress and tracked
-in [HDDS-4](https://issues.apache.org/jira/browse/HDDS-4). If you enable ozone
-in a secure HDFS cluster, Ozone will refuse to work, for your own protection.
-
-In other words, until the Ozone security work is done, Ozone will not work in
-any secure cluster.
-
-The container manager part of Ozone runs inside DataNodes as a pluggable module.
-To activate ozone you should define the service plugin implementation class.

-
-{{< highlight xml >}}
-<property>
-   <name>dfs.datanode.plugins</name>
-   <value>org.apache.hadoop.ozone.HddsDatanodeService</value>
-</property>
-{{< /highlight >}}
-
-You also need to add the ozone-datanode-plugin jar file to the classpath:
-
-{{< highlight bash >}}
-export HADOOP_CLASSPATH=/opt/ozone/share/hadoop/ozoneplugin/hadoop-ozone-datanode-plugin.jar
-{{< /highlight >}}
-
-To start ozone with HDFS you should start the following components:
-
- 1. HDFS Namenode (from the Hadoop distribution)
- 2. HDFS Datanode (from the Hadoop distribution, with the plugin on the
-    classpath from the Ozone distribution)
- 3. Ozone Manager (from the Ozone distribution)
- 4. Storage Container Manager (from the Ozone distribution)
-
-Please check the log of the datanode to see whether the HDDS/Ozone plugin has
-started. The datanode log should contain something like this:
-
-```
-2018-09-17 16:19:24 INFO HddsDatanodeService:158 - Started plug-in org.apache.hadoop.ozone.web.OzoneHddsDatanodeService@6f94fb9d
-```
-
\ No newline at end of file
diff --git a/hadoop-ozone/docs/content/SCMCLI.md b/hadoop-ozone/docs/content/SCMCLI.md
deleted file mode 100644
index bd6086c796e2a..0000000000000
--- a/hadoop-ozone/docs/content/SCMCLI.md
+++ /dev/null
@@ -1,29 +0,0 @@
----
-title: "SCMCLI"
-date: 2017-08-10
-menu:
-   main:
-      parent: Tools
----
-
-SCM is the block service for Ozone. It is also the workhorse for ozone, but
-user processes never talk to SCM directly. However, being able to read the
-state of SCM is useful.
-
-SCMCLI allows the developer to access SCM directly. Please note: improper
-usage of this tool can destroy your cluster. Unless you know exactly what you
-are doing, please do *not* use this tool. In other words, this is a
-developer-only tool. We might even remove this command in the future to
-prevent improper use.
-
-[^1]: This assumes that you have a working docker installation on the
-development machine.
diff --git a/hadoop-ozone/docs/content/Settings.md b/hadoop-ozone/docs/content/Settings.md
deleted file mode 100644
index 41ab04a887896..0000000000000
--- a/hadoop-ozone/docs/content/Settings.md
+++ /dev/null
@@ -1,142 +0,0 @@
----
-title: Configuration
-weight: 1
-menu:
-   main:
-      parent: Starting
-      weight: 2
----
-
-If you are feeling adventurous, you can set up ozone in a real cluster.
-Setting up a real cluster requires us to understand the components of Ozone.
-Ozone is designed to work concurrently with HDFS. However, Ozone is also
-capable of running independently. The components of ozone are the same in
-both approaches.
-
-## Ozone Components
-
-1. Ozone Manager - The server that is in charge of the namespace of Ozone.
-   Ozone Manager is responsible for all volume, bucket and key operations.
-2. Storage Container Manager - Acts as the block manager. Ozone Manager
-   requests blocks from SCM, to which clients can write data.
-3. Datanodes - The Ozone datanode code runs inside the HDFS datanode or, in
-   the independent deployment case, as a standalone ozone datanode daemon.
-
-## Setting up an Ozone-only cluster
-
-* Please untar the ozone-0.2.1-SNAPSHOT tarball to the directory from which
-you are going to run Ozone. Ozone jars are needed on all machines in the
-cluster, so you need to do this on all machines in the cluster.
-
-* Ozone relies on a configuration file called ```ozone-site.xml```. To
-generate a template that you can replace with proper values, please run the
-following command. This will generate a template called ```ozone-site.xml``` at
-the specified path (directory).
-
-{{< highlight bash >}}
-ozone genconf <path>
-{{< /highlight >}}
-
-Let us look at the settings inside the generated file (ozone-site.xml) and
-how they control ozone. Once the right values are defined, this file
-needs to be copied to ```ozone directory/etc/hadoop```.
-
-* **ozone.enabled** This is the most critical setting for ozone.
-Ozone is a work in progress and users have to enable this service explicitly.
-By default, Ozone is disabled. Setting this flag to `true` enables ozone in the
-HDFS or Ozone cluster.
-
-Here is an example,
-
-{{< highlight xml >}}
-<property>
-   <name>ozone.enabled</name>
-   <value>true</value>
-</property>
-{{< /highlight >}}
-
-* **ozone.metadata.dirs** Allows Administrators to specify where the
-metadata must reside. Usually you pick your fastest disk (SSD if
-you have them on your nodes). OzoneManager, SCM and datanode will write the
-metadata to this path. This is a required setting; if it is missing, Ozone
-will fail to come up.
-
-Here is an example,
-
-{{< highlight xml >}}
-<property>
-   <name>ozone.metadata.dirs</name>
-   <value>/data/disk1/meta</value>
-</property>
-{{< /highlight >}}
-
-* **ozone.scm.names** Storage Container Manager (SCM) is a distributed block
-service which is used by ozone. This property allows datanodes to discover
-SCM's address. Datanodes send heartbeats to SCM.
-Until the HA feature is complete, we configure ozone.scm.names to be a
-single machine.
-
-Here is an example,
-
-{{< highlight xml >}}
-<property>
-   <name>ozone.scm.names</name>
-   <value>scm.hadoop.apache.org</value>
-</property>
-{{< /highlight >}}
-
-* **ozone.scm.datanode.id** Datanodes generate a unique ID called the
-Datanode ID. This identity is written to the file specified by this path.
-*Datanodes will create this path if it doesn't exist already.*
-
-Here is an example,
-
-{{< highlight xml >}}
-<property>
-   <name>ozone.scm.datanode.id</name>
-   <value>/data/disk1/meta/node/datanode.id</value>
-</property>
-{{< /highlight >}}
-
-* **ozone.om.address** OM server address. This is used by OzoneClient and
-the Ozone File System.
-
-Here is an example,
-
-{{< highlight xml >}}
-<property>
-   <name>ozone.om.address</name>
-   <value>ozonemanager.hadoop.apache.org</value>
-</property>
-{{< /highlight >}}
-
-### Ozone Settings Summary
-
-| Setting                        | Value                        | Comment |
-|--------------------------------|------------------------------|------------------------------------------------------------------|
-| ozone.enabled                  | true                         | This enables SCM and containers in the HDFS cluster.              |
-| ozone.metadata.dirs            | file path                    | The metadata will be stored here.                                 |
-| ozone.scm.names                | SCM server name              | Hostname:port or IP:port address of SCM.                          |
-| ozone.scm.block.client.address | SCM server name and port     | Used by services like OM.                                         |
-| ozone.scm.client.address       | SCM server name and port     | Used by the client side.                                          |
-| ozone.scm.datanode.address     | SCM server name and port     | Used by datanodes to talk to SCM.                                 |
-| ozone.om.address               | OM server name               | Used by the Ozone handler and the Ozone file system.              |
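-
-Putting the required settings together, a minimal ```ozone-site.xml``` might
-look like the following sketch; the hostnames and paths are the placeholder
-values from the examples above and must be adapted to the actual cluster:
-
-{{< highlight xml >}}
-<configuration>
-  <property>
-    <name>ozone.enabled</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>ozone.metadata.dirs</name>
-    <value>/data/disk1/meta</value>
-  </property>
-  <property>
-    <name>ozone.scm.names</name>
-    <value>scm.hadoop.apache.org</value>
-  </property>
-  <property>
-    <name>ozone.scm.datanode.id</name>
-    <value>/data/disk1/meta/node/datanode.id</value>
-  </property>
-  <property>
-    <name>ozone.om.address</name>
-    <value>ozonemanager.hadoop.apache.org</value>
-  </property>
-</configuration>
-{{< /highlight >}}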
diff --git a/hadoop-ozone/docs/content/VolumeCommands.md b/hadoop-ozone/docs/content/VolumeCommands.md
deleted file mode 100644
index 6f024ef87f4f4..0000000000000
--- a/hadoop-ozone/docs/content/VolumeCommands.md
+++ /dev/null
@@ -1,116 +0,0 @@
----
-title: Volume Commands
-menu:
-   main:
-      parent: Client
-      weight: 2
----
-
-Volume commands generally need administrator privileges. The ozone shell
-supports the following volume commands:
-
- * [create](#create)
- * [delete](#delete)
- * [info](#info)
- * [list](#list)
- * [update](#update)
-
-### Create
-
-The volume create command allows an administrator to create a volume and
-assign it to a user.
-
-***Params:***
-
-| Arguments                      | Comment                                  |
-|--------------------------------|------------------------------------------|
-| -q, --quota                    | Optional. Specifies the maximum size this volume can use in the Ozone cluster. |
-| -u, --user                     | Required. The name of the user who owns this volume. This user can create buckets and keys on this volume. |
-| Uri                            | The name of the volume.                  |
-
-{{< highlight bash >}}
-ozone sh volume create --quota=1TB --user=bilbo /hive
-{{< /highlight >}}
-
-The above command will create a volume called _hive_ on the ozone cluster. This
-volume has a quota of 1TB, and the owner is _bilbo_.
-
-### Delete
-
-The volume delete command allows an administrator to delete a volume. If the
-volume is not empty then this command will fail.
-
-***Params:***
-
-| Arguments                      | Comment                                  |
-|--------------------------------|------------------------------------------|
-| Uri                            | The name of the volume.                  |
-
-{{< highlight bash >}}
-ozone sh volume delete /hive
-{{< /highlight >}}
-
-The above command will delete the volume hive, if the volume has no buckets
-inside it.
-
-### Info
-
-The volume info command returns information about the volume, including
-quota and owner information.
-
-***Params:***
-
-| Arguments                      | Comment                                  |
-|--------------------------------|------------------------------------------|
-| Uri                            | The name of the volume.                  |
-
-{{< highlight bash >}}
-ozone sh volume info /hive
-{{< /highlight >}}
-
-The above command will print out the information about the hive volume.
-
-### List
-
-The volume list command will list the volumes owned by a user.
-
-{{< highlight bash >}}
-ozone sh volume list --user hadoop
-{{< /highlight >}}
-
-The above command will print out all the volumes owned by the user hadoop.
-
-### Update
-
-The volume update command allows changing the owner and quota of a given
-volume.
-
-***Params:***
-
-| Arguments                      | Comment                                  |
-|--------------------------------|------------------------------------------|
-| -q, --quota                    | Optional. Specifies the maximum size this volume can use in the Ozone cluster. |
-| -u, --user                     | Optional. The name of the user who owns this volume. This user can create buckets and keys on this volume. |
-| Uri                            | The name of the volume.                  |
-
-{{< highlight bash >}}
-ozone sh volume update --quota=10TB /hive
-{{< /highlight >}}
-
-The above command updates the volume quota to 10TB.
-
-You can try out these commands from the docker instance of the [Alpha
-Cluster](runningviadocker.html).
diff --git a/hadoop-ozone/docs/content/_index.md b/hadoop-ozone/docs/content/_index.md
deleted file mode 100644
index e297b182fd447..0000000000000
--- a/hadoop-ozone/docs/content/_index.md
+++ /dev/null
@@ -1,39 +0,0 @@
----
-title: Ozone Overview
-menu: main
-weight: -10
----
-
-# Apache Hadoop Ozone
-
-Ozone is a scalable, distributed object store for Hadoop. Applications like
-Apache Spark, Hive and YARN can run against Ozone without any
-modifications. Ozone comes with a [Java client library]({{< ref "JavaApi.md" >}})
-and a [command line interface]({{< ref "CommandShell.md#shell" >}}) which make
-it easy to use Ozone. The client library supports both RPC and REST protocols.
-
-Ozone consists of volumes, buckets, and keys:
-
-* Volumes are similar to user accounts. Only administrators can create or delete volumes.
-* Buckets are similar to directories. A bucket can contain any number of keys, but buckets cannot contain other buckets.
-* Keys are similar to files. A bucket can contain any number of keys.
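-
-The REST samples elsewhere in this documentation show how these three levels
-compose. As a sketch, a volume, a bucket and a key can be created with three
-calls against a local cluster; this assumes volume creation mirrors the
-bucket call (a POST on the volume path) and reuses the placeholder headers
-from the REST documentation:
-
-{{< highlight bash >}}
-curl -X POST -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" http://localhost:9880/volume-of-bilbo
-curl -X POST -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" http://localhost:9880/volume-of-bilbo/bucket-0
-curl -X PUT -T ./localfile -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" http://localhost:9880/volume-of-bilbo/bucket-0/file-0
-{{< /highlight >}}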
-
-[Ozone architecture overview diagram]
diff --git a/hadoop-ozone/docs/dev-support/bin/generate-site.sh b/hadoop-ozone/docs/dev-support/bin/generate-site.sh
deleted file mode 100755
index 374e74b8904b7..0000000000000
--- a/hadoop-ozone/docs/dev-support/bin/generate-site.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-DOCDIR="$DIR/../.."
-
-if [ ! "$(which hugo)" ]; then
-   echo "Hugo is not yet installed. Doc generation is skipped."
-   exit 0
-fi
-
-DESTDIR="$DOCDIR/target/classes/webapps/docs"
-mkdir -p "$DESTDIR"
-cd "$DOCDIR"
-hugo -d "$DESTDIR" "$@"
-cd -
diff --git a/hadoop-ozone/docs/pom.xml b/hadoop-ozone/docs/pom.xml
deleted file mode 100644
index d8edd15e992ed..0000000000000
--- a/hadoop-ozone/docs/pom.xml
+++ /dev/null
@@ -1,71 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
-         http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-ozone</artifactId>
-    <version>0.3.0-SNAPSHOT</version>
-  </parent>
-  <artifactId>hadoop-ozone-docs</artifactId>
-  <version>0.3.0-SNAPSHOT</version>
-  <name>Apache Hadoop Ozone Documentation</name>
-  <description>Apache Hadoop Ozone Documentation</description>
-  <packaging>jar</packaging>
-
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>exec-maven-plugin</artifactId>
-        <version>1.6.0</version>
-        <executions>
-          <execution>
-            <goals>
-              <goal>exec</goal>
-            </goals>
-            <phase>compile</phase>
-          </execution>
-        </executions>
-        <configuration>
-          <executable>dev-support/bin/generate-site.sh</executable>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.rat</groupId>
-        <artifactId>apache-rat-plugin</artifactId>
-        <configuration>
-          <excludes>
-            <exclude>themes/ozonedoc/static/js/bootstrap.min.js</exclude>
-            <exclude>themes/ozonedoc/static/js/jquery.min.js</exclude>
-            <exclude>themes/ozonedoc/static/css/bootstrap-theme.min.css</exclude>
-            <exclude>themes/ozonedoc/static/css/bootstrap.min.css.map</exclude>
-            <exclude>themes/ozonedoc/static/css/bootstrap.min.css</exclude>
-            <exclude>themes/ozonedoc/static/css/bootstrap-theme.min.css.map</exclude>
-            <exclude>themes/ozonedoc/static/fonts/glyphicons-halflings-regular.svg</exclude>
-            <exclude>themes/ozonedoc/layouts/index.html</exclude>
-            <exclude>themes/ozonedoc/theme.toml</exclude>
-          </excludes>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
-</project>
diff --git a/hadoop-ozone/docs/static/NOTES.md b/hadoop-ozone/docs/static/NOTES.md
deleted file mode 100644
index 7b7ca1290b360..0000000000000
--- a/hadoop-ozone/docs/static/NOTES.md
+++ /dev/null
@@ -1,20 +0,0 @@
-The source of the Ozone logo is available here:
-
-https://git-wip-us.apache.org/repos/asf?p=hadoop-ozonesite.git;a=tree;f=static;h=9830788c1fa36c933272cdf87342bb71974c8567;hb=refs/heads/asf-site
diff --git a/hadoop-ozone/docs/static/OzoneOverview.png b/hadoop-ozone/docs/static/OzoneOverview.png
deleted file mode 100644
index 7e011d5bbde9dc06d72103e4a389d848e5eac406..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001
[41729 bytes of binary PNG data omitted]
diff --git a/hadoop-ozone/docs/static/OzoneOverview.svg b/hadoop-ozone/docs/static/OzoneOverview.svg
deleted file mode 100644
index 9d4660db1c8d3..0000000000000
--- a/hadoop-ozone/docs/static/OzoneOverview.svg
+++ /dev/null
@@ -1,238 +0,0 @@
-[SVG drawing "Desktop HD", created with Sketch: Ozone architecture overview showing Ozone Client, Rest Handler, Ozone File System, Ozone CLI and Freon on top of the Ozone Manager and Storage Container Manager, with Ratis and Containers on the Datanodes of the Hadoop Distributed Data Store]
\ No newline at end of file
diff --git a/hadoop-ozone/docs/static/SCMBlockDiagram.png b/hadoop-ozone/docs/static/SCMBlockDiagram.png
deleted file mode 100644
index 04d27adede700b387fffbfdeadef063a2586cf9b..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001
[14714 bytes of binary PNG data omitted]
zn*NcVbvtY_ES_3jrkpd#YuFR_HUgil^NoqMD2h%o#S=ii{zIjtf=N{nl49*NL+9CaD<-hB23A5NxF z@-(!@AsUNk&*X@}%2@R&jT@b@Sh0epCSodstFe*j8BD zJ2XR7Bf=OSXIlfVmcnS2WfEUkl8WLKgX1}4%XKSg&weqN0XBAMRr!Xfa^zlgwq<^z z0x2;R$*PO@<28Xj84K;BhcwIsFw@^kw&;pY&A%})2Z!AG3t&x8D57n!^Awf9dDM1Pa)ULT$pKSYeIU3wB#O$;?T28VD#dHpDg}PNWQxKy?yImazwkz z+gA8CwGZDsxW~w#_Lozp@HY0tQ(w82H!{D2f1Reic_n-}A;M?(ZD>YzBq8}dz3okm zc9qRM^?TK~VoRc4-+~DxzCVj-BXiaEI$K^k(xyvJYVqIx6kFl<_(7ZRPs`bzyQY~w zR@uPW1<;OmJ3~poD54XSn{qDY?ZY!H0%KDHwW5-ez7%#9Kxxg%=98n2HfG-?0$duu+Fl2{w!CSJMHLDVVRLdPN-VoeNuDS4bygA>1W zwdUNWKw?TFx`-KPS26xHp_ZZd65u?vSqlN0C9}x3!k~B@#PZ0kLuV9U<8S>{3lR=vkhx76W7GI`CJh~cF!GYMI4wwglc4vD!WrGu9 z#*zXkA?QgoRcYApQKMVy{T5I$iCJ2lO8eQfJ6~mGWnLsnNfMgg0a?gtgxUFUwv^Xa zLqmhMpxt=(_0eGAAsKbTtd#o;I_I_)QquKBc9wWHijgxXeDn2vm)aKVDjcnzFFu;y z=DbII+|jZzzI~T(7v4apU*cxk4&CG_WW9rW7MkpGCCt*K7}JpUn5)C#Ba(_G4m0ls zv@ovafC8vH-VwR9yKuYV8k4pkn@uZ$0PgE=f7C&(EtpB^->Jyp6cgv4mzbx)Q>|-D zwH_x7i>)uxa8gp;l8z$Uex76nSM~J8knu**aLw`Z&zGD~-eDk773^G2iY+ZItC+pz zTyk<>T~RvqKBpf5lEl>-%Q zfw}W;JTkGQK~eG9$a3MB`9Nol#g_~Xi=J;27H9JZSdw)q~ zB_E%y23r(5M-4?1(Brxd;3Fg;>ybHcEv^qci+eGw8 zHuTfvHTQ_djx2{<`pK|IHacz3ef;z5K#d(WEf9pu{q`_wy6_{%kJWjsa+5+<2@RmUF8Y zbceU;xf0lLsb^w(5KpQ?v=4Ze#n)0=D#FMBwBETpL59Z5#VWf!M6gUf;{%0>E#O{f z%Nu`GyyR0?6WEwN#&L#d-+q|ZAKGwk}$?Ma^{PCAP! z5v;hl_(g=7pKs@trKMM;8Pv1EWS17E5JT!C+88=FS^V^M#wNLZp>m?>Jj6PZoFtD+ zw*65$VUbQa%5>S;0i(n3z5H$>Au2;(Bds->g3Jn2%g7cRX3Rk5LffzKvc`!~z6j6Z z_Ji>TVxOk3=BeU0v=J?Yun@#);gr`urHGX4l2SMKK1?OnwO>6_dhQb$LF{YnO{Dno zv9%`2?{Lg~E_Q#VCT>_-CnFRW4qvG{qf}*HkeCgN6T_-9fE(!r!;F;yMi24%yQ;Ah z=>bS#ZyPg*-f`zV%$^p%6H|~;_AG?HNAXW^`nedxA&A~IUO3>~8WR~Q*n9a%P03n^ zM0QssQa%7<-A@qPK^U`)rg{J2w1+X)4{r4+(!(w0?tZ)s|5|*(isAL`b4mL)5KHRr zT-1B73K4}O>*?jXX-MBsvQCL-a`KVizjQFhGjQ>18fDIcQchM({{$umq*MCm%#*`W z+0BQmrAbGYiccISwi;JYH~T6M8^8S-$YVGskv~BFd|{Ro;=TH?s1K)z|B|m*TZ?K> zHy!7A9_80taTuinp?Ql<{81f9#U+hdo6Oo5vkN*XJiEbfWFEB-x}m}oringq z9{We^4Bk*^piVtyD!2qd{NS9Xf)_%edm{kz$deCR zh)>HO8^qzvbS~sToqz7eI^1D$a_@B=`=b_(7OgySc;iBgjDtbd2(^V&H08QRLU#4=c1} z!SUL8GdKPUnf{?)nYc02m%8OwIIg&ME741lXlAq@NE)FGw2%?r=$#QM4=ThS2U(dH zc?jbwwJX@&6(JGt7ls)y8#>HwW@$Yz@5h-QZ!;!<<@DBW=BTf{g`5;v@72D?413?n zdHJkwlpcvyIypH1BRUC9L3Qy>jzx@Zt$~O)SkR4zw%F9FOF&UDVuhfX0Zhq2F>lP#rX6#(vi`H&pbl{9Zn;dp!) 
zVcw60xQs!sH`xus7BK*3+2BQLAaV)jK|~qQ*?B|*gUXM%Gsi8MD~N_iMm*3%?}K)t z9`?Yn_HVW;))^9m*kL9zSa5hU^XJVf{?GZ?m?aS$*+&)wtfinq2b=3|`9s`0&RzfH zAa))iSRkk_T(SNqdS>Z5r!30Lv;N2$=WB_b>{FB%5$U8-IY+^gP!6URTNX-vQsXMq z88YJr;0~zx$l<+`Ngh4|lF6@srZj-1SssS_CCy8Z^XL<)t}f@1f0&M@=4n^EfdUa5 z;(28?J1#x2W+6%RQ2AH&-g+SuJ28L*!M>vGa0pPAb6XadVHdz{-I3rt6`dp$sFc*x znu<)5shza-z+U(ltD-Uxw!eRQNnjcQxJYtMhJEjURGfHoIy|MuQ>*yC4*SC$ zFO&)qZXcCa{TJUJtfB zks8Xvq&F!}IeF{nnx($DgbA0u;!h5G!C^&Bfx1Jd;$ME~{c@1>A~`q8Ju}RwpRtCb zL`UU?(s?1MA9j=7db)SSFM@K$tQQ+c8oc;&wZZ^BDFE&@imQjm(Y6_!;qk=IrPbHX zRg6gGmY^*ZOG<`9JbfyiRms{yn)*&lnCWkG8!NXFw0^p}&nir{oc*pSNO)8*mGs00 zcZ5gKwdCT8bFu`qM9#rksfwyMR+6g8WecUMrh24!aTp*jWyjGVt|~11)mYYIh$!{T zpn)=kPke6Q zyu}Kx*B+zXtOoW?wPCwyUam(_OWYCXo!i+m?H8xSun?uPtQoIkHlamwLe#0ETgE;( zx~b5)M?c8VJ> zK}}%l_Fuc5KYlRMP?YB!8EX zH(M;o1D893C;s_mKO90dZ$pF_B#VL9xD)C|{_un81 ziPsL9(1sI`ZF!+5isgqN7P77CtTXOr+fg7V<@Ey-2~na5IAcvYh!@h5NJYxmXfltH z`fcHIt{ZD?_~(smg)HOsl?W_EVIQ}!*`zO;g!#j9b|w2YZV|gml7Yj*z%!rzKq=Pn z9WMXMqTkKYlOI6zt(A!qk#Sr)SG=H3$tm&Xt!{Y~puQe|q&UURtdjMbxj3l7LHf(~ zIuwf#pTPjqD{`oF43>Gdxm$;1u3L?YeB}ZV_bU2 zy*fXtH-{oG#iCzkd|gS!6|sN~VR$+Ft#muSCaO9rnbOH_ z6B2EKTgM$a!T8B*-u#Y49Uq#DdaY@AzCh}luQJ_7IsVGBlNt&1jGa}DbkK2HQ_%_oP@vI90vU{Q4hsg&8nhq>$hw5qNcHXVV}8;$i) zd-MDe{kfv6X9heyKa|bQEtevIW~DZW8Pugn^48AJeG0yLImxs0 zD^h2Mhf^g5O{|<3i=MmSq`rKP$|b^rg+SuKtAgg;7-mh!SQtpvDpsMjDD)X+`xr*5 zhW3({L}d8e=c#OjsT2qq$n-O@Gx^FY*1GW7U==T_phd{u!?4Aka;h-GiQyTAI67pK zhkSZHgZ!b=W&YRIR#bO2S?rf*k^}qxzkaG&swPvfB$7pv!$DD z8ATP~MgxrQu_oh=nbQ_8(|kRkaB$a3sYr+ea5>uA`B`4JJKa3Z0u(`6x{#%sX%!@e zZkvVE^^t@at60+C(gNPBM-}0Y)a?0(=_MD~PKtC9(s1e(Rbp_2r8D9mp8;JR%=4pX zb7uyrve^v8T71YTUC1b}o8LeguCLe;?4L=WT`H`ohf?#QGR(5+e11s|lVQsu z4s(WBq~67${L2X8kV!6J7S28@*+Uv z@lm+7FKg;Jd>sp@_ANVWp9f+RM9pI0!|TYFlR_5`XO_+LgwM4UJZd!5Oor%CWl^{{ z#xbdrE@VkIT^L9Vu<^-o9tke~85fUh=#VHZPObbL;M!7dqN>lxt zj5HBxpCoN#rpksg;PcJr^Td?V|5%*_;%NI-qTJ0Nbv$WIU5JyvM!>~v z^X}r)Xfn5go4NH!DDi3{cG6i)s{DLgRy{M*m_7;2_@`^ zTq>fZ!h%vV>W`D8N(F^C$Qk2Ud@e%ouw3E%fM=5Jrw=SwpV(-;iURRcQ#JG)gX@bf zRgEvpiCUd4`#P^)WoWY8AIS&6yX#k#W?h}Q0Z;A{WlqV(CgL({QrqoU=t;x@u=9J8 zL@~nt;iGIGr79|`TL(T#m ztVlr3^SK`+&s=tuEVYmDS%UzwjAz$cMn*=2z<>^-isBdP6~!eu2vQBVjgE|+0rv>T zn99+@^{xzoz1sl3Fz*YGeR*0GlY%1dFjIspMZ%R8cTM1d!tx+^GB8*THu{{(=vzBp zoZ$xj`|@A_c*_8wqVNwbg*NW)Wi*u-CUwOaZl+W#QMm1D;(6zrD7^-DCg8Lja{vAIVaHAIaD*F2&VH`*Yh z*iV+Y+_)S**%$+KTqhNrvgnji-^Q&cJsAqH_z4SOaa?b-v$fU3f(Lxg6xx1eU!Eb; zCBU6k4rE86tEnzNwa_K6Q$@ar#>;uUhacOjq?8lpG=ZD@^~L!(8q3kkleQ)#gt-|N zro<6LuGvr}O(RWujta=qK-n9G>?2*ZBSEpht>UlWS{kZ!QWVY3>g+F-$l9tAl1A-^ zMb;40Jr1H!b8}p~iJPf>=Jnm@)y|cB#8OC6LPCO`(8r`7cXLCppMHWI=&nviN2O3odMVURr7$Ydnc)4B~ncC%1Zz^W! 
zu=@y>^BNcsh`f?3BIK0yr!{V9L z$1C2ur;`IE)E)9%;`w=aQX#!|*;h*{3WV=+KHla3o0d4}_RIQcjq9W^Od{yjg0YQg0A}Ha<<04h@&QL|;T`k#99auxWA3x1;HK=P^A_{PHi4W+)lDo$oL0tTt>_h;Z`}_0ldXPYgc{O@pU9hWwmz0=B3!B&OYga>~ zV9K#MDuPrN<-k+#WhRxi3GICgCnTX^7}t_Y9aaW6k_a(o7NHZaSx%~|7v>qCc2PbO zC_O<6Q&)F*DCU^{ozaCK$tSoeOmOUjURyapUkv&SZUu_jGAjXQt~ZMf+6>gFbi3DAZ*1{79^^7JRAG}7n@3nmu(QX6`cm3 zvo6UQ!lIneW@z;}q`kVEelpYO)@^9ftL}=Gyadazmk5~2{}#dO%AGc1dF_XXc#`sATP54 z`)r&1+F&Xak)eegBGPR4sGpv0m>GYXtFM8z-fVq{Q$#np&jCRAnTMdNEU^X0L46mFrF=G)5t zY&3dGHXTNU93pxCo7SCQ`ad|zqx6nH{Wwa;6N5QMHvV1bK^SFnYUHZ^{@v)ZawP$E z**v?6)BET#p7X8(SV+~aYqz3=UY3bxwtr7w3&+;?b*vxL>-6E(UqX8NNPIYI*TmRv zzKqmcs?jGBsQeTvEI%Th&8MTrNfj63waqEf#`yA|jX1IW&$I3W0@J#r{lzG7bp^j0e5tXx%NH;ZJ zI@~E)PW@^nU~NVlh#EI}ayY_G!ok6z`N7p>%`Tl0wAnzz^_HYLAIEc}r>O(3uHk!5JCTX7D{)d=yrs47&~T z#O5rdV&PQHJXI#gXYs!Swq8t*`s|wKY;3&dfj_~NF^XZ#r319QOx4N;6ys|j1~lb= zUhwhuaCE^gp-y%ecG|s!X?!J0T^g2na%eAM(uSTYtZG5a-!1tY%@M)wU}4tkgLR~oLdgPVAm zsl+vHHXriWZ)`0<^iedtdT6vnDkBd-C6Z^QSqaMl1gWow@c5s76g+xU7)%Q8Bq9H5BlfH9|`L$b$F{2Ecl!%K1 zoql}DrK+sKMT`-&J*|e4=7PT~VinsvZ4M|Nj=yXI=J4#lvF}`Fdf}EO!@bL$$~qcg z^vBCzxtiex-!7MDuI|?oaKPi(>tEZ+f3K$sRc;d?H_j)|aS9~30}7G;fMTVx&jm)4 zzNRMJ?Hf|LE+eo>@alYPJStwz&d;kYUN6Sc1tVSEOmvPD&s*lJdX!(3q?=vYc-hD= z+or3%a_v!DU@ycZ8=Of*nL8cYeTr3v?)hk)pp^y%to5b|{3TW;PChjBgF398$!tHK zF9#BXexuOxqZg!DY3SCnzf^9hBXb*?(q=H|taqfUoI5cz{cy7mN(6HZZJeO`0&1D+ z)$VY(3P$5e3i&E5*VXvlpG0zU{G;~q)A%<$BX|hWOnV3}4?R8oUl>dH^F+w9D#TGk zu#VCXMTB2|Tz{aGzB)LfK9}Cy&5W`i!b67+IfJM@BsghOGhT;yV(+|*-?-WP(YqSC z*T$bUl-Loi3p$cghvN>1<5ie3aa3GcIbX=NbyEoue1fu8;X3BMq)6St)1G+x(of~@ zK>^`Z*s2{F%^47I@MgNx?qZor=Ov_f7l&8XwCS{w(ZRuP6Wws|=jplB?F3M+uV&0% zAuh15^7Vp_|6HrI=T7WYsm&){xVR>=&l*Q3FZK27pJcXxoYnvw@QzNg9gVDBcRh)Y zt(d(;4GY}liKIs;ye4PiZD`{q<4F>UJZfue%ljRpZv{tYM-wvY#c3pM93-w*w zNoPZ2?oP=(Gf5Hpun^fru#?vPf~Ma}MkW&eGL1zu>;a}jAw8^Peh{h>PUd8=VHp-r zQ%aP65asS@N5%D_{QZ-sEB-{&=%Nw_Y8J!c9bJg)CcTLrMf#MxYvIOyqhIi0(!xW@ zFRzQhgfg`fGM>sK1e^?L#Mig)tVoeEVehk`<;(bESjcDyMfx&T!g5T{4v9LSAc^VJ z5;ZV)ckvOd(Jqo6;rij!e-+k-?qv&u8tB(wM0lI|+Xaa>eLpStS`Ljk&|9Jl_ZMxC z5v0eY8O{vz&I+A<8#)`;ZN>4=3Xvnbe&pvHeL0*oUdg4>Eb{k1J@K9gCNcBE)XVSn z!dbZ4X{KL)gIzxL1nQp{ep~EgC%QrY(rSZNQ@EFV*nn;=>m=LCXN%rZ<~Bnq)DE2_6u<0GdXWj z(?IG%XJbNV-{e_{jpx^6rN1Abi%uPr6l%N}6bbFWV3O{4L3fc(FUTA(ZP*Y`98aAF zTznHdeeM@C7C2qCWt4&!~~oos|a{84jfh92+{zQ1-j;3lQ*V*#hBCl~1RZ_`*BAriwuX?*wWA<;@m9TjtX9`*; zJxpyfSNq|_GYe7g0tu(B_+gUBCtlxF4=w<){4&d}sqJPX3ljoEw`rf|(+bLbUcY;{ zZt_5~U-cox5y!;K$vv6gz&B;ODwmcj9o7^nDAhO8UR)AYHq)#89&jczlw_`h`18D( z7WkmK&7FUc2vIqQ1S&c7N=Vm2O_DBRTRyok3q(x{(lrNSn~0)Bv64vtcDE9X(C zr)#^=y52?X{yOlwdz#Q6m}<;uOK;?7*$oX< zM5cLVTiKANld3;Ggm4iS<2`d1uE;WgYz5TvjC=_X5C6*%fu6(U|ExUYhh?s{`FTM{ z1P&36UZ*=Q{GGvGpKq9{euQ`#uq?}-j3-w~FGm~Z%;!9GkAgwTQ<^l;DDZ4ppI%NmF{XtR%W)UaQd^vt^vZEnpG zQ{cjVz(XYbuwM+~&ITJs_iov^aNnNt8+^1erq+n zx$5UKx?c{^^;k)g!E>X@VV>*|uDqZ#%xL>pL(|ojdJ}hA3k!)wgUSsU?<3}6(2Sm= z+G;FlPCznYtIt=whduyoc0Mu%r2PH+>Oj#?7FC2gS%98cwYYD}>s0YhiBILCJC$3_ z)~y?YN_GPtNc{?yb+|fOYE*m9VJXY)3*Ev@mNq}bEFT-qnhbzu~ven#`$6#)>#eN#$OgKVmoYr#Zg~ID_o@S z@(T2|0wGa7jrFwN(DZb{jee{d{FW00euZgQz?vUmq$EM+`q$ZZS|BSOsZ79B?}g$8S~4cJysw^_lOBGF3 zvnW7zgoyInkNue!jKpZ~A(ZJdwdy&~LFA}^xeguyI%%aHM(vkU%&pc6lw6+xDtl7gz^A~-Pm$exX5;*!8hOn0)Dk&*Z`CNl8J{uE$~aI{Lg*q)@9e7a{_kfcXHk;kTz;O zmeEr@QR*ZZNBhVg-u|(V8b5{(eP$<9FV<_5A_I?uX2CDyoDAxG0FU;-aB0}X^TQ77 zMlumtAF1zmy>lzB;rTay{=h(>=DFuE^vQ{OCD#BsOf>RF86;X-iDeV5{gcB3r#RYk z(cP%_T3{7gkTV^suYuBM8;i`DlSuOP$-6y3pMWYxGyAJ<)FnmTH@B6qM>Dg5Nx#R8 zNJl@yuao1BhfcFu(FhSsq4^Rn;$>PmQd0Fsmr4WmX}qoh zD>ZF`aT%?3tpXb?0T_M*LkRmd&dTAghdWD}pXKl)aR~S+B+xEcn=`g@Yi8ZH(()@c 
zh(TRe2Q=k=&{S%u6TTB;fiJ*$8ZJuf>cqq((0k5{kWu$UO+-;DkZR!~3)mxXo->IQ z`ESj71wTUeN2csX?~Rmnbpl`f_3R#i$S3j$ple!`=yso)|n z8hb79_X{uTle+KeEe;DW;^U6xY2}5k%J3eendovMHOcnIgMVZjg%m*haV8m(dof zq9g~aB(K8~oh+_}pywER^;f(lAM`{C@{5X2wrD9~T;t3of9O@PVda-zYlQ8c!MDhY zVnBX+YN}X{<9!pTo5r}@g*gsA5MQA#LHPoHgPjKQ9P9TNzYff6eYlifCw2SC3g zZ%|i{pDv`{p*K&QKdXT^k|+l33I`@fC7*lptsZtDv=USG(c(yvKIS|LCry2#d_-P{ zExHJ;gbv`P>t>U`|FU$MzuUWrJ7UkrB(b0}W?G!LT0z|a8WH0pqEF9N6 z&xCe1-pM3S;OU`wEUXP^>e#FwjIM}p3lP& z`}T=TN6J`mk<*J*QOb@q|7}J^=XYlsvohIac*rJJHXsu8ByB zW^p0YKJTsmIQoz*@vDIrzYwq`Od0N+P4W3@&fjw@Kq&a!=I~?Xr6!WRILUn z^-(CGOGMlKgN8366l;=t@gIH?LjuO zcyR#hQYwi_p-{BVd(cGR2BX*0;FSElB!Z{Mnxk^wgO(E#-eo zsZUL<>}%*trOyy3wzjWl<1_2_^y%cC5axatV^hYtzy6zp?>tkqj^HUv{91U*=X~P}j3CYVqVagl~2l*1IPl zHOZ`s^MMmbFB2QWFyS>|&u|K6He2_3vFT*&%EeIhUMdI>4kpN#23Nn3p*T+B?BHE^ zzGLYq?e+nRe!yDFv(E@_9iW6@J^l4Lg;cc>*+>aAML!{9N{BwvJ`atmW+dDJPSS&w}kIE#(4=-|{#d9?D-NNB}IR zSYf8ogT!Xh$<4`jjth?-ADjgo6Ue%x=i*USnMefwu;=B9B2EuVN`2^gN&)MemndvL zw4X~ce4B*Jrso~KM8IoQ388PgwAW4Ztr}6w9M!5P(#AzNW`ga_9ZSN>sz_C1%9(TSx-psWT&{lVZKEAJ^+*jGIk z?T>E$@X{ukju^36?KD^ZYVMFXJP|4S%=nQe5eux*ml@ra)Dug{_V(9<*H(gb4rz(cLcfN2My zN?kiD_B0;*kFE^Hs=tyYltwW?w%Mu`oPOrztAiDxBNgJNLrqP2#9@2<$m1W65s{{z zv!D)|Xr%UDYYRo$`*LUlM~(c}mK?O;L2CGP=G_>^Qg-NYY~6_2vrvbXrcm z^!MsH2Rj9F|B&9nDe9p{Rhsu3QAqU-vFKGo>h2}3tP!c}`;mdE>e_ThUgRsKo3{-< zcXE!8F(~>i#kJ-B9%ytf(x7tr=_tCIRVX&H^^~ z0uJY^a=tv_)uaRwqO!xN?FcAEl;4x@ZwrNceJbbXl6<4D7I>T09vY{?r~xJX+c9?2 zI+>;;8SE#THm#CG%ORTyc8iVBd!w6wy`^6ynnp>geP|PT#}yzM4SGD zheO7?Nw3OCrdp8>M3{~PTV6XcL!nd{jF|yZ?&$EGt*rC_dVVgEH?XB&JgIq*miA!R zHQP3;TEUwd)i@O~Lcn2kOG@^v$@8`nf_@VU{ffZy@CPZ_Su0>)PlZ-cSl()<6~}cG zlbVDpa88kxIhk6u(xtuHX*hZPA%5@&v8X=-{6#-jrsU=v(0=`5>EDX`zmSYh0DFEzVlam_jx-u1~9ek zvwSf8fL)p((a24}7bV;aUzsn|RnuDzcx7(M6jCex%Y1r}9pGbTOc&_lNbvhZ8lv@M zarz(RsMbMOAr{)V%XTw3Cm)rF7Zr7pj^GpmrDD-lLNUY`Su3p(9qCKxI81U zx`r%Rf~y-6{uzRWO2sUX)D$ku__Dl;vs_b7$omMsmsx8l+Q=KAqwFy0CZ zHJuH2{Txq@_+y;ayRij)SQO7t9EPrx>pzth7msfIbb!kG6HSdJ= z8*QYg?U@R2)_zt|~ z(8C`R`7}MHuM&`sH;3=%rW0J)Glx1Z1*a9_Xn@D=5!2N(rZKMg#pqXR2JsM6zKDvBEMr0@P)N*LmY=5$5Ia!o18P&Ko) zk(|6eIgkON4&~wJk3w&~^;!wUzG<pKSmI#T9a1HI( zW7e&X|3uC(#;vTmk8P8bW-(_?%;^+CMljud7kzq_VJlc9i5vYL5%oxVtWX(Fk~Zg9 zww|H5m#R%SouQX<@jeo1OoV8p{Ze{Y?S30}?QY{-vBzd|R!()_kBH;g#|qhGg`#8cnKF+<$aavOojsEy*?S9bdn9Dm@Amor{`YX7 z`+nW`b6(f=bS-pX7l6&#uxIGaZPCGkn#pfK{k`@pSMWh7XI?K6=iCwp6ZGWvAleD5 zIqX?@3Q94LG|R&&&WF30X;v0p1+6}jCP@>+ysY2_6UDhiZ!ncO`>95Tp$73tY#Nt( zQU77uqW8y?)OZ}M?AvjHYl~`t?nNYr6Q`SRs7|hX{EsVmG=A)}bx$n)O)p{#U=csZ z+m$@2nW?n>)_*VmH8V9!G#T&B4tUxkz)bhpiJwCpSuW1Pa2aYUxJZ{LGthLTBwQ)P z$s5j2YSbq~?xf~5JBnBKhFpmQ`i0b*4=3T@Vx<#Xki5ZY~83Jdee{6lIgx7^x-D$y`z+@Fq?_Ra5cWF!@=*_Jn% zM~9sDw$H$uq?1{T&Gnbv#C<-!>i(?DG%y5Su}hwO4;8C__Tq3Om=71p*00>1usGhC zbwR@tkORC<)-}e0WOGZ@eibp%L*Jbhzu8Z!m#XXa*cF>HnT_CpI>-xC1-LDm06Bij z@p5{y!c5{M_B|~usKxc;CE2=pH*eeWoince%$*ep6r}EhVhjPatVc=s<@|OPM>VNf zTBwT4v{M;JF=gEH$RND@jy)bv5aesm=ddD3fEjh{+$^!`p&L#Cqy@agIY^g^%NnC0 zIGXy9#$zTZ1A?0Yfh_3kH5J^>6ZKwxQ*_FK`}N?G3c#g{cPEA=+N^~V^p1c09uh&- zL7GC5lk}|nDQRivGNH)8M300aGA--shW)KwsH|eXv6?EwES_VS_TB@K+jy~eKi1^r z5Elzi!p$GW%^q!;cg|NW(c(g>N`6YkM_M)-yWM+pp8=7iSW1PTOwUDBR7f`oTGYzZ zgMOd44H`CHyIbr_pGG}K{yxT<&}S!JE=!VP!D`3bZ?8sq3}X zsm6$;A+~uW&_s}klPg8frXsbD*D#cEei_`ECyEQt72tD-9ROj_YMkd?6*0@D4bf*A z8z28#LVizaM$~Jx>16Ejt+@3oH?7du zUtL$^u#=Fl^FH^84xr=I%2K0fxs-#Cy(c}NvTRkiHsB(up$7;%NYuLy)Usg-hn+IU2K#G=W=zC?1eOCD-e#wp+`V{z*^ zrB#_C3`SbrIs9}-`FRB(C9j!`XR!NRH6s6b6?XYx5Bt!m`a$9fJJ-wOriqspQ%P@* znYzqy3$k%JG%5HldAAKU%5f;QKRXi+rXKXi{L)mN!z z$~d(VpYKp>q}YisbWSylv%UO`L;b%Xn_QbDIgOnT9~HEr;<~vSEbx+Ot+6qTsAaw< 
zLXM76=qqVa9Sp+Q@Jq9_(7SEH9PQIB4z`aUFW&q%{hur=v|W#!}t2bHBlx7XsA z*K6I`8cC{rygU)}swIGOt5+S`pkJ)58aVY#VX})F8pv42`Fj=rcLPfx3x*YXIhjPv z!LLF$6JYgNRkwPD_VWBUCU_Z1O7m{M^U7bQvsaETojsC_yH=dlv%aoYthJ>H@5GXh zqRnU`(&GCrjJNHnQcRm^K85U?XBKoX=qTd)FR3cbivu71Qi<85QU<)wbQ)e%{)Pd8 zjO}WHT0$jLPY<~!`za9)W|2Rp&~+1+(-ooet)KPDOU1`O>#FfI7cz(s4>f^9Q-Ljg zpdr`y{|+%JOSr+-Hz z`<{K(-qp_#uJDn@yKWV~_J7|33o6ZE66si@UH*XUrun136#s>L=toyvHw3wnaOJy9>>1{3t=z)4VycN`N!TYJyDiRUe5svp5V26Jp+E-N_hKie zIKIxP4av@J5qV zYvuIu<JD_nGf*mw%JW{or8zYt|-7t_t->_IE3SDAXub?;^v($pjj#++YL zEsfipyZGex`X7@MjBAPyU6yj#oyV;Ut$O=)P+gP6;lkWVWf*QE<5*-GV5FKW zKg4mIegb$~BFHj!amfF3QJ*S954g${v+F(1ztfA-;)5XknbVqS?(a}XQIQq#Em+yO zuf;7swipse(NbVX-_^V1EHru+Fm|w2~jJ6XZG&ARt9fopp3bQ{zONwonfyd>$7po}#V z59`9^hz%RdrG%|c!WBRpusm{Fuz)*OXphvvjWzE4a`fld_a(#hExYO#noFw)Wu<1B z_zjsX!ntQ(Ch0BQ+$s+&=PrJ4cEtX)CeTmh?q_$>^H^$LGv=zU+y4@R4o{L-#L)4* z#2fdj3wQFD?s*eNE(mggWO`5URNr{MXq3ULo+=ZVY+_mS?hV|u5E^z3~psf&(+mouSQ=toDXkD%9P-e+fAv%tJ4W@uum zQxrJi@j+afLmeHWDOE&xtD|XTG>xV{aPJ-5v2AP!TP>rph8l5T!wV>3Db~W3se=Tp z&&+)4bJbJ@kDJDD8*oe}@aJ^G97vk`F~tUywK-78A)fPNf_r z8y(i$e@}g+C%V^(3m!Z6e@uz1!&r?-RXm}}N4Nkxad@$S@=Rh4 z9!<~XScExRcn*c#<3VH18%k&!kJPL0t%{r$DI<@an~?@r^nCg1WLr7V3)iV1Yx*x4 z_hHb+H|@l2@7%b^7O~K~C7_GUJ;!TmT!TwZ#x-@y`Nz~7Kx_QwLRDd?KS zhj6=C>RmtNP?SmE6pL$TXoFBpL#o|WLjk$iC#CL~CKjg@e7ZV@?fZ80Mw!p7gq_#4 z7S#C*$kZVZT8LtwrQhjMue-IU2X#kB((mP)5Y`u63Rgd`rOP{k_|B8_EjfWO(RD?c zx06-L-sR)o?Kz#E{^m~I5k&5|Z_KxSk#wq-TgHbLiHbW(W&)&Kg3uyf#&Vu*_cXEW z=GtZG7G@!$BNK^8ioX-7P<&LRR^XMw454tK6e z?O8c=i~;o&0=!op)M0hswn%OUe{hThyV??QilAbn?)jASfTuHCh7@&`*-*iQQ)w;E zoA{mYc3D}lAZi(yw9l;Ikc=-Cf?d2ssueF`V=BZTA#{#ui*yJyFVXQbXJ{-D%slP- z%Ynxk;f_IaO>Bl+PABhW`fh#;ee#YqIMkaGKS4G?Dd8DZ%&4OL>f&nTUbuV@* z`HjS2hUVfd<10uxQXIMC=pyiARGy9^h$Ar;lJS+2BXh=@*nLfFX`G7N&56X;xH07M ztd_N?>XQ;DLM}njYwCGfceM8sO%@mBFcJCJ_jI*@B^-oaXnZ1(mopG2$xc3)MJ3Ou zZnuNTz8HXfI|s?B#%;?ab<8kTL-s;)67?WVL%47SY`8ZOIwJ*G(fb5z`dwL7Jli^p zjEx75SI8%Za3zg%D>Ik0*0OK|6GyhaDJ9|VPcmGhxhJLE(weY(QG1G#Sn>L($5KBC zRbxCdqO5B41s^vaJ**VC4-2f_=~g^$LcZQgep|_wlsSe&6fDn?_yAxQJo`hhE!-~= z$MKAXNB*V3(5 zLx5yeVarex z|KByQd?R=MuG)IX4|aC@PmjCV!#;|;);#BGs?S~@O$-+*emC1rqldaK%$m3&BWHlH zmrkWP7OWaqJ~!F|7I+%L7G{xmj5QFt|eC>oi-xueR? 
zm}f_dBv)PDQn?F?d}xC4brzPD8?urk3vEv#^y}G;QDvvtNctsR0zuhv1lRFDg8!zX zsn9lZcV|xv;Kys!8Zg*jZ3@mopabxw?%-N@J>>?UzM}=80k`SL8n2JY0Y@I=R0`#Kyj4A zRJ4-sYe=Bk@a+eD`S8)Bzy1N|$is1q_es=yD3$Mi$We9jAXbmh5!AlLqdJEYZ^v1J zOpHBRA{&GM{lz+N9;=nVd~b(eQbvk2L}h9(Rg+3F9O{gf^H>|IuxETOhSM4m6YM9cM?KKlm2)o=sDF&0bw zdvKR6jrgHZ<5WU_Fk4&}*|p!-M1DSH3Qh?#@1$os!hZ2woSfH7!`@#X2dfR2H+8n2)A_hIB^ zAZHySMJF)W;L{kX>WepqQye2#ZZhd{(Z>e4=#tcj4M$%p1-A20>VFS@50tHCZB7Z3 ziGHaLQu*t9e7y0!!x+*{AXFK*8yX?Bmv~-?_+~5TwBDbj8jW>5Fg)J7w_JUVBZO#< zN!@HlP7BKBXsxD))oo}#dNWi)ZthX$qDyS4<3U$gf2POul%1(XjB#mxR(#LEMGR_+ zLM4-C)*r-{a@r=!lNUzwZ~TjUM|08iVvn1Ft}asD7jvYft%0PchS5RvbcJ13O`aQj z2=754?+w(-)-cOv8j{|-4C9L7%;0I{Fv422v51u0E3#Oj1D!@_U5^5U+PtVg00-Ou z?<*tkessL4skAe=1cGL-$|>jx)0J$LV(b&B=hM9(uC4SXdlwM*hZ{z1$z2>b{jzQ> z^je8RTI|L*q#BgK^1~*2I9DM*W@hn<)*B-+O&ACtPYk*+7LKRsjM}R^$|RThoGZ#J zL%w8ALm|@m#|C87vd^C>@N+E}w>6s=hj3E3?2qCQ$#7WIAGAK%1_im$=E`ny*l0tg zU2)`QcRu||pFs z(-l2zZ{A*e9s=G)kUCDxbz1N=4(6~zW@%SFXi3zCgsT9-b**sAN9!}W%pLqRVTFCe+(U)Jf?lOkVunvH2Pj<184(8z`cF-WOL?_{uSuR(YD=}N$#*azU$xf;|3Pww91$j$#ZCO z+#KmZJocZ66gBj(a*S17I~A$WOMDI@6y$ZFUV-umA}u#+hkz)Qe#vZP3BYU0o| zTO=;pD58voKjAg(L`TClmh(@%yktw>Fn7&db4q%-2BW7EV3@KZMkoL=|8XHmpnu;m zWu6H9W9~^WkQNeYTd1PN&|CQVNh5~gpxzq1w%S4l&gegbQ&z5UIoP2aJ4I8s3WG@| zaRHGcOjgM$M}pU>3=N4rOC0$08AsD{k*R32u=`}efDXo|$s1_(dL}d;u5V_$>c>ll z5nMJqTRmHPb+hyo)Xzi`%Yg+89!5nozL`htBsv($%zcY$#Q8GW?CD9nF!UGAUe05p zig7e2x;xeS1XD(D;o%{CO0o}hC<^4}YEduFo>qlS=;8ArdE4SBU7g5p%ZZ=NU9Bb# z#I4yc7`fK&6Wm}Ch{1?e*$y}rDs$xq*%^NHyF5iQMU9E#ptzYU1tJSXv#|;v-_!Hs z5nzp~;A4ZCobA8)V(j&emNO#a&n4AG(2aeprV?@)a*!85c2x>21+6UTaeux_ep&Da zfbT7jB?*KG3s%$Q{7?QAQfChPe0IRlqVbl8BFL$sNy<(~eSHu+7f*)vTDsVUNmK2Q zpHxf6{Ko#WLdw1fXf7vm-;ZGUfeQa8Zypsx0Hb*jGS)InX5Bk?{No36yr30$Z_j-i zc7dWq>qTICp?UG8U%BcP!~QKP1>kBA9}(XE63!}t?d)&~-{bX9E~|`WvR?wa!}*{& zhb$EP#)hZ&3jYF9_0d5CJuax_X;U)Y=P}YH?y9RYyEy7@7Kd}8S45QY;3EQs)=eA! 
z-}>A&Ly_O-A9&>QlZ!Ca?K-mSxtkl|-^;H+sI@3;52lNFhJDVqyIaTuC6d!WlxX+d z9C}-S^x*LkdrrfyYCYhPS#l5914=$87BOBe2PO3= zC;BeO%m2D*oyEvMG+E-+;y@rc#3?-Ihnt!)&U9SB+KT{X$U zM8|6L;sBO5#VbjN;cS*seoWjH%ikc!s#`{&(lec!Nb0D78qp0orUO-y1mk*9m#K$4 zmR=SX7Sx-pf;7;5&cubMB@~r+=|rQv-{CQC|* zno*RRJI+qcU9vV*Hn#B7NT6#a>iQK8lZ5icUZEa@j~y-ajroTA z99SKA=ZI9W-G3-M4+{IQ6Sth0K^0i(tCa)k!Z7oT{pW`F6T<%S^ELubQH(Ux!4%zg zlH00vla7DIEoV2nW6$X!dtrrNtuU-CGjGB{`5x)a0`3wwWm}5~7cB=W>T#|pk_JX4 zGa7pBxf8DqaWWV1%__E8RO?bP6LfkFcPLd)5uIIEiH>=cxkVW@HlPY|*18b8PeWqb zmHm5H&QjsQKiLwd7oSO}UvLgTK7!UCJkxpoNv{C^^bpF5_hb69tc@-TXar*+KEk?hZ3AX8B~LpP_JXINj-OyYM58)lobr=%0D;zThR>h!|tQ zNA2YVPGd%Ay~qE&3FVvbA8vbO7`BSTNUVtmh#rw^=$v(!A@#f;oaCFsG9#-9N(*9Y z79`dmD`>ug-E&E0!8hay9LK+^dkwo2hRB0<`OG3jE$rBA_Yt&UC06!a#%?9E`MdpvVTzcCWwo!;vbj9mxtM%Bj_p&;eY~hufT4i z`?m*i!k^CY?~`M{AF`9pqVOy16JzyCvCXF^L$g2r%wsEgdr1?5-~Wr6tAI$B9(5iY zDN-(w^zTeb&!3^=QPXwaBZZZe=s-N!OnKuOfXafmJ@B-}MG6HrQEa?f~*=x0LmbkZ{Mv*NS{leIE-$jc9U~9f4r% zYv`sq=69Xv7yj@QX}ZEQ^n;z&$_8A$WguG-tVR-+L>Ofg#sZ=g#|crrz;-HcqQgZR_po+=+&JcSj4p1t;o+bRav9vaRs8*W-Gq zp%t8o-FmR^CD>V0$?qIJ!AC1-LIru0=ctcDr*8+Mz7W#teJ?MmF3sAlYMK3qhT5y%ajtr0-e091r z`QmuGcl_QwLIEPP_9&wa?dY=jS_$(ha&yDP$AVloZQtycMsJ|SoIr%uF588&^YPaI z;G&}n{q*fW)e#3Z5x&5}h=Pd(tipjwq$lcuRH(yLw|}rP0YeMOHn}MdM?E%jpT<;} z)?n*v&`H;AS%Z$@MQ@gM3fg%w4&y|;HZ4ICYVgLdu=wurDx{}zIq1se?o`6W?Aq6D zG7BJ?Fw{)qyckYT72;Df|1C*j+QbM|~Pj5#)i>C?fq1dTNw zLPkT!D9AYI6U&X%ZH;LX#YK~a?e&=@eQ!c|?jD0my>Hqo!HaTV-746AX#kZYb};_pT>)+v4}>u+RR4r`NmpUc*@QjuSj7g2!P zBIG*cKJo!6M&nW6p);=VR&J@ySlT0)-!$tM&09HLT& z>|3Xdosyskt*f!4A6LYw0Y+dz^zsYwz1y z6LlD0`1Ea@a*VIyx4QKW1*$8RAY<|G$zSc&_9?k8zE;NUshW1|oBT8pWQ=?Jp$msM zFhCH9e3rleokxK6srVi}%5fK#l-;@3P}w`u&D7g=p6_%Tn3g4ckE^23$KN0fDe8gqHk{~z%=Brk2q*W%_D`seHq z*EYeLSh}KcHT*LA`0rmVxI7RC;KQ*VlfbdA$>g(U#MOUHz%Q&dV8u6H*h(hSJ~^x@ zRf}%LVT(GhL?xg}la8yogHwMK72%2)q*x#EVH{xO4Y@xNuh@i*NNeY`1jV^)WNoXscsV;6U`@`MNa||&QF!80C$G>^XtFJio)H(>D9SY zmlU`UuYRK`Xo>(34j{ws=2FL_g1FPH?C8*cjCi`QF4i}Pgw&k2|Na3c{&Vx|=Aeik z(r@uBR|Qy5on~C>de>;SvRT4gNr(chXaG`eySF~YVvMp&%g#6dyJ@%5bqLW)gD7pS zj_m?aYco5GbnV@~bT9xCq2syj||MWQpo^2^D$0qj_< z!|>Ep$cb`-A^=M8@kroWf9V3;w_qY*0%c$?a43MtqLYE41~%s(i}O2$_4`E?heU_e zZ=?E0^HrGrtjsNKDl%e2g@^pP8AcVEKBz$+7$#wl<Ic#a)Y_3fqLEJSH};fm<0`y0=#@U6m@F| zp!fZze*LQ`s}X!A>`5kMM!rjonzY?BOTyhk?szjo?fjqSU(U;zYn?$IG2cgXwV-Y{ zP03|CU%9nq>LVj!0h84H&sDb8h}C7%?p<+^ zC1_SjP?*jSxurymf3KUqWl-S0REn0B#@ zR4D66VH;HCeJZBS7JU6nLBJ4cF@La0hTbp*1Ni!80ezFlC3IbJa3WK)SYkiu&XH98uUv20paxugysOIR>zHKAe#lKdx#qB{jKTWwn#;yw z+miHHG#(D2>T8dm0)`qV;@N>|TZYL}y4oV*5|3Xt$0NXwLrXzq^Y{AVKk5BuXgOQD z#k&}S`yaFJ9MD69N{~ll0p~|MqrxjI(y%npsp%+iE&BRYMxv|`TL0^$J+Lp9(EtTm zAGQg>FUoP8^VMTKUb%-3kY79 ziwzpy-rmj*s;D#iuv-$Y*?{{^QYi$KsO8FS;5fHolDq%OR%ni6Fqbi{!n=l* z2hzUDHO5XdDNDSQq~e*O_h)}tg~X02E;?gg24a{rym9N&htS4&vHbV6V7b=pD#@o|zqJ&ZzX8pI=GirwdepebY>tF4if2=40=6Z& z*3S~2V^ku+VB`Am-r>gR5L=lkzaG`G@8=}18UC=7zs+vA=u408GOkqdY{RtJQf}K! 
zehxz-{w(OSPIGCi$&7tK+LI4L>m2`NeYZyP6iOA3zh5R{!%NJ<$$}W%fcC`G@uVkI zN->3|HO=mrLtq+W*%aho9wNojqLxqo4*u14Tf2dtJ`x`RT;)~Lf*zyIm&_D5M5;Gp zyiHiiDW&S~X;^rF$68F{_El++_Vm9exqwE>Ylsc5NF(@_E(Yf`%j^6Xg$jIO(TRgICZ+&KA>1Xs8ePx}fOzG_YX2!WMdpH+>L9pR+ zbg)YZ*?-u{uP0S{)G2t4KJ~4ONB-LV*mX`u_xkFI{Jj^Ffc;F8>}3vWl$vTCzrK3B zF9$M&#XV$RI__`(tLN0w`>7}R)MKklFbze+=Gf%1LEL>JaeO&8UP_>r`{eJErLgo% z#C($2i1$;ut>`2@;RtnyKn$_w3qu+K?HABUsFO#xJE@Bbq?={7~<;ce4Gq*a=|0+kuYGWrv@?-Q-C};IH>#?0c z4GS{4FNnGDv-gDm_1_ZpyE7Sw!CIq_RoZ2VB&0+Nke=Ui^i`~s|B^xvPz zCEPrDk3nl;=75ptm_p~V9t^+kPOQZuZ68&{M0$NaGuZGV$wmLi57i!J(^b1_&z0Ua zG%Nyp`vYwBk{{yC|69o%3>wI23h-z%GHo1+1MqLZsfhyghn_pRyYs4?An}#`%18R zncy5oTu}BnbGGcs=#E#eH-u_!9oNU-gO4vRsc4|({z>_Ht`7DEFM-PcmuDr&2VqDL z?j>MDUB>@XpHw!i$emEeoN^hZ9p%}cGy}i%;9?@$A;3p7aE2R71fM+rQ@QqWwPFL2 zJAK?tZ(Zt6o*d`JN}_$K7|9_fEQHnP2+1%QVLEHB`(MinaF`K>`_dhiY?4905Y~#RRwQ>V>DrI%2HNI*3t+vk{J0HZe*{qv^Oa%S0!%3BV__ptUk6NH} z&o7eYuFi5?A}%q_Hb$!*ckJXWuWXh~eysW$s#w!-b}^#wga20;yS<|QACV71?r?RP zw5?8N5web$AKC#rbA&#((-ba@W`o~H^7D=HrpLei&FXjL5r`p410ATNxM8b->4Pdg zow+!(aUwtJSU+c75B%icUfGYe=@gxkm{LJ4wZavBxgPpBo%2aNF5uKU_X_kwb%=fPA5Zu4-p6?TMLvcnEU)t)v^bXG`4XbQgi9 zheyrC5+r;ZP_6lPRtXaRJra-$nOIxa_!Cfyjj>JBEaf)Jq|J>ge@VzO#SSIP5Dk^v zoc06|$ek^KUkVqUOv;%~AQjpo5r($Td#P^De;p(IQl$$oEd(|6$-D@3+m+$ z*tt6U5p+t4-1O+%UA84ssI^JBEu81z?>9RHOjY_@%y*y4K$D#38MEA@d&sIqu@BTx zXsTae8pDr*8!o|TpF{lXX;sob|8qNQ26Iq`Vi`sLel75_C>+n4Tw5m4K8y-VkITEN zF=g$sdoGr~iYAyyG(qv^L&h|;8~Sl$S(_mBLkh5O`&VZNi&yW6(QH>I zPvl*%re5V@1-mQV!Negr1!3phAJ3YDZZ7AZ-E%u)fx(!o01>tJqibs}g*-3hM~2vY z`^PNP51O044Ate2^`f1x{ZDqtZ{f{+&M1CkjQ}f<=*$QdOZqt2%aWRn6GqX5OeiIK z<(PdF9kH^%*_x|&sWs>!ReGl2mu-L3KM)AjR`if|y1U)LvaueO#^81&02>>@@%>=w zSQxk2cM0dTw&^13_!3RV{6adNr|NJp=XS!ARFz2FM2m$I)^@F+Z7yih--T2gX&Jrp zlL1;b^h)sJ$E}Tz@rFDtZj0-;-n<#yUj?_fV*5O6ro804D#V;s1k9Or`!@UQ_{#-= zVFD)A-RM-!`?!@0HIXF?3kR)s`vS(?V!>uk@Y!2~G*#GehHxSyi#Yci99`itwL!n$7#Nx%Jy_cnNFVc=k*o0JUeN$3W_7y$ z8)m^smIov_@QU0jJ9~}9s(*?)a$z}XDKpeG$U3Kafn9<-;zwU7ef&FW1~+wwFgSSN zbOYR>A**o1#6fXP>73VNnS~V>)U$aXuS%WGruKEUv?J@znjyPd=edIEA~^THYPp-5 zV(Fv2iP>D!pI9_5lfCc8S287iyj^A0mHCJ@ZI6>zX6kPiKw&{zI{5E3yB1>oPiWNL zlPJ?nR>pPXL?GFkp5?0MH2-zsl`^F+a?VFmPW9@$ofLDz^d(%^s%CDer<6$Ik%)m- z?I}p`S)81^thQ7XcR2IbSv6Jvcd!)2gZL0hA@ZdpMdIPX{kGN1w)Y@OdPq>Z^yRhf zIJm9<-eV&zLHT4V{^+Y8LW)FH2_G{s>y__%mmM|X+6pw|9LE2`O)9?-Qftbp_Ep5j zm5DC|MO^jr8tSvneZ*i%H*64k?2O-NKlPW~`+VX&v@^Gvq z5tp>4*h@f6$#R$&OMdl$Zme?wACeY=UU!w^_-zIOBI2Z?iP1xnSv&p1yJ~|K($_a@ z@=^kmdUc`e>z@rL|98luChpM!^>H$3u^{}K7zu=7+y7U66ov}*ySC)SG?<6z!mET4)4opZZ?vn@#PPundsoC`git4Hz#cJ_h>Qu$!Z2yK3GXWYTeyT+X*~rc z9(PB4wW|HKnjn?Zf-1hrq~)oEhqe*!&rzPctuLqXs5nWzTzaM9qYbTYzH4pdTGDT@UB5(W_*zCXAO?2BpXVuDciRT_X6IkroCEaGBZHw_pN>$Z^<pQqzyzSY8{R~n*@0o>htRczJ>mA~bKr3KJRwl+umoFpcW z^3;@)W_B<~5F0aa1GQ0rzs%#T5L!i$s)kee{D;cwA`r6n5Wdre!#$_fF!16pM)|VHu*~S@16ksNg@WIb_WL1$ z*nGxYz2Kg%C=k4)^s~nx(2i>AZS8h% zCJcVRqLPE>UZJ{Kiwrhxna3YuQjgB6u)v(A$^7*9zZPKI4LayfPz&24~*S z)ZbE1Z`4Ix>1TD&NxrPq2MTh_$jC@*hl$xPXo#Kb$<<7?t;km^<=6tWg(VnTd)&c9 zCeK^&Qz;}yZ4wPWhC2BN4WaJbg{eds1O}p48tyf(>>_7V3!G*^GM4*20CbmfMQy*` z`SSfs5V{ZOjFs>%ja#jMz0?!CfK1S7m)=LbCm$}Zt=$%bGvMKIwkCzV9@8cj@0A8z z#zF35NB=aR?)Aw#5tF4(0R9{0A|N785sl)iqDi#yrsFQ#Z+#>{i8iH%?(jkc<<1|E z<1kMTJTy>Sm`)&k+mSr~dfdn0iJQUG*2nMYfoXZ*#J%5Z6|Pt!tf`u#SpLzafT-x# z+IF8%w>$0oQnii|YKk2VfhilxjnRltJ=4WLA$jP)NzP05>x9{!z zC6%wDAy06T7OR|}DDrwv0D2ZeBJ{FH+FSsTvX@Ia6hhD+8&9d(R4H zh;N+!)!9B7+YGQQgwQH=_18q)GW)HS0_u^#;XleVz@mBKqs0IFGHL0H1Z%$EN@wV0 zIeV6zf5TZM$z5mb%I8h+_p)zQZK0EHCy9_9!4HjEoLucMOK6vu)B zoAIdaPs~C%b;sD~fDU7TcYzM?ls;P+ss4LEqvGlvzDE_DqoD4Qaa z%qEinNCyk4i19A?`DMljxR0k^Ry8u$oiyA$hWgst@vGACf0+i3P3wky3)VKK4#ZU% 
z(iP?0*64U_{f_!LtHZB9;uA>5!qCPneC+pCO%v-FemPFBs-VLt51sqTRV=J&V@?52 zcvS>*QK-)RyAWo0Lr27h1kl}XKA+bD2whA z_40I(3>?4a_wEf_ZT9&7KejykeMQNdxP8gi{G~WpzYxPOF#i%8Ide}ymzY%34QM_X zX@Tj~m8*9Rm+}fn0)7eHN71IyqB1+}5LqQR?@C8}4aKnGVz&u?T7oy3?JfK9x+W-s zhnHR6XQl#< zkwD#4eYVZNaf0dQ_`6Wy%3pR4p9S20A{93)TFg$ra$@g)&q9%n5UEA0T`;+oq8Lz$ zy1+l&cF0|n#&J!Z40*wint(XX`}_w1675upMn;kGxOwsXV6&IyanZ`gKN`=r0Q5=m zba%QjP@es}R$Ee7^j#GTTUzzR6Z}-k5Ki%L#Ha@Z$%`B@53rI)(Tn>85_y(x!dTln z9(!HuZ?gDY8L^z`!qD1yumTD12mx=QGpBU6DaYBII0wZ zDXTG0?3G3;@(Dv$e;^-1nxyV@g^h!msiYmBmjw2Tu`$w`vQz0}!b=fnxM*^iC?v4j z9ecbk!|TQofK5lrO+@C|FIYIL?gMcn=E)>bte1%{BTmGHwMP(HmQPz8J#(nh7X!Km zLq}^l9sg{h&rj5#9^MeHi>><}(**T^b{<0ADV z_MY^W*e}PxCR*lJh?kz5X(t(;ra;hU6rGhw9w6M+P-B!JDfn zealqT#cbNm7xmhG|1xQOr(cNu*BxG7h6^mtT9QjtT=gyHV59obazL33oAdUQrDt7- z2+d9IxK)Ux8)}{F_R(|fBg#)G>r5?_p8&HkAz5z$%))6AEkIpr(lnmh$UlODp}RYd zV`%kw5GjQ8GP4QY_oy}8q*wK07jvF?OT;(w0Q;PR+p%Yi zJI52Z-6VvvQ*wRM1uhV;7-e6jw41WQUQ)0kTK-w?d6WMw%+P7t8Zl9%#XQfBN5@f2 z*rZ2yLBuf?8l|Hkwmhy)!DzhLrB1M{QHN{pEgh< zF}=M)gb?e0=O);l9ZAvaSlKv~+NIp(46$>R%g<+th&2AEAmqhTG*YdyUG>CyYIlUc zMYf^`P7YuC^))q~q}y}Nw9(DA*;?`~`!D?dM=G+9-`OG11aN2P(rrbY_DAN;b(*mO zB&e`A`*>$b!J+`T8rN`< zfDB%1Ns@=|EBU_D{OY*RT4QbL8H<$EmCzVNQzSV>#IyVFXjQe#I0nH$Le1%fURKUf zIOsq#XWdz?a~Ag7^xo_e+-iH29&+L@bq&5es_xnldY`?ppO;Thm#ML$SQM+@?|GsL zV*TW&a@FS}4wcrj2SR2DBSq8CR#@Me*{ccEb7{?zeb2kU8nQz8B<&p>nE(16y6upX z-MwLDiqiGKDe}lPWrwC$JvkmAM%76M-t528_=XpFI%%v)(fteuLv$NtxLEE{aF z6-*Ct$ZROUsqa_~U@#?MMsuhoC(wlv%>-IN&gZ+=8y_(vuYPGzc6`3al?`!v?VSCW z2Rgt{pdQ0i81IY7J%54)1Vb!XLa)l3z4iy*py74l>$L1;l)f=ZA18rI!NJ-7{Jhxz zU&W96H!!)UE~!|A*EU^3OhBc&(QT15_%EWkhF$*V54bk#N1^If&d%+bA$7-eQmYW_ zB|b-z4~*a1Uf&RwaXjY{@ttx`RaAXn-s?1+{O(|b^yiMneWnFZZ1I(O{Z+E7o-W z)cdMS5L?C2@$->4W#bB0B_wjS4p-@trmp{ii3wIRQ!yBGoBpkB^Zylfh{d+u`X{hsgX$~;(gW$;#DA~`ixwA|YEI`ylJo-Z71M>LXtIlJBS6@4Pk zDSq-*ouCIy?(G7qe`jm0X-bpv+0Ac8`bqxRurlxlM$W_A<}vCSD>qHgo1!HJUF_We z=6+(p`vTvwo5a~Mg=`eT^uDP^K(5QEurCz10RJvyaNWhN2t|EkeJ(3}I%i#8dZbPyU>pU4X?6yyos1UzHWlRUjFTW;!I9djgRx%eq#O*4! 
z);gY8je71a>0jyUCIFX(-3hfqP^3Vn2!{rg>>7}R2btE3b|ZKEZTbP;mZK@SwfD{k z_>P~Y7v{L>AD4XV)2E^p_4g1QPd{*#W zX@c1ZVc+JT=gk8VQ)kv6k)aT$!WGZX>-`ib>@}xfW^mu zQF}Z8SkU%5);Vy}O?Z(|xdPL0+K?G=Sa%f!+xx3$PZ#Q4dA`8?fgCcR!A=iEoAIw( zZMzohB=jSY7u~EimtV_y++<5~fYpZba3TV|Th-3VdNrQRN>1)IeT3p&M1`h6ALm@Y z1P#;rm{eWIFp!4EQCW&xV}rR_6``h?YNl#tSF#&HG~M5J=W7cq{<6+Hh8O17Kl2|r z9@(BKIQfHXzELVL=eV~!*QE^1zS{0UZP(*|OQVNQ$Z;wx93TiZK?y(7BL&Rj@4ez+ z%>?T%zZv4Ow|rT>`LL+`#|X@pBHm z2bK+^nCK!2QlrexBNX+_a=yW2vDxM`7stYRH{&8{kk#z3&VLs)V+J)sF<;M z-3KReiEPZ78g}O4w%cq9`URcnq|m!9u6yAHPU+JH2R2Dt^U0fGFpH=KXE(waR`^%# z-GCF9#*<4DfL~>&dTHQ?3Hsh84eL#LL))}tp(ZF1uZ>f%hgP8}haauQYyu8AHg@(R zzdH9Hf6kqA00G$`Bj*uE=p7~=cB9)%1;A0Kt(mPv0h!kMuB!zq?cBax;r6F?MXq!| zMFWbVRRNwPHEbGUs>pHxc}Xm%XC)VjdfEe4{Kwig;=eFa7r?4SlQJ?bGZ%A%(c9cY zEEKwn39_N_3^;D??T9|a8h2w1t8VkC75mp^Xb=+Z+rRStY@15e(}mVmm~k)ibC1EMKAIj5=!>%u>4$JDMA z)rGwBu_AA(_j=&u?1n;|$Gi~)ciX8UN`^ zdg%qfnGX_CiHVWvj{eaxDf}66Be#9U8+*gjQpcM_Nw?9Y=2b(*EXP5?Sq{}Gf)r0^ zN}H;-MBvwM3CoMKUU?(0Xm%0!$>3A6Cnsbar0p|ok=66lJr7k}3b{C?3aX?Xx6;EB z4)=^FiiX_QBPpTItGMdaewH+H9m5*revdS%A5B%v-F!Zd9^7bR&2wXt8Evz^Y-m`C zly+}o*isgFp$!zJE^mML5C!z+nwd`6BzGSmzNV{~+8n`GsWQUu*R2;gq-aTyXrA#rUXFY( z3s>3+whX9E=|sK!VAXKjGFE}nq@SSgh)SZaqe4^@v$2|L^-;tQlf{X(5jd+4)5BIv=th# zpP3(Abyg{T-Q{?DtrxJk6I5}4BDM?A7dZp0L|Vw#D!&fJk}%=2;0RlZ=bs;(aZFdf zU|;;AXU54SNCL^rBexOQcfEb6Cb#qJUf6$4%G~T`8N#u_?3(H6#@xj`pEr?J^YnyC zeLBY$E4X|Wm4|*>{?;~G_Ovk!)Z;soVJExHX#HpTSqw}iio7aGBc+3aDuZx-UYvJ;AUE9Dza6# zgBv!gvlpzcu0AgLviv_v!l5J>NQ&nut;;PhH|qr1el7sffyDPA@DEed-dDZKi(z!U zes|QC0ryjy8vocglE2c1=gS|;8UEw%n>d*7P!Hm!=B45&lD+wSJaSC)(7Y^I zx*^)$g9C7U;!JED^epXV3JMSQ_eptYiYYMOOLXtb2<1+CSW3&@?rFMrbJK}-a9V(7 zrP3s(;bD22#9<5pKd$j(n?RF@79P-XhO!T5lRaF7tJ4}T)PU%0 z92v55#^yoV-bVI$%noPAc=C3^=+`%FbK|<8P)@`n^PKzolwx&v_@_IidDO^?O)l9Q z@$Dce7h>Y4(B~U=HeVlxJE&ZJFN8)=BzZz{m8VCPj?>aK@NIoh^j{9ZOI@g^t8X6x zF@rfVSqR(Hxe=W$Ubee)ZewdVEpA}^9k)WDa|06K?-T&$M$j1ksrw+JK%jc}7ejm+ zBrKeJDL1U>HE~d4*KxGfSf@N>E4o56Jt^r|0KeW|BqX^c26%JtEt2JAEupSpyl%11 z=WY`6(k`c~QvH$a#9gL6_3gE$`1Z&0FPbf$ccE*%cmI6494|iz+uhk2 zJs@et4`xe<+DGS;_{+?FdMG2ot2>iPq}WB25>~e_ge0rAXW4y*5*%#5yW7**G&J6R ztn0l_Bi`z5ax!#VWpOt* zH>Y)^%02?Ld{34yYEBu4RIpJs^XOF9$!u_Sl7S{d(lVxRMb+P9f=4d`u7tckJehd; zPBP27Oy?J=FN>&a1Z!5QF$+W%c9N(GpnOvk&BTk(@~M$X)o1dG5z|bN6MA+W~%u?uIKdU1m>#b1J|{@ zzHA$|Tv=NhBUU`85-7REa3!-Mn$?IGYP|xol8pupMaV2tnMFL{nN{-@V{K|(A&Yf@ zj7#g6etq|{?0r?-9A`s>T?hAqQvx8=SI7_V4~_!;F71dWVkyC=TbPLfp|G6t^74U} zmX>k+2UTht8yZ5_ljHP=5SNxl`#QKb6oWHJUnv8g|Fr|YUNyT z5Gphb{3CXuF|fAm-5ZX^Ys1k|ba6+;Bz)HWSqGeG#?!uI>C)oQ&9C1fSX&O*8cRw_ zLW(l7Dm|<7W%^wJK>2munp~JM6Kj%z0T_KJ6F?BIGhFG4G9!Q`a=UVECK7YR9R|J{ zy{a3MY!niwe8kOvjfYFUVo8U#WrYC(=}WelMCI2B$^L#JyPrC={x9IH5om&zcQ%GD z>ehT-@nIe!THnkF1y6dP@X^~~IP3g|Raml3Sv=G~cGm;2WQw}K1mh_3zVDcK{smdO zTuWD!-VAP*$s=npW8}#TwXPeJ)NsV$@_#*V`~H17GS#={=V%0GzW(^hOom9Ile_ER zg59+vWQr5mc)^Ew?61ENp*7?;HqzVBrPS3dmCLHQO#i$mzsR|BLM_{)V zbC_Fqi=apJt$8S3V%X>?%c?B% zA$kk#qh(BU~Jt8gzW?M~kk&|3sl^4Rngf$tdnl?IXiYz)Ph6##_HTKeQ|Bc{$o z%?Z1=*bv0NH;0|Hr4+;psO+XdcsYw6M$OTC-ypXog>h)E1k6(pIa>QEoe|7^7J{n0 z@V_1RQFifH2D}#4cQBf%NZjGb>fs+l^d~KXre;J=@O@!vsbzf|!vdDR#B0v~-=l$i ziGRS6%nxaPth-HLqP*ZIpngdW-S8ZF8u583|2|2quKPbbK%|SMNaVR06Mn&wYEgDm zUA58#Ml4-xh%~+;^p9{Uby7Y;VHR*>^A08hDPU|LRk4&=`~z^E6Mi!AUdz6B>iazo zjF)3lpRC4cfwSxjTZsc<1HF~sHD1UM_VNjnyC{X1kGdkze+OsyB}5U*pIT^|pI4av z$dK^YQfjBd>QGb+i>P21+J8jS=%3)F!W^}aNZclR2A-L>Qu$A`Tg4;^CmMbd_D8{l zyXcV+@}Kn-q@79=1kul9Qk3HUHs@J1PLn|Ren9%gKl8i&COYfS%0Cd?p8~-gXl~`- z$tbC01j13^z&vubF}z9^G5_0H-?dEXyKe9U8*c-^b#)KA{}&{CPrPLV%8txG-*5o@ NObpEpp6EG;{SS&-9>)Lx diff --git 
diff --git a/hadoop-ozone/docs/themes/ozonedoc/layouts/_default/single.html b/hadoop-ozone/docs/themes/ozonedoc/layouts/_default/single.html
deleted file mode 100644
index ca2e1c4a65e00..0000000000000
--- a/hadoop-ozone/docs/themes/ozonedoc/layouts/_default/single.html
+++ /dev/null
@@ -1,38 +0,0 @@
-{{ partial "header.html" . }}
-{{ partial "navbar.html" . }}
-{{ partial "footer.html" . }}
[surrounding HTML markup of the 38-line Hugo single-page layout omitted]
diff --git a/hadoop-ozone/docs/themes/ozonedoc/layouts/index.html b/hadoop-ozone/docs/themes/ozonedoc/layouts/index.html
deleted file mode 100644
index 17f0246abf403..0000000000000
--- a/hadoop-ozone/docs/themes/ozonedoc/layouts/index.html
+++ /dev/null
@@ -1,37 +0,0 @@
-{{ partial "header.html" . }}
-{{ partial "navbar.html" . }}
-{{ partial "sidebar.html" . }}
-{{ .Content }}
-{{ partial "footer.html" . }}
[surrounding HTML markup of the 37-line Hugo home-page layout omitted]
diff --git a/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/footer.html b/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/footer.html
deleted file mode 100644
index 5aaeed9e1ed4a..0000000000000
--- a/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/footer.html
+++ /dev/null
@@ -1,22 +0,0 @@
[22 lines of deleted HTML markup omitted]
diff --git a/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/header.html b/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/header.html
deleted file mode 100644
index 35ba4c8042f94..0000000000000
--- a/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/header.html
+++ /dev/null
@@ -1,34 +0,0 @@
-    Documentation for Apache Hadoop Ozone
[remaining 33 lines of deleted HTML markup omitted]
diff --git a/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/navbar.html b/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/navbar.html
deleted file mode 100644
index 3cd8609657838..0000000000000
--- a/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/navbar.html
+++ /dev/null
@@ -1,36 +0,0 @@
[36 lines of deleted HTML markup omitted]
diff --git a/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/sidebar.html b/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/sidebar.html deleted file mode 100644 index 7fae50d316a87..0000000000000 --- a/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/sidebar.html +++ /dev/null @@ -1,52 +0,0 @@ - - diff --git a/hadoop-ozone/docs/themes/ozonedoc/static/css/bootstrap-theme.min.css b/hadoop-ozone/docs/themes/ozonedoc/static/css/bootstrap-theme.min.css deleted file mode 100644 index 5e39401957d87..0000000000000 --- a/hadoop-ozone/docs/themes/ozonedoc/static/css/bootstrap-theme.min.css +++ /dev/null @@ -1,6 +0,0 @@ -/*! - * Bootstrap v3.3.7 (http://getbootstrap.com) - * Copyright 2011-2016 Twitter, Inc. - * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) - */.btn-danger,.btn-default,.btn-info,.btn-primary,.btn-success,.btn-warning{text-shadow:0 -1px 0 rgba(0,0,0,.2);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 1px rgba(0,0,0,.075)}.btn-danger.active,.btn-danger:active,.btn-default.active,.btn-default:active,.btn-info.active,.btn-info:active,.btn-primary.active,.btn-primary:active,.btn-success.active,.btn-success:active,.btn-warning.active,.btn-warning:active{-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn-danger.disabled,.btn-danger[disabled],.btn-default.disabled,.btn-default[disabled],.btn-info.disabled,.btn-info[disabled],.btn-primary.disabled,.btn-primary[disabled],.btn-success.disabled,.btn-success[disabled],.btn-warning.disabled,.btn-warning[disabled],fieldset[disabled] .btn-danger,fieldset[disabled] .btn-default,fieldset[disabled] .btn-info,fieldset[disabled] .btn-primary,fieldset[disabled] .btn-success,fieldset[disabled] .btn-warning{-webkit-box-shadow:none;box-shadow:none}.btn-danger .badge,.btn-default .badge,.btn-info .badge,.btn-primary .badge,.btn-success .badge,.btn-warning .badge{text-shadow:none}.btn.active,.btn:active{background-image:none}.btn-default{text-shadow:0 1px 0 #fff;background-image:-webkit-linear-gradient(top,#fff 0,#e0e0e0 100%);background-image:-o-linear-gradient(top,#fff 0,#e0e0e0 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fff),to(#e0e0e0));background-image:linear-gradient(to bottom,#fff 0,#e0e0e0 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe0e0e0', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#dbdbdb;border-color:#ccc}.btn-default:focus,.btn-default:hover{background-color:#e0e0e0;background-position:0 -15px}.btn-default.active,.btn-default:active{background-color:#e0e0e0;border-color:#dbdbdb}.btn-default.disabled,.btn-default.disabled.active,.btn-default.disabled.focus,.btn-default.disabled:active,.btn-default.disabled:focus,.btn-default.disabled:hover,.btn-default[disabled],.btn-default[disabled].active,.btn-default[disabled].focus,.btn-default[disabled]:active,.btn-default[disabled]:focus,.btn-default[disabled]:hover,fieldset[disabled] .btn-default,fieldset[disabled] .btn-default.active,fieldset[disabled] .btn-default.focus,fieldset[disabled] .btn-default:active,fieldset[disabled] .btn-default:focus,fieldset[disabled] .btn-default:hover{background-color:#e0e0e0;background-image:none}.btn-primary{background-image:-webkit-linear-gradient(top,#337ab7 0,#265a88 100%);background-image:-o-linear-gradient(top,#337ab7 0,#265a88 
100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#265a88));background-image:linear-gradient(to bottom,#337ab7 0,#265a88 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff265a88', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#245580}.btn-primary:focus,.btn-primary:hover{background-color:#265a88;background-position:0 -15px}.btn-primary.active,.btn-primary:active{background-color:#265a88;border-color:#245580}.btn-primary.disabled,.btn-primary.disabled.active,.btn-primary.disabled.focus,.btn-primary.disabled:active,.btn-primary.disabled:focus,.btn-primary.disabled:hover,.btn-primary[disabled],.btn-primary[disabled].active,.btn-primary[disabled].focus,.btn-primary[disabled]:active,.btn-primary[disabled]:focus,.btn-primary[disabled]:hover,fieldset[disabled] .btn-primary,fieldset[disabled] .btn-primary.active,fieldset[disabled] .btn-primary.focus,fieldset[disabled] .btn-primary:active,fieldset[disabled] .btn-primary:focus,fieldset[disabled] .btn-primary:hover{background-color:#265a88;background-image:none}.btn-success{background-image:-webkit-linear-gradient(top,#5cb85c 0,#419641 100%);background-image:-o-linear-gradient(top,#5cb85c 0,#419641 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5cb85c),to(#419641));background-image:linear-gradient(to bottom,#5cb85c 0,#419641 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff419641', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#3e8f3e}.btn-success:focus,.btn-success:hover{background-color:#419641;background-position:0 -15px}.btn-success.active,.btn-success:active{background-color:#419641;border-color:#3e8f3e}.btn-success.disabled,.btn-success.disabled.active,.btn-success.disabled.focus,.btn-success.disabled:active,.btn-success.disabled:focus,.btn-success.disabled:hover,.btn-success[disabled],.btn-success[disabled].active,.btn-success[disabled].focus,.btn-success[disabled]:active,.btn-success[disabled]:focus,.btn-success[disabled]:hover,fieldset[disabled] .btn-success,fieldset[disabled] .btn-success.active,fieldset[disabled] .btn-success.focus,fieldset[disabled] .btn-success:active,fieldset[disabled] .btn-success:focus,fieldset[disabled] .btn-success:hover{background-color:#419641;background-image:none}.btn-info{background-image:-webkit-linear-gradient(top,#5bc0de 0,#2aabd2 100%);background-image:-o-linear-gradient(top,#5bc0de 0,#2aabd2 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5bc0de),to(#2aabd2));background-image:linear-gradient(to bottom,#5bc0de 0,#2aabd2 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff2aabd2', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#28a4c9}.btn-info:focus,.btn-info:hover{background-color:#2aabd2;background-position:0 -15px}.btn-info.active,.btn-info:active{background-color:#2aabd2;border-color:#28a4c9}.btn-info.disabled,.btn-info.disabled.active,.btn-info.disabled.focus,.btn-info.disabled:active,.btn-info.disabled:focus,.btn-info.disabled:hover,.btn-info[disabled],.btn-info[disabled].active,.btn-info[disabled].focus,.btn-info[disabled]:active,.btn-info[disabled]:focus,.btn-info[disabled]:hover,fieldset[disabled] .btn-info,fieldset[disabled] 
.btn-info.active,fieldset[disabled] .btn-info.focus,fieldset[disabled] .btn-info:active,fieldset[disabled] .btn-info:focus,fieldset[disabled] .btn-info:hover{background-color:#2aabd2;background-image:none}.btn-warning{background-image:-webkit-linear-gradient(top,#f0ad4e 0,#eb9316 100%);background-image:-o-linear-gradient(top,#f0ad4e 0,#eb9316 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f0ad4e),to(#eb9316));background-image:linear-gradient(to bottom,#f0ad4e 0,#eb9316 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffeb9316', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#e38d13}.btn-warning:focus,.btn-warning:hover{background-color:#eb9316;background-position:0 -15px}.btn-warning.active,.btn-warning:active{background-color:#eb9316;border-color:#e38d13}.btn-warning.disabled,.btn-warning.disabled.active,.btn-warning.disabled.focus,.btn-warning.disabled:active,.btn-warning.disabled:focus,.btn-warning.disabled:hover,.btn-warning[disabled],.btn-warning[disabled].active,.btn-warning[disabled].focus,.btn-warning[disabled]:active,.btn-warning[disabled]:focus,.btn-warning[disabled]:hover,fieldset[disabled] .btn-warning,fieldset[disabled] .btn-warning.active,fieldset[disabled] .btn-warning.focus,fieldset[disabled] .btn-warning:active,fieldset[disabled] .btn-warning:focus,fieldset[disabled] .btn-warning:hover{background-color:#eb9316;background-image:none}.btn-danger{background-image:-webkit-linear-gradient(top,#d9534f 0,#c12e2a 100%);background-image:-o-linear-gradient(top,#d9534f 0,#c12e2a 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9534f),to(#c12e2a));background-image:linear-gradient(to bottom,#d9534f 0,#c12e2a 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc12e2a', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#b92c28}.btn-danger:focus,.btn-danger:hover{background-color:#c12e2a;background-position:0 -15px}.btn-danger.active,.btn-danger:active{background-color:#c12e2a;border-color:#b92c28}.btn-danger.disabled,.btn-danger.disabled.active,.btn-danger.disabled.focus,.btn-danger.disabled:active,.btn-danger.disabled:focus,.btn-danger.disabled:hover,.btn-danger[disabled],.btn-danger[disabled].active,.btn-danger[disabled].focus,.btn-danger[disabled]:active,.btn-danger[disabled]:focus,.btn-danger[disabled]:hover,fieldset[disabled] .btn-danger,fieldset[disabled] .btn-danger.active,fieldset[disabled] .btn-danger.focus,fieldset[disabled] .btn-danger:active,fieldset[disabled] .btn-danger:focus,fieldset[disabled] .btn-danger:hover{background-color:#c12e2a;background-image:none}.img-thumbnail,.thumbnail{-webkit-box-shadow:0 1px 2px rgba(0,0,0,.075);box-shadow:0 1px 2px rgba(0,0,0,.075)}.dropdown-menu>li>a:focus,.dropdown-menu>li>a:hover{background-color:#e8e8e8;background-image:-webkit-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-o-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f5f5f5),to(#e8e8e8));background-image:linear-gradient(to bottom,#f5f5f5 0,#e8e8e8 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', 
GradientType=0);background-repeat:repeat-x}.dropdown-menu>.active>a,.dropdown-menu>.active>a:focus,.dropdown-menu>.active>a:hover{background-color:#2e6da4;background-image:-webkit-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2e6da4));background-image:linear-gradient(to bottom,#337ab7 0,#2e6da4 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);background-repeat:repeat-x}.navbar-default{background-image:-webkit-linear-gradient(top,#fff 0,#f8f8f8 100%);background-image:-o-linear-gradient(top,#fff 0,#f8f8f8 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fff),to(#f8f8f8));background-image:linear-gradient(to bottom,#fff 0,#f8f8f8 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#fff8f8f8', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-radius:4px;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 5px rgba(0,0,0,.075);box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 5px rgba(0,0,0,.075)}.navbar-default .navbar-nav>.active>a,.navbar-default .navbar-nav>.open>a{background-image:-webkit-linear-gradient(top,#dbdbdb 0,#e2e2e2 100%);background-image:-o-linear-gradient(top,#dbdbdb 0,#e2e2e2 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#dbdbdb),to(#e2e2e2));background-image:linear-gradient(to bottom,#dbdbdb 0,#e2e2e2 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdbdbdb', endColorstr='#ffe2e2e2', GradientType=0);background-repeat:repeat-x;-webkit-box-shadow:inset 0 3px 9px rgba(0,0,0,.075);box-shadow:inset 0 3px 9px rgba(0,0,0,.075)}.navbar-brand,.navbar-nav>li>a{text-shadow:0 1px 0 rgba(255,255,255,.25)}.navbar-inverse{background-image:-webkit-linear-gradient(top,#3c3c3c 0,#222 100%);background-image:-o-linear-gradient(top,#3c3c3c 0,#222 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#3c3c3c),to(#222));background-image:linear-gradient(to bottom,#3c3c3c 0,#222 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c', endColorstr='#ff222222', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-radius:4px}.navbar-inverse .navbar-nav>.active>a,.navbar-inverse .navbar-nav>.open>a{background-image:-webkit-linear-gradient(top,#080808 0,#0f0f0f 100%);background-image:-o-linear-gradient(top,#080808 0,#0f0f0f 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#080808),to(#0f0f0f));background-image:linear-gradient(to bottom,#080808 0,#0f0f0f 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff080808', endColorstr='#ff0f0f0f', GradientType=0);background-repeat:repeat-x;-webkit-box-shadow:inset 0 3px 9px rgba(0,0,0,.25);box-shadow:inset 0 3px 9px rgba(0,0,0,.25)}.navbar-inverse .navbar-brand,.navbar-inverse .navbar-nav>li>a{text-shadow:0 -1px 0 rgba(0,0,0,.25)}.navbar-fixed-bottom,.navbar-fixed-top,.navbar-static-top{border-radius:0}@media (max-width:767px){.navbar .navbar-nav .open .dropdown-menu>.active>a,.navbar .navbar-nav .open .dropdown-menu>.active>a:focus,.navbar .navbar-nav .open .dropdown-menu>.active>a:hover{color:#fff;background-image:-webkit-linear-gradient(top,#337ab7 0,#2e6da4 
100%);background-image:-o-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2e6da4));background-image:linear-gradient(to bottom,#337ab7 0,#2e6da4 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);background-repeat:repeat-x}}.alert{text-shadow:0 1px 0 rgba(255,255,255,.2);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.25),0 1px 2px rgba(0,0,0,.05);box-shadow:inset 0 1px 0 rgba(255,255,255,.25),0 1px 2px rgba(0,0,0,.05)}.alert-success{background-image:-webkit-linear-gradient(top,#dff0d8 0,#c8e5bc 100%);background-image:-o-linear-gradient(top,#dff0d8 0,#c8e5bc 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#dff0d8),to(#c8e5bc));background-image:linear-gradient(to bottom,#dff0d8 0,#c8e5bc 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffc8e5bc', GradientType=0);background-repeat:repeat-x;border-color:#b2dba1}.alert-info{background-image:-webkit-linear-gradient(top,#d9edf7 0,#b9def0 100%);background-image:-o-linear-gradient(top,#d9edf7 0,#b9def0 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9edf7),to(#b9def0));background-image:linear-gradient(to bottom,#d9edf7 0,#b9def0 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffb9def0', GradientType=0);background-repeat:repeat-x;border-color:#9acfea}.alert-warning{background-image:-webkit-linear-gradient(top,#fcf8e3 0,#f8efc0 100%);background-image:-o-linear-gradient(top,#fcf8e3 0,#f8efc0 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fcf8e3),to(#f8efc0));background-image:linear-gradient(to bottom,#fcf8e3 0,#f8efc0 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fff8efc0', GradientType=0);background-repeat:repeat-x;border-color:#f5e79e}.alert-danger{background-image:-webkit-linear-gradient(top,#f2dede 0,#e7c3c3 100%);background-image:-o-linear-gradient(top,#f2dede 0,#e7c3c3 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f2dede),to(#e7c3c3));background-image:linear-gradient(to bottom,#f2dede 0,#e7c3c3 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffe7c3c3', GradientType=0);background-repeat:repeat-x;border-color:#dca7a7}.progress{background-image:-webkit-linear-gradient(top,#ebebeb 0,#f5f5f5 100%);background-image:-o-linear-gradient(top,#ebebeb 0,#f5f5f5 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#ebebeb),to(#f5f5f5));background-image:linear-gradient(to bottom,#ebebeb 0,#f5f5f5 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff5f5f5', GradientType=0);background-repeat:repeat-x}.progress-bar{background-image:-webkit-linear-gradient(top,#337ab7 0,#286090 100%);background-image:-o-linear-gradient(top,#337ab7 0,#286090 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#286090));background-image:linear-gradient(to bottom,#337ab7 0,#286090 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff286090', GradientType=0);background-repeat:repeat-x}.progress-bar-success{background-image:-webkit-linear-gradient(top,#5cb85c 0,#449d44 100%);background-image:-o-linear-gradient(top,#5cb85c 0,#449d44 100%);background-image:-webkit-gradient(linear,left top,left 
bottom,from(#5cb85c),to(#449d44));background-image:linear-gradient(to bottom,#5cb85c 0,#449d44 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0);background-repeat:repeat-x}.progress-bar-info{background-image:-webkit-linear-gradient(top,#5bc0de 0,#31b0d5 100%);background-image:-o-linear-gradient(top,#5bc0de 0,#31b0d5 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5bc0de),to(#31b0d5));background-image:linear-gradient(to bottom,#5bc0de 0,#31b0d5 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', GradientType=0);background-repeat:repeat-x}.progress-bar-warning{background-image:-webkit-linear-gradient(top,#f0ad4e 0,#ec971f 100%);background-image:-o-linear-gradient(top,#f0ad4e 0,#ec971f 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f0ad4e),to(#ec971f));background-image:linear-gradient(to bottom,#f0ad4e 0,#ec971f 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0);background-repeat:repeat-x}.progress-bar-danger{background-image:-webkit-linear-gradient(top,#d9534f 0,#c9302c 100%);background-image:-o-linear-gradient(top,#d9534f 0,#c9302c 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9534f),to(#c9302c));background-image:linear-gradient(to bottom,#d9534f 0,#c9302c 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0);background-repeat:repeat-x}.progress-bar-striped{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.list-group{border-radius:4px;-webkit-box-shadow:0 1px 2px rgba(0,0,0,.075);box-shadow:0 1px 2px rgba(0,0,0,.075)}.list-group-item.active,.list-group-item.active:focus,.list-group-item.active:hover{text-shadow:0 -1px 0 #286090;background-image:-webkit-linear-gradient(top,#337ab7 0,#2b669a 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2b669a 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2b669a));background-image:linear-gradient(to bottom,#337ab7 0,#2b669a 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2b669a', GradientType=0);background-repeat:repeat-x;border-color:#2b669a}.list-group-item.active .badge,.list-group-item.active:focus .badge,.list-group-item.active:hover .badge{text-shadow:none}.panel{-webkit-box-shadow:0 1px 2px rgba(0,0,0,.05);box-shadow:0 1px 2px rgba(0,0,0,.05)}.panel-default>.panel-heading{background-image:-webkit-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-o-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f5f5f5),to(#e8e8e8));background-image:linear-gradient(to bottom,#f5f5f5 0,#e8e8e8 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', 
GradientType=0);background-repeat:repeat-x}.panel-primary>.panel-heading{background-image:-webkit-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2e6da4));background-image:linear-gradient(to bottom,#337ab7 0,#2e6da4 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);background-repeat:repeat-x}.panel-success>.panel-heading{background-image:-webkit-linear-gradient(top,#dff0d8 0,#d0e9c6 100%);background-image:-o-linear-gradient(top,#dff0d8 0,#d0e9c6 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#dff0d8),to(#d0e9c6));background-image:linear-gradient(to bottom,#dff0d8 0,#d0e9c6 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffd0e9c6', GradientType=0);background-repeat:repeat-x}.panel-info>.panel-heading{background-image:-webkit-linear-gradient(top,#d9edf7 0,#c4e3f3 100%);background-image:-o-linear-gradient(top,#d9edf7 0,#c4e3f3 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9edf7),to(#c4e3f3));background-image:linear-gradient(to bottom,#d9edf7 0,#c4e3f3 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffc4e3f3', GradientType=0);background-repeat:repeat-x}.panel-warning>.panel-heading{background-image:-webkit-linear-gradient(top,#fcf8e3 0,#faf2cc 100%);background-image:-o-linear-gradient(top,#fcf8e3 0,#faf2cc 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fcf8e3),to(#faf2cc));background-image:linear-gradient(to bottom,#fcf8e3 0,#faf2cc 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fffaf2cc', GradientType=0);background-repeat:repeat-x}.panel-danger>.panel-heading{background-image:-webkit-linear-gradient(top,#f2dede 0,#ebcccc 100%);background-image:-o-linear-gradient(top,#f2dede 0,#ebcccc 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f2dede),to(#ebcccc));background-image:linear-gradient(to bottom,#f2dede 0,#ebcccc 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffebcccc', GradientType=0);background-repeat:repeat-x}.well{background-image:-webkit-linear-gradient(top,#e8e8e8 0,#f5f5f5 100%);background-image:-o-linear-gradient(top,#e8e8e8 0,#f5f5f5 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#e8e8e8),to(#f5f5f5));background-image:linear-gradient(to bottom,#e8e8e8 0,#f5f5f5 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8', endColorstr='#fff5f5f5', GradientType=0);background-repeat:repeat-x;border-color:#dcdcdc;-webkit-box-shadow:inset 0 1px 3px rgba(0,0,0,.05),0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 3px rgba(0,0,0,.05),0 1px 0 rgba(255,255,255,.1)}
-/*# sourceMappingURL=bootstrap-theme.min.css.map */
\ No newline at end of file
diff --git a/hadoop-ozone/docs/themes/ozonedoc/static/css/bootstrap-theme.min.css.map b/hadoop-ozone/docs/themes/ozonedoc/static/css/bootstrap-theme.min.css.map
deleted file mode 100644
index 94813e9006074..0000000000000
--- a/hadoop-ozone/docs/themes/ozonedoc/static/css/bootstrap-theme.min.css.map
+++ /dev/null
@@ -1 +0,0 @@
-{"version":3,"sources":["less/theme.less","less/mixins/vendor-prefixes.less","less/mixins/gradients.less","less/mixins/reset-filter.less"],"names":[],"mappings":";;;;AAmBA,YAAA,aAAA,UAAA,aAAA,aAAA,aAME,YAAA,EAAA,KAAA,EAAA,eC2CA,mBAAA,MAAA,EAAA,IAAA,EAAA,sBAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,MAAA,EAAA,IAAA,EAAA,sBAAA,EAAA,IAAA,IAAA,iBDvCR,mBAAA,mBAAA,oBAAA,oBAAA,iBAAA,iBAAA,oBAAA,oBAAA,oBAAA,oBAAA,oBAAA,oBCsCA,mBAAA,MAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,MAAA,EAAA,IAAA,IAAA,iBDlCR,qBAAA,sBAAA,sBAAA,uBAAA,mBAAA,oBAAA,sBAAA,uBAAA,sBAAA,uBAAA,sBAAA,uBAAA,+BAAA,gCAAA,6BAAA,gCAAA,gCAAA,gCCiCA,mBAAA,KACQ,WAAA,KDlDV,mBAAA,oBAAA,iBAAA,oBAAA,oBAAA,oBAuBI,YAAA,KAyCF,YAAA,YAEE,iBAAA,KAKJ,aErEI,YAAA,EAAA,IAAA,EAAA,KACA,iBAAA,iDACA,iBAAA,4CAAA,iBAAA,qEAEA,iBAAA,+CCnBF,OAAA,+GH4CA,OAAA,0DACA,kBAAA,SAuC2C,aAAA,QAA2B,aAAA,KArCtE,mBAAA,mBAEE,iBAAA,QACA,oBAAA,EAAA,MAGF,oBAAA,oBAEE,iBAAA,QACA,aAAA,QAMA,sBAAA,6BAAA,4BAAA,6BAAA,4BAAA,4BAAA,uBAAA,8BAAA,6BAAA,8BAAA,6BAAA,6BAAA,gCAAA,uCAAA,sCAAA,uCAAA,sCAAA,sCAME,iBAAA,QACA,iBAAA,KAgBN,aEtEI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDAEA,OAAA,+GCnBF,OAAA,0DH4CA,kBAAA,SACA,aAAA,QAEA,mBAAA,mBAEE,iBAAA,QACA,oBAAA,EAAA,MAGF,oBAAA,oBAEE,iBAAA,QACA,aAAA,QAMA,sBAAA,6BAAA,4BAAA,6BAAA,4BAAA,4BAAA,uBAAA,8BAAA,6BAAA,8BAAA,6BAAA,6BAAA,gCAAA,uCAAA,sCAAA,uCAAA,sCAAA,sCAME,iBAAA,QACA,iBAAA,KAiBN,aEvEI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDAEA,OAAA,+GCnBF,OAAA,0DH4CA,kBAAA,SACA,aAAA,QAEA,mBAAA,mBAEE,iBAAA,QACA,oBAAA,EAAA,MAGF,oBAAA,oBAEE,iBAAA,QACA,aAAA,QAMA,sBAAA,6BAAA,4BAAA,6BAAA,4BAAA,4BAAA,uBAAA,8BAAA,6BAAA,8BAAA,6BAAA,6BAAA,gCAAA,uCAAA,sCAAA,uCAAA,sCAAA,sCAME,iBAAA,QACA,iBAAA,KAkBN,UExEI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDAEA,OAAA,+GCnBF,OAAA,0DH4CA,kBAAA,SACA,aAAA,QAEA,gBAAA,gBAEE,iBAAA,QACA,oBAAA,EAAA,MAGF,iBAAA,iBAEE,iBAAA,QACA,aAAA,QAMA,mBAAA,0BAAA,yBAAA,0BAAA,yBAAA,yBAAA,oBAAA,2BAAA,0BAAA,2BAAA,0BAAA,0BAAA,6BAAA,oCAAA,mCAAA,oCAAA,mCAAA,mCAME,iBAAA,QACA,iBAAA,KAmBN,aEzEI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDAEA,OAAA,+GCnBF,OAAA,0DH4CA,kBAAA,SACA,aAAA,QAEA,mBAAA,mBAEE,iBAAA,QACA,oBAAA,EAAA,MAGF,oBAAA,oBAEE,iBAAA,QACA,aAAA,QAMA,sBAAA,6BAAA,4BAAA,6BAAA,4BAAA,4BAAA,uBAAA,8BAAA,6BAAA,8BAAA,6BAAA,6BAAA,gCAAA,uCAAA,sCAAA,uCAAA,sCAAA,sCAME,iBAAA,QACA,iBAAA,KAoBN,YE1EI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDAEA,OAAA,+GCnBF,OAAA,0DH4CA,kBAAA,SACA,aAAA,QAEA,kBAAA,kBAEE,iBAAA,QACA,oBAAA,EAAA,MAGF,mBAAA,mBAEE,iBAAA,QACA,aAAA,QAMA,qBAAA,4BAAA,2BAAA,4BAAA,2BAAA,2BAAA,sBAAA,6BAAA,4BAAA,6BAAA,4BAAA,4BAAA,+BAAA,sCAAA,qCAAA,sCAAA,qCAAA,qCAME,iBAAA,QACA,iBAAA,KA2BN,eAAA,WClCE,mBAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,EAAA,IAAA,IAAA,iBD2CV,0BAAA,0BE3FI,iBAAA,QACA,iBAAA,oDACA,iBAAA,+CAAA,iBAAA,wEACA,iBAAA,kDACA,OAAA,+GF0FF,kBAAA,SAEF,yBAAA,+BAAA,+BEhGI,iBAAA,QACA,iBAAA,oDACA,iBAAA,+CAAA,iBAAA,wEACA,iBAAA,kDACA,OAAA,+GFgGF,kBAAA,SASF,gBE7GI,iBAAA,iDACA,iBAAA,4CACA,iBAAA,qEAAA,iBAAA,+CACA,OAAA,+GACA,OAAA,0DCnBF,kBAAA,SH+HA,cAAA,ICjEA,mBAAA,MAAA,EAAA,IAAA,EAAA,sBAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,MAAA,EAAA,IAAA,EAAA,sBAAA,EAAA,IAAA,IAAA,iBD6DV,sCAAA,oCE7GI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SD2CF,mBAAA,MAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,MAAA,EAAA,IAAA,IAAA,iBD0EV,cAAA,iBAEE,YAAA,EAAA,IAAA,EAAA,sBAIF,gBEhII,iBAAA,iDACA,iBAAA,4CACA,iBAAA,qEAAA,iBAAA,+CACA,OAAA,+GACA,OAAA,0DCnBF,kBAAA,SHkJA,cAAA,IAHF,sCAAA,oCEhII,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SD2CF,mBAAA,MAAA,EAAA,IAAA,IAAA,gBACQ,WAAA,MAAA,EAAA,IAAA,IAAA,gBDgFV,8BAAA,iCAYI,YAAA,EAAA,KAAA,EAAA,gBAKJ,qBAAA,kBAAA,mBAGE,cAAA,EAqBF,yBAfI,mDAAA,yDAAA,yDAGE,MAAA,KE7JF,iBAAA,oDACA,iBAAA,+CACA,iBAA
A,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,UFqKJ,OACE,YAAA,EAAA,IAAA,EAAA,qBC3HA,mBAAA,MAAA,EAAA,IAAA,EAAA,sBAAA,EAAA,IAAA,IAAA,gBACQ,WAAA,MAAA,EAAA,IAAA,EAAA,sBAAA,EAAA,IAAA,IAAA,gBDsIV,eEtLI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF8KF,aAAA,QAKF,YEvLI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF8KF,aAAA,QAMF,eExLI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF8KF,aAAA,QAOF,cEzLI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF8KF,aAAA,QAeF,UEjMI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFuMJ,cE3MI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFwMJ,sBE5MI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFyMJ,mBE7MI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF0MJ,sBE9MI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF2MJ,qBE/MI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF+MJ,sBElLI,iBAAA,yKACA,iBAAA,oKACA,iBAAA,iKFyLJ,YACE,cAAA,IC9KA,mBAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,EAAA,IAAA,IAAA,iBDgLV,wBAAA,8BAAA,8BAGE,YAAA,EAAA,KAAA,EAAA,QEnOE,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFiOF,aAAA,QALF,+BAAA,qCAAA,qCAQI,YAAA,KAUJ,OCnME,mBAAA,EAAA,IAAA,IAAA,gBACQ,WAAA,EAAA,IAAA,IAAA,gBD4MV,8BE5PI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFyPJ,8BE7PI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF0PJ,8BE9PI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF2PJ,2BE/PI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF4PJ,8BEhQI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF6PJ,6BEjQI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFoQJ,MExQI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFsQF,aAAA,QC3NA,mBAAA,MAAA,EAAA,IAAA,IAAA,gBAAA,EAAA,IAAA,EAAA,qBACQ,WAAA,MAAA,EAAA,IAAA,IAAA,gBAAA,EAAA,IAAA,EAAA","sourcesContent":["/*!\n * Bootstrap v3.3.7 (http://getbootstrap.com)\n * Copyright 2011-2016 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n */\n\n//\n// Load core variables and mixins\n// --------------------------------------------------\n\n@import \"variables.less\";\n@import \"mixins.less\";\n\n\n//\n// Buttons\n// --------------------------------------------------\n\n// Common styles\n.btn-default,\n.btn-primary,\n.btn-success,\n.btn-info,\n.btn-warning,\n.btn-danger {\n text-shadow: 0 -1px 0 rgba(0,0,0,.2);\n @shadow: inset 0 1px 0 rgba(255,255,255,.15), 0 1px 1px rgba(0,0,0,.075);\n .box-shadow(@shadow);\n\n // Reset the shadow\n &:active,\n &.active {\n .box-shadow(inset 0 3px 5px rgba(0,0,0,.125));\n }\n\n &.disabled,\n &[disabled],\n fieldset[disabled] & {\n .box-shadow(none);\n }\n\n .badge {\n text-shadow: none;\n }\n}\n\n// Mixin for generating new styles\n.btn-styles(@btn-color: #555) {\n #gradient > .vertical(@start-color: @btn-color; @end-color: darken(@btn-color, 12%));\n .reset-filter(); // Disable gradients for IE9 because filter bleeds through rounded corners; see https://github.com/twbs/bootstrap/issues/10620\n background-repeat: repeat-x;\n border-color: darken(@btn-color, 14%);\n\n &:hover,\n &:focus {\n background-color: darken(@btn-color, 12%);\n background-position: 0 -15px;\n }\n\n &:active,\n &.active {\n background-color: darken(@btn-color, 12%);\n border-color: darken(@btn-color, 14%);\n }\n\n &.disabled,\n &[disabled],\n fieldset[disabled] & {\n &,\n &:hover,\n &:focus,\n 
&.focus,\n &:active,\n &.active {\n background-color: darken(@btn-color, 12%);\n background-image: none;\n }\n }\n}\n\n// Common styles\n.btn {\n // Remove the gradient for the pressed/active state\n &:active,\n &.active {\n background-image: none;\n }\n}\n\n// Apply the mixin to the buttons\n.btn-default { .btn-styles(@btn-default-bg); text-shadow: 0 1px 0 #fff; border-color: #ccc; }\n.btn-primary { .btn-styles(@btn-primary-bg); }\n.btn-success { .btn-styles(@btn-success-bg); }\n.btn-info { .btn-styles(@btn-info-bg); }\n.btn-warning { .btn-styles(@btn-warning-bg); }\n.btn-danger { .btn-styles(@btn-danger-bg); }\n\n\n//\n// Images\n// --------------------------------------------------\n\n.thumbnail,\n.img-thumbnail {\n .box-shadow(0 1px 2px rgba(0,0,0,.075));\n}\n\n\n//\n// Dropdowns\n// --------------------------------------------------\n\n.dropdown-menu > li > a:hover,\n.dropdown-menu > li > a:focus {\n #gradient > .vertical(@start-color: @dropdown-link-hover-bg; @end-color: darken(@dropdown-link-hover-bg, 5%));\n background-color: darken(@dropdown-link-hover-bg, 5%);\n}\n.dropdown-menu > .active > a,\n.dropdown-menu > .active > a:hover,\n.dropdown-menu > .active > a:focus {\n #gradient > .vertical(@start-color: @dropdown-link-active-bg; @end-color: darken(@dropdown-link-active-bg, 5%));\n background-color: darken(@dropdown-link-active-bg, 5%);\n}\n\n\n//\n// Navbar\n// --------------------------------------------------\n\n// Default navbar\n.navbar-default {\n #gradient > .vertical(@start-color: lighten(@navbar-default-bg, 10%); @end-color: @navbar-default-bg);\n .reset-filter(); // Remove gradient in IE<10 to fix bug where dropdowns don't get triggered\n border-radius: @navbar-border-radius;\n @shadow: inset 0 1px 0 rgba(255,255,255,.15), 0 1px 5px rgba(0,0,0,.075);\n .box-shadow(@shadow);\n\n .navbar-nav > .open > a,\n .navbar-nav > .active > a {\n #gradient > .vertical(@start-color: darken(@navbar-default-link-active-bg, 5%); @end-color: darken(@navbar-default-link-active-bg, 2%));\n .box-shadow(inset 0 3px 9px rgba(0,0,0,.075));\n }\n}\n.navbar-brand,\n.navbar-nav > li > a {\n text-shadow: 0 1px 0 rgba(255,255,255,.25);\n}\n\n// Inverted navbar\n.navbar-inverse {\n #gradient > .vertical(@start-color: lighten(@navbar-inverse-bg, 10%); @end-color: @navbar-inverse-bg);\n .reset-filter(); // Remove gradient in IE<10 to fix bug where dropdowns don't get triggered; see https://github.com/twbs/bootstrap/issues/10257\n border-radius: @navbar-border-radius;\n .navbar-nav > .open > a,\n .navbar-nav > .active > a {\n #gradient > .vertical(@start-color: @navbar-inverse-link-active-bg; @end-color: lighten(@navbar-inverse-link-active-bg, 2.5%));\n .box-shadow(inset 0 3px 9px rgba(0,0,0,.25));\n }\n\n .navbar-brand,\n .navbar-nav > li > a {\n text-shadow: 0 -1px 0 rgba(0,0,0,.25);\n }\n}\n\n// Undo rounded corners in static and fixed navbars\n.navbar-static-top,\n.navbar-fixed-top,\n.navbar-fixed-bottom {\n border-radius: 0;\n}\n\n// Fix active state of dropdown items in collapsed mode\n@media (max-width: @grid-float-breakpoint-max) {\n .navbar .navbar-nav .open .dropdown-menu > .active > a {\n &,\n &:hover,\n &:focus {\n color: #fff;\n #gradient > .vertical(@start-color: @dropdown-link-active-bg; @end-color: darken(@dropdown-link-active-bg, 5%));\n }\n }\n}\n\n\n//\n// Alerts\n// --------------------------------------------------\n\n// Common styles\n.alert {\n text-shadow: 0 1px 0 rgba(255,255,255,.2);\n @shadow: inset 0 1px 0 rgba(255,255,255,.25), 0 1px 2px rgba(0,0,0,.05);\n 
.box-shadow(@shadow);\n}\n\n// Mixin for generating new styles\n.alert-styles(@color) {\n #gradient > .vertical(@start-color: @color; @end-color: darken(@color, 7.5%));\n border-color: darken(@color, 15%);\n}\n\n// Apply the mixin to the alerts\n.alert-success { .alert-styles(@alert-success-bg); }\n.alert-info { .alert-styles(@alert-info-bg); }\n.alert-warning { .alert-styles(@alert-warning-bg); }\n.alert-danger { .alert-styles(@alert-danger-bg); }\n\n\n//\n// Progress bars\n// --------------------------------------------------\n\n// Give the progress background some depth\n.progress {\n #gradient > .vertical(@start-color: darken(@progress-bg, 4%); @end-color: @progress-bg)\n}\n\n// Mixin for generating new styles\n.progress-bar-styles(@color) {\n #gradient > .vertical(@start-color: @color; @end-color: darken(@color, 10%));\n}\n\n// Apply the mixin to the progress bars\n.progress-bar { .progress-bar-styles(@progress-bar-bg); }\n.progress-bar-success { .progress-bar-styles(@progress-bar-success-bg); }\n.progress-bar-info { .progress-bar-styles(@progress-bar-info-bg); }\n.progress-bar-warning { .progress-bar-styles(@progress-bar-warning-bg); }\n.progress-bar-danger { .progress-bar-styles(@progress-bar-danger-bg); }\n\n// Reset the striped class because our mixins don't do multiple gradients and\n// the above custom styles override the new `.progress-bar-striped` in v3.2.0.\n.progress-bar-striped {\n #gradient > .striped();\n}\n\n\n//\n// List groups\n// --------------------------------------------------\n\n.list-group {\n border-radius: @border-radius-base;\n .box-shadow(0 1px 2px rgba(0,0,0,.075));\n}\n.list-group-item.active,\n.list-group-item.active:hover,\n.list-group-item.active:focus {\n text-shadow: 0 -1px 0 darken(@list-group-active-bg, 10%);\n #gradient > .vertical(@start-color: @list-group-active-bg; @end-color: darken(@list-group-active-bg, 7.5%));\n border-color: darken(@list-group-active-border, 7.5%);\n\n .badge {\n text-shadow: none;\n }\n}\n\n\n//\n// Panels\n// --------------------------------------------------\n\n// Common styles\n.panel {\n .box-shadow(0 1px 2px rgba(0,0,0,.05));\n}\n\n// Mixin for generating new styles\n.panel-heading-styles(@color) {\n #gradient > .vertical(@start-color: @color; @end-color: darken(@color, 5%));\n}\n\n// Apply the mixin to the panel headings only\n.panel-default > .panel-heading { .panel-heading-styles(@panel-default-heading-bg); }\n.panel-primary > .panel-heading { .panel-heading-styles(@panel-primary-heading-bg); }\n.panel-success > .panel-heading { .panel-heading-styles(@panel-success-heading-bg); }\n.panel-info > .panel-heading { .panel-heading-styles(@panel-info-heading-bg); }\n.panel-warning > .panel-heading { .panel-heading-styles(@panel-warning-heading-bg); }\n.panel-danger > .panel-heading { .panel-heading-styles(@panel-danger-heading-bg); }\n\n\n//\n// Wells\n// --------------------------------------------------\n\n.well {\n #gradient > .vertical(@start-color: darken(@well-bg, 5%); @end-color: @well-bg);\n border-color: darken(@well-bg, 10%);\n @shadow: inset 0 1px 3px rgba(0,0,0,.05), 0 1px 0 rgba(255,255,255,.1);\n .box-shadow(@shadow);\n}\n","// Vendor Prefixes\n//\n// All vendor mixins are deprecated as of v3.2.0 due to the introduction of\n// Autoprefixer in our Gruntfile. 
They have been removed in v4.\n\n// - Animations\n// - Backface visibility\n// - Box shadow\n// - Box sizing\n// - Content columns\n// - Hyphens\n// - Placeholder text\n// - Transformations\n// - Transitions\n// - User Select\n\n\n// Animations\n.animation(@animation) {\n -webkit-animation: @animation;\n -o-animation: @animation;\n animation: @animation;\n}\n.animation-name(@name) {\n -webkit-animation-name: @name;\n animation-name: @name;\n}\n.animation-duration(@duration) {\n -webkit-animation-duration: @duration;\n animation-duration: @duration;\n}\n.animation-timing-function(@timing-function) {\n -webkit-animation-timing-function: @timing-function;\n animation-timing-function: @timing-function;\n}\n.animation-delay(@delay) {\n -webkit-animation-delay: @delay;\n animation-delay: @delay;\n}\n.animation-iteration-count(@iteration-count) {\n -webkit-animation-iteration-count: @iteration-count;\n animation-iteration-count: @iteration-count;\n}\n.animation-direction(@direction) {\n -webkit-animation-direction: @direction;\n animation-direction: @direction;\n}\n.animation-fill-mode(@fill-mode) {\n -webkit-animation-fill-mode: @fill-mode;\n animation-fill-mode: @fill-mode;\n}\n\n// Backface visibility\n// Prevent browsers from flickering when using CSS 3D transforms.\n// Default value is `visible`, but can be changed to `hidden`\n\n.backface-visibility(@visibility) {\n -webkit-backface-visibility: @visibility;\n -moz-backface-visibility: @visibility;\n backface-visibility: @visibility;\n}\n\n// Drop shadows\n//\n// Note: Deprecated `.box-shadow()` as of v3.1.0 since all of Bootstrap's\n// supported browsers that have box shadow capabilities now support it.\n\n.box-shadow(@shadow) {\n -webkit-box-shadow: @shadow; // iOS <4.3 & Android <4.1\n box-shadow: @shadow;\n}\n\n// Box sizing\n.box-sizing(@boxmodel) {\n -webkit-box-sizing: @boxmodel;\n -moz-box-sizing: @boxmodel;\n box-sizing: @boxmodel;\n}\n\n// CSS3 Content Columns\n.content-columns(@column-count; @column-gap: @grid-gutter-width) {\n -webkit-column-count: @column-count;\n -moz-column-count: @column-count;\n column-count: @column-count;\n -webkit-column-gap: @column-gap;\n -moz-column-gap: @column-gap;\n column-gap: @column-gap;\n}\n\n// Optional hyphenation\n.hyphens(@mode: auto) {\n word-wrap: break-word;\n -webkit-hyphens: @mode;\n -moz-hyphens: @mode;\n -ms-hyphens: @mode; // IE10+\n -o-hyphens: @mode;\n hyphens: @mode;\n}\n\n// Placeholder text\n.placeholder(@color: @input-color-placeholder) {\n // Firefox\n &::-moz-placeholder {\n color: @color;\n opacity: 1; // Override Firefox's unusual default opacity; see https://github.com/twbs/bootstrap/pull/11526\n }\n &:-ms-input-placeholder { color: @color; } // Internet Explorer 10+\n &::-webkit-input-placeholder { color: @color; } // Safari and Chrome\n}\n\n// Transformations\n.scale(@ratio) {\n -webkit-transform: scale(@ratio);\n -ms-transform: scale(@ratio); // IE9 only\n -o-transform: scale(@ratio);\n transform: scale(@ratio);\n}\n.scale(@ratioX; @ratioY) {\n -webkit-transform: scale(@ratioX, @ratioY);\n -ms-transform: scale(@ratioX, @ratioY); // IE9 only\n -o-transform: scale(@ratioX, @ratioY);\n transform: scale(@ratioX, @ratioY);\n}\n.scaleX(@ratio) {\n -webkit-transform: scaleX(@ratio);\n -ms-transform: scaleX(@ratio); // IE9 only\n -o-transform: scaleX(@ratio);\n transform: scaleX(@ratio);\n}\n.scaleY(@ratio) {\n -webkit-transform: scaleY(@ratio);\n -ms-transform: scaleY(@ratio); // IE9 only\n -o-transform: scaleY(@ratio);\n transform: scaleY(@ratio);\n}\n.skew(@x; @y) {\n 
-webkit-transform: skewX(@x) skewY(@y);\n -ms-transform: skewX(@x) skewY(@y); // See https://github.com/twbs/bootstrap/issues/4885; IE9+\n -o-transform: skewX(@x) skewY(@y);\n transform: skewX(@x) skewY(@y);\n}\n.translate(@x; @y) {\n -webkit-transform: translate(@x, @y);\n -ms-transform: translate(@x, @y); // IE9 only\n -o-transform: translate(@x, @y);\n transform: translate(@x, @y);\n}\n.translate3d(@x; @y; @z) {\n -webkit-transform: translate3d(@x, @y, @z);\n transform: translate3d(@x, @y, @z);\n}\n.rotate(@degrees) {\n -webkit-transform: rotate(@degrees);\n -ms-transform: rotate(@degrees); // IE9 only\n -o-transform: rotate(@degrees);\n transform: rotate(@degrees);\n}\n.rotateX(@degrees) {\n -webkit-transform: rotateX(@degrees);\n -ms-transform: rotateX(@degrees); // IE9 only\n -o-transform: rotateX(@degrees);\n transform: rotateX(@degrees);\n}\n.rotateY(@degrees) {\n -webkit-transform: rotateY(@degrees);\n -ms-transform: rotateY(@degrees); // IE9 only\n -o-transform: rotateY(@degrees);\n transform: rotateY(@degrees);\n}\n.perspective(@perspective) {\n -webkit-perspective: @perspective;\n -moz-perspective: @perspective;\n perspective: @perspective;\n}\n.perspective-origin(@perspective) {\n -webkit-perspective-origin: @perspective;\n -moz-perspective-origin: @perspective;\n perspective-origin: @perspective;\n}\n.transform-origin(@origin) {\n -webkit-transform-origin: @origin;\n -moz-transform-origin: @origin;\n -ms-transform-origin: @origin; // IE9 only\n transform-origin: @origin;\n}\n\n\n// Transitions\n\n.transition(@transition) {\n -webkit-transition: @transition;\n -o-transition: @transition;\n transition: @transition;\n}\n.transition-property(@transition-property) {\n -webkit-transition-property: @transition-property;\n transition-property: @transition-property;\n}\n.transition-delay(@transition-delay) {\n -webkit-transition-delay: @transition-delay;\n transition-delay: @transition-delay;\n}\n.transition-duration(@transition-duration) {\n -webkit-transition-duration: @transition-duration;\n transition-duration: @transition-duration;\n}\n.transition-timing-function(@timing-function) {\n -webkit-transition-timing-function: @timing-function;\n transition-timing-function: @timing-function;\n}\n.transition-transform(@transition) {\n -webkit-transition: -webkit-transform @transition;\n -moz-transition: -moz-transform @transition;\n -o-transition: -o-transform @transition;\n transition: transform @transition;\n}\n\n\n// User select\n// For selecting text on the page\n\n.user-select(@select) {\n -webkit-user-select: @select;\n -moz-user-select: @select;\n -ms-user-select: @select; // IE10+\n user-select: @select;\n}\n","// Gradients\n\n#gradient {\n\n // Horizontal gradient, from left to right\n //\n // Creates two color stops, start and end, by specifying a color and position for each color stop.\n // Color stops are not available in IE9 and below.\n .horizontal(@start-color: #555; @end-color: #333; @start-percent: 0%; @end-percent: 100%) {\n background-image: -webkit-linear-gradient(left, @start-color @start-percent, @end-color @end-percent); // Safari 5.1-6, Chrome 10+\n background-image: -o-linear-gradient(left, @start-color @start-percent, @end-color @end-percent); // Opera 12\n background-image: linear-gradient(to right, @start-color @start-percent, @end-color @end-percent); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n background-repeat: repeat-x;\n filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', 
GradientType=1)\",argb(@start-color),argb(@end-color))); // IE9 and down\n }\n\n // Vertical gradient, from top to bottom\n //\n // Creates two color stops, start and end, by specifying a color and position for each color stop.\n // Color stops are not available in IE9 and below.\n .vertical(@start-color: #555; @end-color: #333; @start-percent: 0%; @end-percent: 100%) {\n background-image: -webkit-linear-gradient(top, @start-color @start-percent, @end-color @end-percent); // Safari 5.1-6, Chrome 10+\n background-image: -o-linear-gradient(top, @start-color @start-percent, @end-color @end-percent); // Opera 12\n background-image: linear-gradient(to bottom, @start-color @start-percent, @end-color @end-percent); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n background-repeat: repeat-x;\n filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=0)\",argb(@start-color),argb(@end-color))); // IE9 and down\n }\n\n .directional(@start-color: #555; @end-color: #333; @deg: 45deg) {\n background-repeat: repeat-x;\n background-image: -webkit-linear-gradient(@deg, @start-color, @end-color); // Safari 5.1-6, Chrome 10+\n background-image: -o-linear-gradient(@deg, @start-color, @end-color); // Opera 12\n background-image: linear-gradient(@deg, @start-color, @end-color); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n }\n .horizontal-three-colors(@start-color: #00b3ee; @mid-color: #7a43b6; @color-stop: 50%; @end-color: #c3325f) {\n background-image: -webkit-linear-gradient(left, @start-color, @mid-color @color-stop, @end-color);\n background-image: -o-linear-gradient(left, @start-color, @mid-color @color-stop, @end-color);\n background-image: linear-gradient(to right, @start-color, @mid-color @color-stop, @end-color);\n background-repeat: no-repeat;\n filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=1)\",argb(@start-color),argb(@end-color))); // IE9 and down, gets no color-stop at all for proper fallback\n }\n .vertical-three-colors(@start-color: #00b3ee; @mid-color: #7a43b6; @color-stop: 50%; @end-color: #c3325f) {\n background-image: -webkit-linear-gradient(@start-color, @mid-color @color-stop, @end-color);\n background-image: -o-linear-gradient(@start-color, @mid-color @color-stop, @end-color);\n background-image: linear-gradient(@start-color, @mid-color @color-stop, @end-color);\n background-repeat: no-repeat;\n filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=0)\",argb(@start-color),argb(@end-color))); // IE9 and down, gets no color-stop at all for proper fallback\n }\n .radial(@inner-color: #555; @outer-color: #333) {\n background-image: -webkit-radial-gradient(circle, @inner-color, @outer-color);\n background-image: radial-gradient(circle, @inner-color, @outer-color);\n background-repeat: no-repeat;\n }\n .striped(@color: rgba(255,255,255,.15); @angle: 45deg) {\n background-image: -webkit-linear-gradient(@angle, @color 25%, transparent 25%, transparent 50%, @color 50%, @color 75%, transparent 75%, transparent);\n background-image: -o-linear-gradient(@angle, @color 25%, transparent 25%, transparent 50%, @color 50%, @color 75%, transparent 75%, transparent);\n background-image: linear-gradient(@angle, @color 25%, transparent 25%, transparent 50%, @color 50%, @color 75%, transparent 75%, transparent);\n }\n}\n","// Reset filters for IE\n//\n// When you need to remove a gradient background, 
do not forget to use this to reset\n// the IE filter for IE9 and below.\n\n.reset-filter() {\n filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(enabled = false)\"));\n}\n"]}
\ No newline at end of file
diff --git a/hadoop-ozone/docs/themes/ozonedoc/static/css/bootstrap.min.css b/hadoop-ozone/docs/themes/ozonedoc/static/css/bootstrap.min.css
deleted file mode 100644
index ed3905e0e0c91..0000000000000
--- a/hadoop-ozone/docs/themes/ozonedoc/static/css/bootstrap.min.css
+++ /dev/null
@@ -1,6 +0,0 @@
-/*!
- * Bootstrap v3.3.7 (http://getbootstrap.com)
- * Copyright 2011-2016 Twitter, Inc.
- * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
- *//*! normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */html{font-family:sans-serif;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}body{margin:0}article,aside,details,figcaption,figure,footer,header,hgroup,main,menu,nav,section,summary{display:block}audio,canvas,progress,video{display:inline-block;vertical-align:baseline}audio:not([controls]){display:none;height:0}[hidden],template{display:none}a{background-color:transparent}a:active,a:hover{outline:0}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:700}dfn{font-style:italic}h1{margin:.67em 0;font-size:2em}mark{color:#000;background:#ff0}small{font-size:80%}sub,sup{position:relative;font-size:75%;line-height:0;vertical-align:baseline}sup{top:-.5em}sub{bottom:-.25em}img{border:0}svg:not(:root){overflow:hidden}figure{margin:1em 40px}hr{height:0;-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box}pre{overflow:auto}code,kbd,pre,samp{font-family:monospace,monospace;font-size:1em}button,input,optgroup,select,textarea{margin:0;font:inherit;color:inherit}button{overflow:visible}button,select{text-transform:none}button,html input[type=button],input[type=reset],input[type=submit]{-webkit-appearance:button;cursor:pointer}button[disabled],html input[disabled]{cursor:default}button::-moz-focus-inner,input::-moz-focus-inner{padding:0;border:0}input{line-height:normal}input[type=checkbox],input[type=radio]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;padding:0}input[type=number]::-webkit-inner-spin-button,input[type=number]::-webkit-outer-spin-button{height:auto}input[type=search]{-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;-webkit-appearance:textfield}input[type=search]::-webkit-search-cancel-button,input[type=search]::-webkit-search-decoration{-webkit-appearance:none}fieldset{padding:.35em .625em .75em;margin:0 2px;border:1px solid silver}legend{padding:0;border:0}textarea{overflow:auto}optgroup{font-weight:700}table{border-spacing:0;border-collapse:collapse}td,th{padding:0}/*!
Source: https://github.com/h5bp/html5-boilerplate/blob/master/src/css/main.css */@media print{*,:after,:before{color:#000!important;text-shadow:none!important;background:0 0!important;-webkit-box-shadow:none!important;box-shadow:none!important}a,a:visited{text-decoration:underline}a[href]:after{content:" (" attr(href) ")"}abbr[title]:after{content:" (" attr(title) ")"}a[href^="javascript:"]:after,a[href^="#"]:after{content:""}blockquote,pre{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}img,tr{page-break-inside:avoid}img{max-width:100%!important}h2,h3,p{orphans:3;widows:3}h2,h3{page-break-after:avoid}.navbar{display:none}.btn>.caret,.dropup>.btn>.caret{border-top-color:#000!important}.label{border:1px solid #000}.table{border-collapse:collapse!important}.table td,.table th{background-color:#fff!important}.table-bordered td,.table-bordered th{border:1px solid #ddd!important}}@font-face{font-family:'Glyphicons Halflings';src:url(../fonts/glyphicons-halflings-regular.eot);src:url(../fonts/glyphicons-halflings-regular.eot?#iefix) format('embedded-opentype'),url(../fonts/glyphicons-halflings-regular.woff2) format('woff2'),url(../fonts/glyphicons-halflings-regular.woff) format('woff'),url(../fonts/glyphicons-halflings-regular.ttf) format('truetype'),url(../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular) format('svg')}.glyphicon{position:relative;top:1px;display:inline-block;font-family:'Glyphicons Halflings';font-style:normal;font-weight:400;line-height:1;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.glyphicon-asterisk:before{content:"\002a"}.glyphicon-plus:before{content:"\002b"}.glyphicon-eur:before,.glyphicon-euro:before{content:"\20ac"}.glyphicon-minus:before{content:"\2212"}.glyphicon-cloud:before{content:"\2601"}.glyphicon-envelope:before{content:"\2709"}.glyphicon-pencil:before{content:"\270f"}.glyphicon-glass:before{content:"\e001"}.glyphicon-music:before{content:"\e002"}.glyphicon-search:before{content:"\e003"}.glyphicon-heart:before{content:"\e005"}.glyphicon-star:before{content:"\e006"}.glyphicon-star-empty:before{content:"\e007"}.glyphicon-user:before{content:"\e008"}.glyphicon-film:before{content:"\e009"}.glyphicon-th-large:before{content:"\e010"}.glyphicon-th:before{content:"\e011"}.glyphicon-th-list:before{content:"\e012"}.glyphicon-ok:before{content:"\e013"}.glyphicon-remove:before{content:"\e014"}.glyphicon-zoom-in:before{content:"\e015"}.glyphicon-zoom-out:before{content:"\e016"}.glyphicon-off:before{content:"\e017"}.glyphicon-signal:before{content:"\e018"}.glyphicon-cog:before{content:"\e019"}.glyphicon-trash:before{content:"\e020"}.glyphicon-home:before{content:"\e021"}.glyphicon-file:before{content:"\e022"}.glyphicon-time:before{content:"\e023"}.glyphicon-road:before{content:"\e024"}.glyphicon-download-alt:before{content:"\e025"}.glyphicon-download:before{content:"\e026"}.glyphicon-upload:before{content:"\e027"}.glyphicon-inbox:before{content:"\e028"}.glyphicon-play-circle:before{content:"\e029"}.glyphicon-repeat:before{content:"\e030"}.glyphicon-refresh:before{content:"\e031"}.glyphicon-list-alt:before{content:"\e032"}.glyphicon-lock:before{content:"\e033"}.glyphicon-flag:before{content:"\e034"}.glyphicon-headphones:before{content:"\e035"}.glyphicon-volume-off:before{content:"\e036"}.glyphicon-volume-down:before{content:"\e037"}.glyphicon-volume-up:before{content:"\e038"}.glyphicon-qrcode:before{content:"\e039"}.glyphicon-barcode:before{content:"\e040"}.glyphicon-tag:before{content:"\e041"}.glyphicon
-tags:before{content:"\e042"}.glyphicon-book:before{content:"\e043"}.glyphicon-bookmark:before{content:"\e044"}.glyphicon-print:before{content:"\e045"}.glyphicon-camera:before{content:"\e046"}.glyphicon-font:before{content:"\e047"}.glyphicon-bold:before{content:"\e048"}.glyphicon-italic:before{content:"\e049"}.glyphicon-text-height:before{content:"\e050"}.glyphicon-text-width:before{content:"\e051"}.glyphicon-align-left:before{content:"\e052"}.glyphicon-align-center:before{content:"\e053"}.glyphicon-align-right:before{content:"\e054"}.glyphicon-align-justify:before{content:"\e055"}.glyphicon-list:before{content:"\e056"}.glyphicon-indent-left:before{content:"\e057"}.glyphicon-indent-right:before{content:"\e058"}.glyphicon-facetime-video:before{content:"\e059"}.glyphicon-picture:before{content:"\e060"}.glyphicon-map-marker:before{content:"\e062"}.glyphicon-adjust:before{content:"\e063"}.glyphicon-tint:before{content:"\e064"}.glyphicon-edit:before{content:"\e065"}.glyphicon-share:before{content:"\e066"}.glyphicon-check:before{content:"\e067"}.glyphicon-move:before{content:"\e068"}.glyphicon-step-backward:before{content:"\e069"}.glyphicon-fast-backward:before{content:"\e070"}.glyphicon-backward:before{content:"\e071"}.glyphicon-play:before{content:"\e072"}.glyphicon-pause:before{content:"\e073"}.glyphicon-stop:before{content:"\e074"}.glyphicon-forward:before{content:"\e075"}.glyphicon-fast-forward:before{content:"\e076"}.glyphicon-step-forward:before{content:"\e077"}.glyphicon-eject:before{content:"\e078"}.glyphicon-chevron-left:before{content:"\e079"}.glyphicon-chevron-right:before{content:"\e080"}.glyphicon-plus-sign:before{content:"\e081"}.glyphicon-minus-sign:before{content:"\e082"}.glyphicon-remove-sign:before{content:"\e083"}.glyphicon-ok-sign:before{content:"\e084"}.glyphicon-question-sign:before{content:"\e085"}.glyphicon-info-sign:before{content:"\e086"}.glyphicon-screenshot:before{content:"\e087"}.glyphicon-remove-circle:before{content:"\e088"}.glyphicon-ok-circle:before{content:"\e089"}.glyphicon-ban-circle:before{content:"\e090"}.glyphicon-arrow-left:before{content:"\e091"}.glyphicon-arrow-right:before{content:"\e092"}.glyphicon-arrow-up:before{content:"\e093"}.glyphicon-arrow-down:before{content:"\e094"}.glyphicon-share-alt:before{content:"\e095"}.glyphicon-resize-full:before{content:"\e096"}.glyphicon-resize-small:before{content:"\e097"}.glyphicon-exclamation-sign:before{content:"\e101"}.glyphicon-gift:before{content:"\e102"}.glyphicon-leaf:before{content:"\e103"}.glyphicon-fire:before{content:"\e104"}.glyphicon-eye-open:before{content:"\e105"}.glyphicon-eye-close:before{content:"\e106"}.glyphicon-warning-sign:before{content:"\e107"}.glyphicon-plane:before{content:"\e108"}.glyphicon-calendar:before{content:"\e109"}.glyphicon-random:before{content:"\e110"}.glyphicon-comment:before{content:"\e111"}.glyphicon-magnet:before{content:"\e112"}.glyphicon-chevron-up:before{content:"\e113"}.glyphicon-chevron-down:before{content:"\e114"}.glyphicon-retweet:before{content:"\e115"}.glyphicon-shopping-cart:before{content:"\e116"}.glyphicon-folder-close:before{content:"\e117"}.glyphicon-folder-open:before{content:"\e118"}.glyphicon-resize-vertical:before{content:"\e119"}.glyphicon-resize-horizontal:before{content:"\e120"}.glyphicon-hdd:before{content:"\e121"}.glyphicon-bullhorn:before{content:"\e122"}.glyphicon-bell:before{content:"\e123"}.glyphicon-certificate:before{content:"\e124"}.glyphicon-thumbs-up:before{content:"\e125"}.glyphicon-thumbs-down:before{content:"\e126"}.glyphicon-hand-right:be
fore{content:"\e127"}.glyphicon-hand-left:before{content:"\e128"}.glyphicon-hand-up:before{content:"\e129"}.glyphicon-hand-down:before{content:"\e130"}.glyphicon-circle-arrow-right:before{content:"\e131"}.glyphicon-circle-arrow-left:before{content:"\e132"}.glyphicon-circle-arrow-up:before{content:"\e133"}.glyphicon-circle-arrow-down:before{content:"\e134"}.glyphicon-globe:before{content:"\e135"}.glyphicon-wrench:before{content:"\e136"}.glyphicon-tasks:before{content:"\e137"}.glyphicon-filter:before{content:"\e138"}.glyphicon-briefcase:before{content:"\e139"}.glyphicon-fullscreen:before{content:"\e140"}.glyphicon-dashboard:before{content:"\e141"}.glyphicon-paperclip:before{content:"\e142"}.glyphicon-heart-empty:before{content:"\e143"}.glyphicon-link:before{content:"\e144"}.glyphicon-phone:before{content:"\e145"}.glyphicon-pushpin:before{content:"\e146"}.glyphicon-usd:before{content:"\e148"}.glyphicon-gbp:before{content:"\e149"}.glyphicon-sort:before{content:"\e150"}.glyphicon-sort-by-alphabet:before{content:"\e151"}.glyphicon-sort-by-alphabet-alt:before{content:"\e152"}.glyphicon-sort-by-order:before{content:"\e153"}.glyphicon-sort-by-order-alt:before{content:"\e154"}.glyphicon-sort-by-attributes:before{content:"\e155"}.glyphicon-sort-by-attributes-alt:before{content:"\e156"}.glyphicon-unchecked:before{content:"\e157"}.glyphicon-expand:before{content:"\e158"}.glyphicon-collapse-down:before{content:"\e159"}.glyphicon-collapse-up:before{content:"\e160"}.glyphicon-log-in:before{content:"\e161"}.glyphicon-flash:before{content:"\e162"}.glyphicon-log-out:before{content:"\e163"}.glyphicon-new-window:before{content:"\e164"}.glyphicon-record:before{content:"\e165"}.glyphicon-save:before{content:"\e166"}.glyphicon-open:before{content:"\e167"}.glyphicon-saved:before{content:"\e168"}.glyphicon-import:before{content:"\e169"}.glyphicon-export:before{content:"\e170"}.glyphicon-send:before{content:"\e171"}.glyphicon-floppy-disk:before{content:"\e172"}.glyphicon-floppy-saved:before{content:"\e173"}.glyphicon-floppy-remove:before{content:"\e174"}.glyphicon-floppy-save:before{content:"\e175"}.glyphicon-floppy-open:before{content:"\e176"}.glyphicon-credit-card:before{content:"\e177"}.glyphicon-transfer:before{content:"\e178"}.glyphicon-cutlery:before{content:"\e179"}.glyphicon-header:before{content:"\e180"}.glyphicon-compressed:before{content:"\e181"}.glyphicon-earphone:before{content:"\e182"}.glyphicon-phone-alt:before{content:"\e183"}.glyphicon-tower:before{content:"\e184"}.glyphicon-stats:before{content:"\e185"}.glyphicon-sd-video:before{content:"\e186"}.glyphicon-hd-video:before{content:"\e187"}.glyphicon-subtitles:before{content:"\e188"}.glyphicon-sound-stereo:before{content:"\e189"}.glyphicon-sound-dolby:before{content:"\e190"}.glyphicon-sound-5-1:before{content:"\e191"}.glyphicon-sound-6-1:before{content:"\e192"}.glyphicon-sound-7-1:before{content:"\e193"}.glyphicon-copyright-mark:before{content:"\e194"}.glyphicon-registration-mark:before{content:"\e195"}.glyphicon-cloud-download:before{content:"\e197"}.glyphicon-cloud-upload:before{content:"\e198"}.glyphicon-tree-conifer:before{content:"\e199"}.glyphicon-tree-deciduous:before{content:"\e200"}.glyphicon-cd:before{content:"\e201"}.glyphicon-save-file:before{content:"\e202"}.glyphicon-open-file:before{content:"\e203"}.glyphicon-level-up:before{content:"\e204"}.glyphicon-copy:before{content:"\e205"}.glyphicon-paste:before{content:"\e206"}.glyphicon-alert:before{content:"\e209"}.glyphicon-equalizer:before{content:"\e210"}.glyphicon-king:before{content:"\e211
"}.glyphicon-queen:before{content:"\e212"}.glyphicon-pawn:before{content:"\e213"}.glyphicon-bishop:before{content:"\e214"}.glyphicon-knight:before{content:"\e215"}.glyphicon-baby-formula:before{content:"\e216"}.glyphicon-tent:before{content:"\26fa"}.glyphicon-blackboard:before{content:"\e218"}.glyphicon-bed:before{content:"\e219"}.glyphicon-apple:before{content:"\f8ff"}.glyphicon-erase:before{content:"\e221"}.glyphicon-hourglass:before{content:"\231b"}.glyphicon-lamp:before{content:"\e223"}.glyphicon-duplicate:before{content:"\e224"}.glyphicon-piggy-bank:before{content:"\e225"}.glyphicon-scissors:before{content:"\e226"}.glyphicon-bitcoin:before{content:"\e227"}.glyphicon-btc:before{content:"\e227"}.glyphicon-xbt:before{content:"\e227"}.glyphicon-yen:before{content:"\00a5"}.glyphicon-jpy:before{content:"\00a5"}.glyphicon-ruble:before{content:"\20bd"}.glyphicon-rub:before{content:"\20bd"}.glyphicon-scale:before{content:"\e230"}.glyphicon-ice-lolly:before{content:"\e231"}.glyphicon-ice-lolly-tasted:before{content:"\e232"}.glyphicon-education:before{content:"\e233"}.glyphicon-option-horizontal:before{content:"\e234"}.glyphicon-option-vertical:before{content:"\e235"}.glyphicon-menu-hamburger:before{content:"\e236"}.glyphicon-modal-window:before{content:"\e237"}.glyphicon-oil:before{content:"\e238"}.glyphicon-grain:before{content:"\e239"}.glyphicon-sunglasses:before{content:"\e240"}.glyphicon-text-size:before{content:"\e241"}.glyphicon-text-color:before{content:"\e242"}.glyphicon-text-background:before{content:"\e243"}.glyphicon-object-align-top:before{content:"\e244"}.glyphicon-object-align-bottom:before{content:"\e245"}.glyphicon-object-align-horizontal:before{content:"\e246"}.glyphicon-object-align-left:before{content:"\e247"}.glyphicon-object-align-vertical:before{content:"\e248"}.glyphicon-object-align-right:before{content:"\e249"}.glyphicon-triangle-right:before{content:"\e250"}.glyphicon-triangle-left:before{content:"\e251"}.glyphicon-triangle-bottom:before{content:"\e252"}.glyphicon-triangle-top:before{content:"\e253"}.glyphicon-console:before{content:"\e254"}.glyphicon-superscript:before{content:"\e255"}.glyphicon-subscript:before{content:"\e256"}.glyphicon-menu-left:before{content:"\e257"}.glyphicon-menu-right:before{content:"\e258"}.glyphicon-menu-down:before{content:"\e259"}.glyphicon-menu-up:before{content:"\e260"}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}:after,:before{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:10px;-webkit-tap-highlight-color:rgba(0,0,0,0)}body{font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:14px;line-height:1.42857143;color:#333;background-color:#fff}button,input,select,textarea{font-family:inherit;font-size:inherit;line-height:inherit}a{color:#337ab7;text-decoration:none}a:focus,a:hover{color:#23527c;text-decoration:underline}a:focus{outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}figure{margin:0}img{vertical-align:middle}.carousel-inner>.item>a>img,.carousel-inner>.item>img,.img-responsive,.thumbnail a>img,.thumbnail>img{display:block;max-width:100%;height:auto}.img-rounded{border-radius:6px}.img-thumbnail{display:inline-block;max-width:100%;height:auto;padding:4px;line-height:1.42857143;background-color:#fff;border:1px solid #ddd;border-radius:4px;-webkit-transition:all .2s ease-in-out;-o-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.img-circle{border-radius:50%}hr{margin-top:20px;margin-bottom:20px;border:0;border-top:1px 
solid #eee}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}[role=button]{cursor:pointer}.h1,.h2,.h3,.h4,.h5,.h6,h1,h2,h3,h4,h5,h6{font-family:inherit;font-weight:500;line-height:1.1;color:inherit}.h1 .small,.h1 small,.h2 .small,.h2 small,.h3 .small,.h3 small,.h4 .small,.h4 small,.h5 .small,.h5 small,.h6 .small,.h6 small,h1 .small,h1 small,h2 .small,h2 small,h3 .small,h3 small,h4 .small,h4 small,h5 .small,h5 small,h6 .small,h6 small{font-weight:400;line-height:1;color:#777}.h1,.h2,.h3,h1,h2,h3{margin-top:20px;margin-bottom:10px}.h1 .small,.h1 small,.h2 .small,.h2 small,.h3 .small,.h3 small,h1 .small,h1 small,h2 .small,h2 small,h3 .small,h3 small{font-size:65%}.h4,.h5,.h6,h4,h5,h6{margin-top:10px;margin-bottom:10px}.h4 .small,.h4 small,.h5 .small,.h5 small,.h6 .small,.h6 small,h4 .small,h4 small,h5 .small,h5 small,h6 .small,h6 small{font-size:75%}.h1,h1{font-size:36px}.h2,h2{font-size:30px}.h3,h3{font-size:24px}.h4,h4{font-size:18px}.h5,h5{font-size:14px}.h6,h6{font-size:12px}p{margin:0 0 10px}.lead{margin-bottom:20px;font-size:16px;font-weight:300;line-height:1.4}@media (min-width:768px){.lead{font-size:21px}}.small,small{font-size:85%}.mark,mark{padding:.2em;background-color:#fcf8e3}.text-left{text-align:left}.text-right{text-align:right}.text-center{text-align:center}.text-justify{text-align:justify}.text-nowrap{white-space:nowrap}.text-lowercase{text-transform:lowercase}.text-uppercase{text-transform:uppercase}.text-capitalize{text-transform:capitalize}.text-muted{color:#777}.text-primary{color:#337ab7}a.text-primary:focus,a.text-primary:hover{color:#286090}.text-success{color:#3c763d}a.text-success:focus,a.text-success:hover{color:#2b542c}.text-info{color:#31708f}a.text-info:focus,a.text-info:hover{color:#245269}.text-warning{color:#8a6d3b}a.text-warning:focus,a.text-warning:hover{color:#66512c}.text-danger{color:#a94442}a.text-danger:focus,a.text-danger:hover{color:#843534}.bg-primary{color:#fff;background-color:#337ab7}a.bg-primary:focus,a.bg-primary:hover{background-color:#286090}.bg-success{background-color:#dff0d8}a.bg-success:focus,a.bg-success:hover{background-color:#c1e2b3}.bg-info{background-color:#d9edf7}a.bg-info:focus,a.bg-info:hover{background-color:#afd9ee}.bg-warning{background-color:#fcf8e3}a.bg-warning:focus,a.bg-warning:hover{background-color:#f7ecb5}.bg-danger{background-color:#f2dede}a.bg-danger:focus,a.bg-danger:hover{background-color:#e4b9b9}.page-header{padding-bottom:9px;margin:40px 0 20px;border-bottom:1px solid #eee}ol,ul{margin-top:0;margin-bottom:10px}ol ol,ol ul,ul ol,ul ul{margin-bottom:0}.list-unstyled{padding-left:0;list-style:none}.list-inline{padding-left:0;margin-left:-5px;list-style:none}.list-inline>li{display:inline-block;padding-right:5px;padding-left:5px}dl{margin-top:0;margin-bottom:20px}dd,dt{line-height:1.42857143}dt{font-weight:700}dd{margin-left:0}@media (min-width:768px){.dl-horizontal dt{float:left;width:160px;overflow:hidden;clear:left;text-align:right;text-overflow:ellipsis;white-space:nowrap}.dl-horizontal dd{margin-left:180px}}abbr[data-original-title],abbr[title]{cursor:help;border-bottom:1px dotted #777}.initialism{font-size:90%;text-transform:uppercase}blockquote{padding:10px 20px;margin:0 0 20px;font-size:17.5px;border-left:5px solid #eee}blockquote ol:last-child,blockquote p:last-child,blockquote ul:last-child{margin-bottom:0}blockquote 
.small,blockquote footer,blockquote small{display:block;font-size:80%;line-height:1.42857143;color:#777}blockquote .small:before,blockquote footer:before,blockquote small:before{content:'\2014 \00A0'}.blockquote-reverse,blockquote.pull-right{padding-right:15px;padding-left:0;text-align:right;border-right:5px solid #eee;border-left:0}.blockquote-reverse .small:before,.blockquote-reverse footer:before,.blockquote-reverse small:before,blockquote.pull-right .small:before,blockquote.pull-right footer:before,blockquote.pull-right small:before{content:''}.blockquote-reverse .small:after,.blockquote-reverse footer:after,.blockquote-reverse small:after,blockquote.pull-right .small:after,blockquote.pull-right footer:after,blockquote.pull-right small:after{content:'\00A0 \2014'}address{margin-bottom:20px;font-style:normal;line-height:1.42857143}code,kbd,pre,samp{font-family:Menlo,Monaco,Consolas,"Courier New",monospace}code{padding:2px 4px;font-size:90%;color:#c7254e;background-color:#f9f2f4;border-radius:4px}kbd{padding:2px 4px;font-size:90%;color:#fff;background-color:#333;border-radius:3px;-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,.25);box-shadow:inset 0 -1px 0 rgba(0,0,0,.25)}kbd kbd{padding:0;font-size:100%;font-weight:700;-webkit-box-shadow:none;box-shadow:none}pre{display:block;padding:9.5px;margin:0 0 10px;font-size:13px;line-height:1.42857143;color:#333;word-break:break-all;word-wrap:break-word;background-color:#f5f5f5;border:1px solid #ccc;border-radius:4px}pre code{padding:0;font-size:inherit;color:inherit;white-space:pre-wrap;background-color:transparent;border-radius:0}.pre-scrollable{max-height:340px;overflow-y:scroll}.container{padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}@media (min-width:768px){.container{width:750px}}@media (min-width:992px){.container{width:970px}}@media 
(min-width:1200px){.container{width:1170px}}.container-fluid{padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}.row{margin-right:-15px;margin-left:-15px}.col-lg-1,.col-lg-10,.col-lg-11,.col-lg-12,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-md-1,.col-md-10,.col-md-11,.col-md-12,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-sm-1,.col-sm-10,.col-sm-11,.col-sm-12,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-xs-1,.col-xs-10,.col-xs-11,.col-xs-12,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9{position:relative;min-height:1px;padding-right:15px;padding-left:15px}.col-xs-1,.col-xs-10,.col-xs-11,.col-xs-12,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9{float:left}.col-xs-12{width:100%}.col-xs-11{width:91.66666667%}.col-xs-10{width:83.33333333%}.col-xs-9{width:75%}.col-xs-8{width:66.66666667%}.col-xs-7{width:58.33333333%}.col-xs-6{width:50%}.col-xs-5{width:41.66666667%}.col-xs-4{width:33.33333333%}.col-xs-3{width:25%}.col-xs-2{width:16.66666667%}.col-xs-1{width:8.33333333%}.col-xs-pull-12{right:100%}.col-xs-pull-11{right:91.66666667%}.col-xs-pull-10{right:83.33333333%}.col-xs-pull-9{right:75%}.col-xs-pull-8{right:66.66666667%}.col-xs-pull-7{right:58.33333333%}.col-xs-pull-6{right:50%}.col-xs-pull-5{right:41.66666667%}.col-xs-pull-4{right:33.33333333%}.col-xs-pull-3{right:25%}.col-xs-pull-2{right:16.66666667%}.col-xs-pull-1{right:8.33333333%}.col-xs-pull-0{right:auto}.col-xs-push-12{left:100%}.col-xs-push-11{left:91.66666667%}.col-xs-push-10{left:83.33333333%}.col-xs-push-9{left:75%}.col-xs-push-8{left:66.66666667%}.col-xs-push-7{left:58.33333333%}.col-xs-push-6{left:50%}.col-xs-push-5{left:41.66666667%}.col-xs-push-4{left:33.33333333%}.col-xs-push-3{left:25%}.col-xs-push-2{left:16.66666667%}.col-xs-push-1{left:8.33333333%}.col-xs-push-0{left:auto}.col-xs-offset-12{margin-left:100%}.col-xs-offset-11{margin-left:91.66666667%}.col-xs-offset-10{margin-left:83.33333333%}.col-xs-offset-9{margin-left:75%}.col-xs-offset-8{margin-left:66.66666667%}.col-xs-offset-7{margin-left:58.33333333%}.col-xs-offset-6{margin-left:50%}.col-xs-offset-5{margin-left:41.66666667%}.col-xs-offset-4{margin-left:33.33333333%}.col-xs-offset-3{margin-left:25%}.col-xs-offset-2{margin-left:16.66666667%}.col-xs-offset-1{margin-left:8.33333333%}.col-xs-offset-0{margin-left:0}@media 
[bootstrap.min.css continues verbatim: minified Bootstrap v3 rules for the responsive grid (.col-sm-*/.col-md-*/.col-lg-* widths, push/pull/offset), tables, forms and validation states, buttons and button variants, dropdowns, button groups, input groups, navs and navbars (default/inverse), breadcrumbs, pagination and pager, labels, badges, jumbotron, thumbnails, alerts, progress bars, media objects, list groups, and panels; vendored third-party stylesheet added unmodified by this patch, elided here]
.list-group-item:first-child{border-top-width:0}.list-group+.panel-footer{border-top-width:0}.panel>.panel-collapse>.table,.panel>.table,.panel>.table-responsive>.table{margin-bottom:0}.panel>.panel-collapse>.table caption,.panel>.table caption,.panel>.table-responsive>.table caption{padding-right:15px;padding-left:15px}.panel>.table-responsive:first-child>.table:first-child,.panel>.table:first-child{border-top-left-radius:3px;border-top-right-radius:3px}.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child,.panel>.table:first-child>thead:first-child>tr:first-child{border-top-left-radius:3px;border-top-right-radius:3px}.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child td:first-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child th:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child td:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child th:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child td:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child th:first-child,.panel>.table:first-child>thead:first-child>tr:first-child td:first-child,.panel>.table:first-child>thead:first-child>tr:first-child th:first-child{border-top-left-radius:3px}.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child td:last-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child th:last-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child td:last-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child th:last-child,.panel>.table:first-child>tbody:first-child>tr:first-child td:last-child,.panel>.table:first-child>tbody:first-child>tr:first-child th:last-child,.panel>.table:first-child>thead:first-child>tr:first-child td:last-child,.panel>.table:first-child>thead:first-child>tr:first-child th:last-child{border-top-right-radius:3px}.panel>.table-responsive:last-child>.table:last-child,.panel>.table:last-child{border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child,.panel>.table:last-child>tbody:last-child>tr:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child{border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child td:first-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child th:first-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child td:first-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child th:first-child,.panel>.table:last-child>tbody:last-child>tr:last-child td:first-child,.panel>.table:last-child>tbody:last-child>tr:last-child th:first-child,.panel>.table:last-child>tfoot:last-child>tr:last-child td:first-child,.panel>.table:last-child>tfoot:last-child>tr:last-child 
th:first-child{border-bottom-left-radius:3px}.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child td:last-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child th:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child td:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child th:last-child,.panel>.table:last-child>tbody:last-child>tr:last-child td:last-child,.panel>.table:last-child>tbody:last-child>tr:last-child th:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child td:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child th:last-child{border-bottom-right-radius:3px}.panel>.panel-body+.table,.panel>.panel-body+.table-responsive,.panel>.table+.panel-body,.panel>.table-responsive+.panel-body{border-top:1px solid #ddd}.panel>.table>tbody:first-child>tr:first-child td,.panel>.table>tbody:first-child>tr:first-child th{border-top:0}.panel>.table-bordered,.panel>.table-responsive>.table-bordered{border:0}.panel>.table-bordered>tbody>tr>td:first-child,.panel>.table-bordered>tbody>tr>th:first-child,.panel>.table-bordered>tfoot>tr>td:first-child,.panel>.table-bordered>tfoot>tr>th:first-child,.panel>.table-bordered>thead>tr>td:first-child,.panel>.table-bordered>thead>tr>th:first-child,.panel>.table-responsive>.table-bordered>tbody>tr>td:first-child,.panel>.table-responsive>.table-bordered>tbody>tr>th:first-child,.panel>.table-responsive>.table-bordered>tfoot>tr>td:first-child,.panel>.table-responsive>.table-bordered>tfoot>tr>th:first-child,.panel>.table-responsive>.table-bordered>thead>tr>td:first-child,.panel>.table-responsive>.table-bordered>thead>tr>th:first-child{border-left:0}.panel>.table-bordered>tbody>tr>td:last-child,.panel>.table-bordered>tbody>tr>th:last-child,.panel>.table-bordered>tfoot>tr>td:last-child,.panel>.table-bordered>tfoot>tr>th:last-child,.panel>.table-bordered>thead>tr>td:last-child,.panel>.table-bordered>thead>tr>th:last-child,.panel>.table-responsive>.table-bordered>tbody>tr>td:last-child,.panel>.table-responsive>.table-bordered>tbody>tr>th:last-child,.panel>.table-responsive>.table-bordered>tfoot>tr>td:last-child,.panel>.table-responsive>.table-bordered>tfoot>tr>th:last-child,.panel>.table-responsive>.table-bordered>thead>tr>td:last-child,.panel>.table-responsive>.table-bordered>thead>tr>th:last-child{border-right:0}.panel>.table-bordered>tbody>tr:first-child>td,.panel>.table-bordered>tbody>tr:first-child>th,.panel>.table-bordered>thead>tr:first-child>td,.panel>.table-bordered>thead>tr:first-child>th,.panel>.table-responsive>.table-bordered>tbody>tr:first-child>td,.panel>.table-responsive>.table-bordered>tbody>tr:first-child>th,.panel>.table-responsive>.table-bordered>thead>tr:first-child>td,.panel>.table-responsive>.table-bordered>thead>tr:first-child>th{border-bottom:0}.panel>.table-bordered>tbody>tr:last-child>td,.panel>.table-bordered>tbody>tr:last-child>th,.panel>.table-bordered>tfoot>tr:last-child>td,.panel>.table-bordered>tfoot>tr:last-child>th,.panel>.table-responsive>.table-bordered>tbody>tr:last-child>td,.panel>.table-responsive>.table-bordered>tbody>tr:last-child>th,.panel>.table-responsive>.table-bordered>tfoot>tr:last-child>td,.panel>.table-responsive>.table-bordered>tfoot>tr:last-child>th{border-bottom:0}.panel>.table-responsive{margin-bottom:0;border:0}.panel-group{margin-bottom:20px}.panel-group .panel{margin-bottom:0;border-radius:4px}.panel-group 
.panel+.panel{margin-top:5px}.panel-group .panel-heading{border-bottom:0}.panel-group .panel-heading+.panel-collapse>.list-group,.panel-group .panel-heading+.panel-collapse>.panel-body{border-top:1px solid #ddd}.panel-group .panel-footer{border-top:0}.panel-group .panel-footer+.panel-collapse .panel-body{border-bottom:1px solid #ddd}.panel-default{border-color:#ddd}.panel-default>.panel-heading{color:#333;background-color:#f5f5f5;border-color:#ddd}.panel-default>.panel-heading+.panel-collapse>.panel-body{border-top-color:#ddd}.panel-default>.panel-heading .badge{color:#f5f5f5;background-color:#333}.panel-default>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#ddd}.panel-primary{border-color:#337ab7}.panel-primary>.panel-heading{color:#fff;background-color:#337ab7;border-color:#337ab7}.panel-primary>.panel-heading+.panel-collapse>.panel-body{border-top-color:#337ab7}.panel-primary>.panel-heading .badge{color:#337ab7;background-color:#fff}.panel-primary>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#337ab7}.panel-success{border-color:#d6e9c6}.panel-success>.panel-heading{color:#3c763d;background-color:#dff0d8;border-color:#d6e9c6}.panel-success>.panel-heading+.panel-collapse>.panel-body{border-top-color:#d6e9c6}.panel-success>.panel-heading .badge{color:#dff0d8;background-color:#3c763d}.panel-success>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#d6e9c6}.panel-info{border-color:#bce8f1}.panel-info>.panel-heading{color:#31708f;background-color:#d9edf7;border-color:#bce8f1}.panel-info>.panel-heading+.panel-collapse>.panel-body{border-top-color:#bce8f1}.panel-info>.panel-heading .badge{color:#d9edf7;background-color:#31708f}.panel-info>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#bce8f1}.panel-warning{border-color:#faebcc}.panel-warning>.panel-heading{color:#8a6d3b;background-color:#fcf8e3;border-color:#faebcc}.panel-warning>.panel-heading+.panel-collapse>.panel-body{border-top-color:#faebcc}.panel-warning>.panel-heading .badge{color:#fcf8e3;background-color:#8a6d3b}.panel-warning>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#faebcc}.panel-danger{border-color:#ebccd1}.panel-danger>.panel-heading{color:#a94442;background-color:#f2dede;border-color:#ebccd1}.panel-danger>.panel-heading+.panel-collapse>.panel-body{border-top-color:#ebccd1}.panel-danger>.panel-heading .badge{color:#f2dede;background-color:#a94442}.panel-danger>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#ebccd1}.embed-responsive{position:relative;display:block;height:0;padding:0;overflow:hidden}.embed-responsive .embed-responsive-item,.embed-responsive embed,.embed-responsive iframe,.embed-responsive object,.embed-responsive video{position:absolute;top:0;bottom:0;left:0;width:100%;height:100%;border:0}.embed-responsive-16by9{padding-bottom:56.25%}.embed-responsive-4by3{padding-bottom:75%}.well{min-height:20px;padding:19px;margin-bottom:20px;background-color:#f5f5f5;border:1px solid #e3e3e3;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.05);box-shadow:inset 0 1px 1px rgba(0,0,0,.05)}.well blockquote{border-color:#ddd;border-color:rgba(0,0,0,.15)}.well-lg{padding:24px;border-radius:6px}.well-sm{padding:9px;border-radius:3px}.close{float:right;font-size:21px;font-weight:700;line-height:1;color:#000;text-shadow:0 1px 0 
#fff;filter:alpha(opacity=20);opacity:.2}.close:focus,.close:hover{color:#000;text-decoration:none;cursor:pointer;filter:alpha(opacity=50);opacity:.5}button.close{-webkit-appearance:none;padding:0;cursor:pointer;background:0 0;border:0}.modal-open{overflow:hidden}.modal{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1050;display:none;overflow:hidden;-webkit-overflow-scrolling:touch;outline:0}.modal.fade .modal-dialog{-webkit-transition:-webkit-transform .3s ease-out;-o-transition:-o-transform .3s ease-out;transition:transform .3s ease-out;-webkit-transform:translate(0,-25%);-ms-transform:translate(0,-25%);-o-transform:translate(0,-25%);transform:translate(0,-25%)}.modal.in .modal-dialog{-webkit-transform:translate(0,0);-ms-transform:translate(0,0);-o-transform:translate(0,0);transform:translate(0,0)}.modal-open .modal{overflow-x:hidden;overflow-y:auto}.modal-dialog{position:relative;width:auto;margin:10px}.modal-content{position:relative;background-color:#fff;-webkit-background-clip:padding-box;background-clip:padding-box;border:1px solid #999;border:1px solid rgba(0,0,0,.2);border-radius:6px;outline:0;-webkit-box-shadow:0 3px 9px rgba(0,0,0,.5);box-shadow:0 3px 9px rgba(0,0,0,.5)}.modal-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1040;background-color:#000}.modal-backdrop.fade{filter:alpha(opacity=0);opacity:0}.modal-backdrop.in{filter:alpha(opacity=50);opacity:.5}.modal-header{padding:15px;border-bottom:1px solid #e5e5e5}.modal-header .close{margin-top:-2px}.modal-title{margin:0;line-height:1.42857143}.modal-body{position:relative;padding:15px}.modal-footer{padding:15px;text-align:right;border-top:1px solid #e5e5e5}.modal-footer .btn+.btn{margin-bottom:0;margin-left:5px}.modal-footer .btn-group .btn+.btn{margin-left:-1px}.modal-footer .btn-block+.btn-block{margin-left:0}.modal-scrollbar-measure{position:absolute;top:-9999px;width:50px;height:50px;overflow:scroll}@media (min-width:768px){.modal-dialog{width:600px;margin:30px auto}.modal-content{-webkit-box-shadow:0 5px 15px rgba(0,0,0,.5);box-shadow:0 5px 15px rgba(0,0,0,.5)}.modal-sm{width:300px}}@media (min-width:992px){.modal-lg{width:900px}}.tooltip{position:absolute;z-index:1070;display:block;font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:12px;font-style:normal;font-weight:400;line-height:1.42857143;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;word-wrap:normal;white-space:normal;filter:alpha(opacity=0);opacity:0;line-break:auto}.tooltip.in{filter:alpha(opacity=90);opacity:.9}.tooltip.top{padding:5px 0;margin-top:-3px}.tooltip.right{padding:0 5px;margin-left:3px}.tooltip.bottom{padding:5px 0;margin-top:3px}.tooltip.left{padding:0 5px;margin-left:-3px}.tooltip-inner{max-width:200px;padding:3px 8px;color:#fff;text-align:center;background-color:#000;border-radius:4px}.tooltip-arrow{position:absolute;width:0;height:0;border-color:transparent;border-style:solid}.tooltip.top .tooltip-arrow{bottom:0;left:50%;margin-left:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-left .tooltip-arrow{right:5px;bottom:0;margin-bottom:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-right .tooltip-arrow{bottom:0;left:5px;margin-bottom:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.right .tooltip-arrow{top:50%;left:0;margin-top:-5px;border-width:5px 5px 5px 0;border-right-color:#000}.tooltip.left .tooltip-arrow{top:50%;right:0;margin-top:-5px;border-width:5px 0 5px 
5px;border-left-color:#000}.tooltip.bottom .tooltip-arrow{top:0;left:50%;margin-left:-5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-left .tooltip-arrow{top:0;right:5px;margin-top:-5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-right .tooltip-arrow{top:0;left:5px;margin-top:-5px;border-width:0 5px 5px;border-bottom-color:#000}.popover{position:absolute;top:0;left:0;z-index:1060;display:none;max-width:276px;padding:1px;font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:14px;font-style:normal;font-weight:400;line-height:1.42857143;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;word-wrap:normal;white-space:normal;background-color:#fff;-webkit-background-clip:padding-box;background-clip:padding-box;border:1px solid #ccc;border:1px solid rgba(0,0,0,.2);border-radius:6px;-webkit-box-shadow:0 5px 10px rgba(0,0,0,.2);box-shadow:0 5px 10px rgba(0,0,0,.2);line-break:auto}.popover.top{margin-top:-10px}.popover.right{margin-left:10px}.popover.bottom{margin-top:10px}.popover.left{margin-left:-10px}.popover-title{padding:8px 14px;margin:0;font-size:14px;background-color:#f7f7f7;border-bottom:1px solid #ebebeb;border-radius:5px 5px 0 0}.popover-content{padding:9px 14px}.popover>.arrow,.popover>.arrow:after{position:absolute;display:block;width:0;height:0;border-color:transparent;border-style:solid}.popover>.arrow{border-width:11px}.popover>.arrow:after{content:"";border-width:10px}.popover.top>.arrow{bottom:-11px;left:50%;margin-left:-11px;border-top-color:#999;border-top-color:rgba(0,0,0,.25);border-bottom-width:0}.popover.top>.arrow:after{bottom:1px;margin-left:-10px;content:" ";border-top-color:#fff;border-bottom-width:0}.popover.right>.arrow{top:50%;left:-11px;margin-top:-11px;border-right-color:#999;border-right-color:rgba(0,0,0,.25);border-left-width:0}.popover.right>.arrow:after{bottom:-10px;left:1px;content:" ";border-right-color:#fff;border-left-width:0}.popover.bottom>.arrow{top:-11px;left:50%;margin-left:-11px;border-top-width:0;border-bottom-color:#999;border-bottom-color:rgba(0,0,0,.25)}.popover.bottom>.arrow:after{top:1px;margin-left:-10px;content:" ";border-top-width:0;border-bottom-color:#fff}.popover.left>.arrow{top:50%;right:-11px;margin-top:-11px;border-right-width:0;border-left-color:#999;border-left-color:rgba(0,0,0,.25)}.popover.left>.arrow:after{right:1px;bottom:-10px;content:" ";border-right-width:0;border-left-color:#fff}.carousel{position:relative}.carousel-inner{position:relative;width:100%;overflow:hidden}.carousel-inner>.item{position:relative;display:none;-webkit-transition:.6s ease-in-out left;-o-transition:.6s ease-in-out left;transition:.6s ease-in-out left}.carousel-inner>.item>a>img,.carousel-inner>.item>img{line-height:1}@media all and (transform-3d),(-webkit-transform-3d){.carousel-inner>.item{-webkit-transition:-webkit-transform .6s ease-in-out;-o-transition:-o-transform .6s ease-in-out;transition:transform .6s 
ease-in-out;-webkit-backface-visibility:hidden;backface-visibility:hidden;-webkit-perspective:1000px;perspective:1000px}.carousel-inner>.item.active.right,.carousel-inner>.item.next{left:0;-webkit-transform:translate3d(100%,0,0);transform:translate3d(100%,0,0)}.carousel-inner>.item.active.left,.carousel-inner>.item.prev{left:0;-webkit-transform:translate3d(-100%,0,0);transform:translate3d(-100%,0,0)}.carousel-inner>.item.active,.carousel-inner>.item.next.left,.carousel-inner>.item.prev.right{left:0;-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}}.carousel-inner>.active,.carousel-inner>.next,.carousel-inner>.prev{display:block}.carousel-inner>.active{left:0}.carousel-inner>.next,.carousel-inner>.prev{position:absolute;top:0;width:100%}.carousel-inner>.next{left:100%}.carousel-inner>.prev{left:-100%}.carousel-inner>.next.left,.carousel-inner>.prev.right{left:0}.carousel-inner>.active.left{left:-100%}.carousel-inner>.active.right{left:100%}.carousel-control{position:absolute;top:0;bottom:0;left:0;width:15%;font-size:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,.6);background-color:rgba(0,0,0,0);filter:alpha(opacity=50);opacity:.5}.carousel-control.left{background-image:-webkit-linear-gradient(left,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);background-image:-o-linear-gradient(left,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);background-image:-webkit-gradient(linear,left top,right top,from(rgba(0,0,0,.5)),to(rgba(0,0,0,.0001)));background-image:linear-gradient(to right,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', endColorstr='#00000000', GradientType=1);background-repeat:repeat-x}.carousel-control.right{right:0;left:auto;background-image:-webkit-linear-gradient(left,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);background-image:-o-linear-gradient(left,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);background-image:-webkit-gradient(linear,left top,right top,from(rgba(0,0,0,.0001)),to(rgba(0,0,0,.5)));background-image:linear-gradient(to right,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1);background-repeat:repeat-x}.carousel-control:focus,.carousel-control:hover{color:#fff;text-decoration:none;filter:alpha(opacity=90);outline:0;opacity:.9}.carousel-control .glyphicon-chevron-left,.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next,.carousel-control .icon-prev{position:absolute;top:50%;z-index:5;display:inline-block;margin-top:-10px}.carousel-control .glyphicon-chevron-left,.carousel-control .icon-prev{left:50%;margin-left:-10px}.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next{right:50%;margin-right:-10px}.carousel-control .icon-next,.carousel-control .icon-prev{width:20px;height:20px;font-family:serif;line-height:1}.carousel-control .icon-prev:before{content:'\2039'}.carousel-control .icon-next:before{content:'\203a'}.carousel-indicators{position:absolute;bottom:10px;left:50%;z-index:15;width:60%;padding-left:0;margin-left:-30%;text-align:center;list-style:none}.carousel-indicators li{display:inline-block;width:10px;height:10px;margin:1px;text-indent:-999px;cursor:pointer;background-color:#000\9;background-color:rgba(0,0,0,0);border:1px solid #fff;border-radius:10px}.carousel-indicators 
.active{width:12px;height:12px;margin:0;background-color:#fff}.carousel-caption{position:absolute;right:15%;bottom:20px;left:15%;z-index:10;padding-top:20px;padding-bottom:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,.6)}.carousel-caption .btn{text-shadow:none}@media screen and (min-width:768px){.carousel-control .glyphicon-chevron-left,.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next,.carousel-control .icon-prev{width:30px;height:30px;margin-top:-10px;font-size:30px}.carousel-control .glyphicon-chevron-left,.carousel-control .icon-prev{margin-left:-10px}.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next{margin-right:-10px}.carousel-caption{right:20%;left:20%;padding-bottom:30px}.carousel-indicators{bottom:20px}}.btn-group-vertical>.btn-group:after,.btn-group-vertical>.btn-group:before,.btn-toolbar:after,.btn-toolbar:before,.clearfix:after,.clearfix:before,.container-fluid:after,.container-fluid:before,.container:after,.container:before,.dl-horizontal dd:after,.dl-horizontal dd:before,.form-horizontal .form-group:after,.form-horizontal .form-group:before,.modal-footer:after,.modal-footer:before,.modal-header:after,.modal-header:before,.nav:after,.nav:before,.navbar-collapse:after,.navbar-collapse:before,.navbar-header:after,.navbar-header:before,.navbar:after,.navbar:before,.pager:after,.pager:before,.panel-body:after,.panel-body:before,.row:after,.row:before{display:table;content:" "}.btn-group-vertical>.btn-group:after,.btn-toolbar:after,.clearfix:after,.container-fluid:after,.container:after,.dl-horizontal dd:after,.form-horizontal .form-group:after,.modal-footer:after,.modal-header:after,.nav:after,.navbar-collapse:after,.navbar-header:after,.navbar:after,.pager:after,.panel-body:after,.row:after{clear:both}.center-block{display:block;margin-right:auto;margin-left:auto}.pull-right{float:right!important}.pull-left{float:left!important}.hide{display:none!important}.show{display:block!important}.invisible{visibility:hidden}.text-hide{font:0/0 a;color:transparent;text-shadow:none;background-color:transparent;border:0}.hidden{display:none!important}.affix{position:fixed}@-ms-viewport{width:device-width}.visible-lg,.visible-md,.visible-sm,.visible-xs{display:none!important}.visible-lg-block,.visible-lg-inline,.visible-lg-inline-block,.visible-md-block,.visible-md-inline,.visible-md-inline-block,.visible-sm-block,.visible-sm-inline,.visible-sm-inline-block,.visible-xs-block,.visible-xs-inline,.visible-xs-inline-block{display:none!important}@media (max-width:767px){.visible-xs{display:block!important}table.visible-xs{display:table!important}tr.visible-xs{display:table-row!important}td.visible-xs,th.visible-xs{display:table-cell!important}}@media (max-width:767px){.visible-xs-block{display:block!important}}@media (max-width:767px){.visible-xs-inline{display:inline!important}}@media (max-width:767px){.visible-xs-inline-block{display:inline-block!important}}@media (min-width:768px) and (max-width:991px){.visible-sm{display:block!important}table.visible-sm{display:table!important}tr.visible-sm{display:table-row!important}td.visible-sm,th.visible-sm{display:table-cell!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-block{display:block!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-inline{display:inline!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-inline-block{display:inline-block!important}}@media (min-width:992px) and 
(max-width:1199px){.visible-md{display:block!important}table.visible-md{display:table!important}tr.visible-md{display:table-row!important}td.visible-md,th.visible-md{display:table-cell!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-block{display:block!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-inline{display:inline!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-inline-block{display:inline-block!important}}@media (min-width:1200px){.visible-lg{display:block!important}table.visible-lg{display:table!important}tr.visible-lg{display:table-row!important}td.visible-lg,th.visible-lg{display:table-cell!important}}@media (min-width:1200px){.visible-lg-block{display:block!important}}@media (min-width:1200px){.visible-lg-inline{display:inline!important}}@media (min-width:1200px){.visible-lg-inline-block{display:inline-block!important}}@media (max-width:767px){.hidden-xs{display:none!important}}@media (min-width:768px) and (max-width:991px){.hidden-sm{display:none!important}}@media (min-width:992px) and (max-width:1199px){.hidden-md{display:none!important}}@media (min-width:1200px){.hidden-lg{display:none!important}}.visible-print{display:none!important}@media print{.visible-print{display:block!important}table.visible-print{display:table!important}tr.visible-print{display:table-row!important}td.visible-print,th.visible-print{display:table-cell!important}}.visible-print-block{display:none!important}@media print{.visible-print-block{display:block!important}}.visible-print-inline{display:none!important}@media print{.visible-print-inline{display:inline!important}}.visible-print-inline-block{display:none!important}@media print{.visible-print-inline-block{display:inline-block!important}}@media print{.hidden-print{display:none!important}}
-/*# sourceMappingURL=bootstrap.min.css.map */
\ No newline at end of file
diff --git a/hadoop-ozone/docs/themes/ozonedoc/static/css/bootstrap.min.css.map b/hadoop-ozone/docs/themes/ozonedoc/static/css/bootstrap.min.css.map
deleted file mode 100644
index 6c7fa40b98db0..0000000000000
--- a/hadoop-ozone/docs/themes/ozonedoc/static/css/bootstrap.min.css.map
+++ /dev/null
@@ -1 +0,0 @@
-{"version":3,"sources":["less/normalize.less","less/print.less","bootstrap.css","dist/css/bootstrap.css","less/glyphicons.less","less/scaffolding.less","less/mixins/vendor-prefixes.less","less/mixins/tab-focus.less","less/mixins/image.less","less/type.less","less/mixins/text-emphasis.less","less/mixins/background-variant.less","less/mixins/text-overflow.less","less/code.less","less/grid.less","less/mixins/grid.less","less/mixins/grid-framework.less","less/tables.less","less/mixins/table-row.less","less/forms.less","less/mixins/forms.less","less/buttons.less","less/mixins/buttons.less","less/mixins/opacity.less","less/component-animations.less","less/dropdowns.less","less/mixins/nav-divider.less","less/mixins/reset-filter.less","less/button-groups.less","less/mixins/border-radius.less","less/input-groups.less","less/navs.less","less/navbar.less","less/mixins/nav-vertical-align.less","less/utilities.less","less/breadcrumbs.less","less/pagination.less","less/mixins/pagination.less","less/pager.less","less/labels.less","less/mixins/labels.less","less/badges.less","less/jumbotron.less","less/thumbnails.less","less/alerts.less","less/mixins/alerts.less","less/progress-bars.less","less/mixins/gradients.less","less/mixins/progress-bar.less","less/media.less","less/list-group.less","less/mixins/list-group.less","less/panels.less","less/mixins/panels.less","less/responsive-embed.less","less/wells.less","less/close.less","less/modals.less","less/tooltip.less","less/mixins/reset-text.less","less/popovers.less","less/carousel.less","less/mixins/clearfix.less","less/mixins/center-block.less","less/mixins/hide-text.less","less/responsive-utilities.less","less/mixins/responsive-visibility.less"],"names":[],"mappings":";;;;4EAQA,KACE,YAAA,WACA,yBAAA,KACA,qBAAA,KAOF,KACE,OAAA,EAaF,QAAA,MAAA,QAAA,WAAA,OAAA,OAAA,OAAA,OAAA,KAAA,KAAA,IAAA,QAAA,QAaE,QAAA,MAQF,MAAA,OAAA,SAAA,MAIE,QAAA,aACA,eAAA,SAQF,sBACE,QAAA,KACA,OAAA,EAQF,SAAA,SAEE,QAAA,KAUF,EACE,iBAAA,YAQF,SAAA,QAEE,QAAA,EAUF,YACE,cAAA,IAAA,OAOF,EAAA,OAEE,YAAA,IAOF,IACE,WAAA,OAQF,GACE,OAAA,MAAA,EACA,UAAA,IAOF,KACE,MAAA,KACA,WAAA,KAOF,MACE,UAAA,IAOF,IAAA,IAEE,SAAA,SACA,UAAA,IACA,YAAA,EACA,eAAA,SAGF,IACE,IAAA,MAGF,IACE,OAAA,OAUF,IACE,OAAA,EAOF,eACE,SAAA,OAUF,OACE,OAAA,IAAA,KAOF,GACE,OAAA,EAAA,mBAAA,YAAA,gBAAA,YACA,WAAA,YAOF,IACE,SAAA,KAOF,KAAA,IAAA,IAAA,KAIE,YAAA,UAAA,UACA,UAAA,IAkBF,OAAA,MAAA,SAAA,OAAA,SAKE,OAAA,EACA,KAAA,QACA,MAAA,QAOF,OACE,SAAA,QAUF,OAAA,OAEE,eAAA,KAWF,OAAA,wBAAA,kBAAA,mBAIE,mBAAA,OACA,OAAA,QAOF,iBAAA,qBAEE,OAAA,QAOF,yBAAA,wBAEE,QAAA,EACA,OAAA,EAQF,MACE,YAAA,OAWF,qBAAA,kBAEE,mBAAA,WAAA,gBAAA,WAAA,WAAA,WACA,QAAA,EASF,8CAAA,8CAEE,OAAA,KAQF,mBACE,mBAAA,YACA,gBAAA,YAAA,WAAA,YAAA,mBAAA,UASF,iDAAA,8CAEE,mBAAA,KAOF,SACE,QAAA,MAAA,OAAA,MACA,OAAA,EAAA,IACA,OAAA,IAAA,MAAA,OAQF,OACE,QAAA,EACA,OAAA,EAOF,SACE,SAAA,KAQF,SACE,YAAA,IAUF,MACE,eAAA,EACA,gBAAA,SAGF,GAAA,GAEE,QAAA,uFCjUF,aA7FI,EAAA,OAAA,QAGI,MAAA,eACA,YAAA,eACA,WAAA,cAAA,mBAAA,eACA,WAAA,eAGJ,EAAA,UAEI,gBAAA,UAGJ,cACI,QAAA,KAAA,WAAA,IAGJ,kBACI,QAAA,KAAA,YAAA,IAKJ,6BAAA,mBAEI,QAAA,GAGJ,WAAA,IAEI,OAAA,IAAA,MAAA,KC4KL,kBAAA,MDvKK,MC0KL,QAAA,mBDrKK,IE8KN,GDLC,kBAAA,MDrKK,ICwKL,UAAA,eCUD,GF5KM,GE2KN,EF1KM,QAAA,ECuKL,OAAA,ECSD,GF3KM,GCsKL,iBAAA,MD/JK,QCkKL,QAAA,KCSD,YFtKU,oBCiKT,iBAAA,eD7JK,OCgKL,OAAA,IAAA,MAAA,KD5JK,OC+JL,gBAAA,mBCSD,UFpKU,UC+JT,iBAAA,eDzJS,mBEkKV,mBDLC,OAAA,IAAA,MAAA,gBEjPD,WACA,YAAA,uBFsPD,IAAA,+CE7OC,IAAK,sDAAuD,4BAA6B,iDAAkD,gBAAiB,gDAAiD,eAAgB,+CAAgD,mBAAoB,2EAA4E,cAE7W,WACA,SAAA,SACA,IAAA,IACA,QAAA,aACA,YAAA,uBACA,WAAA,OACA,YAAA,IACA,YAAA,EAIkC,uBAAA,YAAW,wBAAA,UACX,2BAAW,QAAA,
QAEX,uBDuPlC,QAAS,QCtPyB,sBFiPnC,uBEjP8C,QAAA,QACX,wBAAW,QAAA,QACX,wBAAW,QAAA,QACX,2BAAW,QAAA,QACX,yBAAW,QAAA,QACX,wBAAW,QAAA,QACX,wBAAW,QAAA,QACX,yBAAW,QAAA,QACX,wBAAW,QAAA,QACX,uBAAW,QAAA,QACX,6BAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,2BAAW,QAAA,QACX,qBAAW,QAAA,QACX,0BAAW,QAAA,QACX,qBAAW,QAAA,QACX,yBAAW,QAAA,QACX,0BAAW,QAAA,QACX,2BAAW,QAAA,QACX,sBAAW,QAAA,QACX,yBAAW,QAAA,QACX,sBAAW,QAAA,QACX,wBAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,+BAAW,QAAA,QACX,2BAAW,QAAA,QACX,yBAAW,QAAA,QACX,wBAAW,QAAA,QACX,8BAAW,QAAA,QACX,yBAAW,QAAA,QACX,0BAAW,QAAA,QACX,2BAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,6BAAW,QAAA,QACX,6BAAW,QAAA,QACX,8BAAW,QAAA,QACX,4BAAW,QAAA,QACX,yBAAW,QAAA,QACX,0BAAW,QAAA,QACX,sBAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,2BAAW,QAAA,QACX,wBAAW,QAAA,QACX,yBAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,yBAAW,QAAA,QACX,8BAAW,QAAA,QACX,6BAAW,QAAA,QACX,6BAAW,QAAA,QACX,+BAAW,QAAA,QACX,8BAAW,QAAA,QACX,gCAAW,QAAA,QACX,uBAAW,QAAA,QACX,8BAAW,QAAA,QACX,+BAAW,QAAA,QACX,iCAAW,QAAA,QACX,0BAAW,QAAA,QACX,6BAAW,QAAA,QACX,yBAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,wBAAW,QAAA,QACX,wBAAW,QAAA,QACX,uBAAW,QAAA,QACX,gCAAW,QAAA,QACX,gCAAW,QAAA,QACX,2BAAW,QAAA,QACX,uBAAW,QAAA,QACX,wBAAW,QAAA,QACX,uBAAW,QAAA,QACX,0BAAW,QAAA,QACX,+BAAW,QAAA,QACX,+BAAW,QAAA,QACX,wBAAW,QAAA,QACX,+BAAW,QAAA,QACX,gCAAW,QAAA,QACX,4BAAW,QAAA,QACX,6BAAW,QAAA,QACX,8BAAW,QAAA,QACX,0BAAW,QAAA,QACX,gCAAW,QAAA,QACX,4BAAW,QAAA,QACX,6BAAW,QAAA,QACX,gCAAW,QAAA,QACX,4BAAW,QAAA,QACX,6BAAW,QAAA,QACX,6BAAW,QAAA,QACX,8BAAW,QAAA,QACX,2BAAW,QAAA,QACX,6BAAW,QAAA,QACX,4BAAW,QAAA,QACX,8BAAW,QAAA,QACX,+BAAW,QAAA,QACX,mCAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,2BAAW,QAAA,QACX,4BAAW,QAAA,QACX,+BAAW,QAAA,QACX,wBAAW,QAAA,QACX,2BAAW,QAAA,QACX,yBAAW,QAAA,QACX,0BAAW,QAAA,QACX,yBAAW,QAAA,QACX,6BAAW,QAAA,QACX,+BAAW,QAAA,QACX,0BAAW,QAAA,QACX,gCAAW,QAAA,QACX,+BAAW,QAAA,QACX,8BAAW,QAAA,QACX,kCAAW,QAAA,QACX,oCAAW,QAAA,QACX,sBAAW,QAAA,QACX,2BAAW,QAAA,QACX,uBAAW,QAAA,QACX,8BAAW,QAAA,QACX,4BAAW,QAAA,QACX,8BAAW,QAAA,QACX,6BAAW,QAAA,QACX,4BAAW,QAAA,QACX,0BAAW,QAAA,QACX,4BAAW,QAAA,QACX,qCAAW,QAAA,QACX,oCAAW,QAAA,QACX,kCAAW,QAAA,QACX,oCAAW,QAAA,QACX,wBAAW,QAAA,QACX,yBAAW,QAAA,QACX,wBAAW,QAAA,QACX,yBAAW,QAAA,QACX,4BAAW,QAAA,QACX,6BAAW,QAAA,QACX,4BAAW,QAAA,QACX,4BAAW,QAAA,QACX,8BAAW,QAAA,QACX,uBAAW,QAAA,QACX,wBAAW,QAAA,QACX,0BAAW,QAAA,QACX,sBAAW,QAAA,QACX,sBAAW,QAAA,QACX,uBAAW,QAAA,QACX,mCAAW,QAAA,QACX,uCAAW,QAAA,QACX,gCAAW,QAAA,QACX,oCAAW,QAAA,QACX,qCAAW,QAAA,QACX,yCAAW,QAAA,QACX,4BAAW,QAAA,QACX,yBAAW,QAAA,QACX,gCAAW,QAAA,QACX,8BAAW,QAAA,QACX,yBAAW,QAAA,QACX,wBAAW,QAAA,QACX,0BAAW,QAAA,QACX,6BAAW,QAAA,QACX,yBAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,wBAAW,QAAA,QACX,yBAAW,QAAA,QACX,yBAAW,QAAA,QACX,uBAAW,QAAA,QACX,8BAAW,QAAA,QACX,+BAAW,QAAA,QACX,gCAAW,QAAA,QACX,8BAAW,QAAA,QACX,8BAAW,QAAA,QACX,8BAAW,QAAA,QACX,2BAAW,QAAA,QACX,0BAAW,QAAA,QACX,yBAAW,QAAA,QACX,6BAAW,QAAA,QACX,2BAAW,QAAA,QACX,4BAAW,QAAA,QACX,wBAAW,QAAA,QACX,wBAAW,QAAA,QACX,2BAAW,QAAA,QACX,2BAAW,QAAA,QACX,4BAAW,QAAA,QACX,+BAAW,QAAA,QACX,8BAAW,QAAA,QACX,4BAAW,QAAA,QACX,4BAAW,QAAA,QACX,4BAAW,QAAA,QACX,iCAAW,QAAA,QACX,oCAAW,QAAA,QACX,iCAAW,QAAA,QACX,+BAAW,QAAA,QACX,+BAAW,QAAA,QACX,iCAAW,QAAA,QACX,qBAAW,QAAA,QACX,4BAAW,QAAA,QACX,4BAAW,QAAA,QACX,2BAAW,QAAA,QACX,uBAAW,QAAA,QASX,wBAAW,QAAA,QACX,wBAAW,QAAA,QACX,4BAAW,QAAA,QACX,uBAAW,QAAA,QACX,wBAAW,QAAA,QACX,uBAAW,QAAA,QACX,yBAAW,QAAA,QACX,yBAAW,QAAA,QACX,+BAAW,QAAA,QACX,uBAAW,QAAA,QACX,6BAAW,QAAA,QACX,sBAAW,QAAA,QACX,wBAAW,QAAA,QACX,wBAAW,QAAA,QACX,4BAAW,QAAA,QACX,uBAAW,QAAA,QACX,4BAAW,QAAA,QACX,
6BAAW,QAAA,QACX,2BAAW,QAAA,QACX,0BAAW,QAAA,QACX,sBAAW,QAAA,QACX,sBAAW,QAAA,QACX,sBAAW,QAAA,QACX,sBAAW,QAAA,QACX,wBAAW,QAAA,QACX,sBAAW,QAAA,QACX,wBAAW,QAAA,QACX,4BAAW,QAAA,QACX,mCAAW,QAAA,QACX,4BAAW,QAAA,QACX,oCAAW,QAAA,QACX,kCAAW,QAAA,QACX,iCAAW,QAAA,QACX,+BAAW,QAAA,QACX,sBAAW,QAAA,QACX,wBAAW,QAAA,QACX,6BAAW,QAAA,QACX,4BAAW,QAAA,QACX,6BAAW,QAAA,QACX,kCAAW,QAAA,QACX,mCAAW,QAAA,QACX,sCAAW,QAAA,QACX,0CAAW,QAAA,QACX,oCAAW,QAAA,QACX,wCAAW,QAAA,QACX,qCAAW,QAAA,QACX,iCAAW,QAAA,QACX,gCAAW,QAAA,QACX,kCAAW,QAAA,QACX,+BAAW,QAAA,QACX,0BAAW,QAAA,QACX,8BAAW,QAAA,QACX,4BAAW,QAAA,QACX,4BAAW,QAAA,QACX,6BAAW,QAAA,QACX,4BAAW,QAAA,QCtS/C,0BCgEE,QAAA,QHi+BF,EDNC,mBAAA,WGxhCI,gBAAiB,WFiiCZ,WAAY,WGl+BZ,OADL,QJg+BJ,mBAAA,WGthCI,gBAAiB,WACpB,WAAA,WHyhCD,KGrhCC,UAAW,KAEX,4BAAA,cAEA,KACA,YAAA,iBAAA,UAAA,MAAA,WHuhCD,UAAA,KGnhCC,YAAa,WF4hCb,MAAO,KACP,iBAAkB,KExhClB,OADA,MAEA,OHqhCD,SG/gCC,YAAa,QACb,UAAA,QACA,YAAA,QAEA,EFwhCA,MAAO,QEthCL,gBAAA,KAIF,QH8gCD,QKjkCC,MAAA,QACA,gBAAA,UF6DF,QACE,QAAA,IAAA,KAAA,yBHygCD,eAAA,KGlgCC,OHqgCD,OAAA,ECSD,IACE,eAAgB,ODDjB,4BM/kCC,0BLklCF,gBKnlCE,iBADA,eH4EA,QAAS,MACT,UAAA,KHugCD,OAAA,KGhgCC,aACA,cAAA,IAEA,eACA,QAAA,aC6FA,UAAA,KACK,OAAA,KACG,QAAA,IEvLR,YAAA,WACA,iBAAA,KACA,OAAA,IAAA,MAAA,KN+lCD,cAAA,IGjgCC,mBAAoB,IAAI,IAAI,YAC5B,cAAA,IAAA,IAAA,YHmgCD,WAAA,IAAA,IAAA,YG5/BC,YACA,cAAA,IAEA,GH+/BD,WAAA,KGv/BC,cAAe,KACf,OAAA,EACA,WAAA,IAAA,MAAA,KAEA,SACA,SAAA,SACA,MAAA,IACA,OAAA,IACA,QAAA,EHy/BD,OAAA,KGj/BC,SAAA,OF0/BA,KAAM,cEx/BJ,OAAA,EAEA,0BACA,yBACA,SAAA,OACA,MAAA,KHm/BH,OAAA,KGx+BC,OAAQ,EACR,SAAA,QH0+BD,KAAA,KCSD,cACE,OAAQ,QAQV,IACA,IMlpCE,IACA,IACA,IACA,INwoCF,GACA,GACA,GACA,GACA,GACA,GDAC,YAAA,QOlpCC,YAAa,IN2pCb,YAAa,IACb,MAAO,QAoBT,WAZA,UAaA,WAZA,UM5pCI,WN6pCJ,UM5pCI,WN6pCJ,UM5pCI,WN6pCJ,UDMC,WCLD,UACA,UAZA,SAaA,UAZA,SAaA,UAZA,SAaA,UAZA,SAaA,UAZA,SAaA,UAZA,SMppCE,YAAa,INwqCb,YAAa,EACb,MAAO,KAGT,IMxqCE,IAJF,IN2qCA,GAEA,GDLC,GCSC,WAAY,KACZ,cAAe,KASjB,WANA,UDCC,WCCD,UM5qCA,WN8qCA,UACA,UANA,SM5qCI,UN8qCJ,SM3qCA,UN6qCA,SAQE,UAAW,IAGb,IMprCE,IAJF,INurCA,GAEA,GDLC,GCSC,WAAY,KACZ,cAAe,KASjB,WANA,UDCC,WCCD,UMvrCA,WNyrCA,UACA,UANA,SMxrCI,UN0rCJ,SMtrCA,UNwrCA,SMxrCU,UAAA,IACV,IAAA,GAAU,UAAA,KACV,IAAA,GAAU,UAAA,KACV,IAAA,GAAU,UAAA,KACV,IAAA,GAAU,UAAA,KACV,IAAA,GAAU,UAAA,KAOR,IADF,GPssCC,UAAA,KCSD,EMzsCE,OAAA,EAAA,EAAA,KAEA,MPosCD,cAAA,KO/rCC,UAAW,KAwOX,YAAa,IA1OX,YAAA,IPssCH,yBO7rCC,MNssCE,UAAW,MMjsCf,OAAA,MAEE,UAAA,IAKF,MP0rCC,KO1rCsB,QAAA,KP6rCtB,iBAAA,QO5rCsB,WP+rCtB,WAAA,KO9rCsB,YPisCtB,WAAA,MOhsCsB,aPmsCtB,WAAA,OOlsCsB,cPqsCtB,WAAA,QOlsCsB,aPqsCtB,YAAA,OOpsCsB,gBPusCtB,eAAA,UOtsCsB,gBPysCtB,eAAA,UOrsCC,iBPwsCD,eAAA,WQ3yCC,YR8yCD,MAAA,KCSD,cOpzCI,MAAA,QAHF,qBDwGF,qBP6sCC,MAAA,QCSD,cO3zCI,MAAA,QAHF,qBD2GF,qBPitCC,MAAA,QCSD,WOl0CI,MAAA,QAHF,kBD8GF,kBPqtCC,MAAA,QCSD,cOz0CI,MAAA,QAHF,qBDiHF,qBPytCC,MAAA,QCSD,aOh1CI,MAAA,QDwHF,oBAHF,oBExHE,MAAA,QACA,YR01CA,MAAO,KQx1CL,iBAAA,QAHF,mBF8HF,mBP2tCC,iBAAA,QCSD,YQ/1CI,iBAAA,QAHF,mBFiIF,mBP+tCC,iBAAA,QCSD,SQt2CI,iBAAA,QAHF,gBFoIF,gBPmuCC,iBAAA,QCSD,YQ72CI,iBAAA,QAHF,mBFuIF,mBPuuCC,iBAAA,QCSD,WQp3CI,iBAAA,QF6IF,kBADF,kBAEE,iBAAA,QPsuCD,aO7tCC,eAAgB,INsuChB,OAAQ,KAAK,EAAE,KMpuCf,cAAA,IAAA,MAAA,KAFF,GPkuCC,GCSC,WAAY,EACZ,cAAe,KM9tCf,MP0tCD,MO3tCD,MAPI,MASF,cAAA,EAIF,eALE,aAAA,EACA,WAAA,KPkuCD,aO9tCC,aAAc,EAKZ,YAAA,KACA,WAAA,KP6tCH,gBOvtCC,QAAS,aACT,cAAA,IACA,aAAA,IAEF,GNguCE,WAAY,EM9tCZ,cAAA,KAGA,GADF,GP0tCC,YAAA,WOttCC,GPytCD,YAAA,IOnnCD,GAvFM,YAAA,EAEA,yBACA,kBGtNJ,MAAA,KACA,MAAA,MACA,SAAA,OVq6CC,MAAA,KO7nCC,WAAY,MAhFV,cAAA,SPgtCH,YAAA,OOtsCD,kBNgtCE,YAAa,OM1sCjB,0BPssCC,YOrsCC,OAAA,KA9IqB,cAAA,IAAA,OAAA,KAmJvB,YACE,UAAA,IACA,eAAA,UAEA,WPssCD,QAAA,KAAA,KOj
sCG,OAAA,EAAA,EAAA,KN0sCF,UAAW,OACX,YAAa,IAAI,MAAM,KMptCzB,yBP+sCC,wBO/sCD,yBNytCE,cAAe,EMnsCb,kBAFA,kBACA,iBPksCH,QAAA,MO/rCG,UAAA,INwsCF,YAAa,WACb,MAAO,KMhsCT,yBP2rCC,yBO3rCD,wBAEE,QAAA,cAEA,oBACA,sBACA,cAAA,KP6rCD,aAAA,EOvrCG,WAAA,MNgsCF,aAAc,IAAI,MAAM,KACxB,YAAa,EMhsCX,kCNksCJ,kCMnsCe,iCACX,oCNmsCJ,oCDLC,mCCUC,QAAS,GMjsCX,iCNmsCA,iCMzsCM,gCAOJ,mCNmsCF,mCDLC,kCO7rCC,QAAA,cPksCD,QWv+CC,cAAe,KVg/Cf,WAAY,OACZ,YAAa,WU7+Cb,KXy+CD,IWr+CD,IACE,KACA,YAAA,MAAA,OAAA,SAAA,cAAA,UAEA,KACA,QAAA,IAAA,IXu+CD,UAAA,IWn+CC,MAAO,QACP,iBAAA,QACA,cAAA,IAEA,IACA,QAAA,IAAA,IACA,UAAA,IV4+CA,MU5+CA,KXq+CD,iBAAA,KW3+CC,cAAe,IASb,mBAAA,MAAA,EAAA,KAAA,EAAA,gBACA,WAAA,MAAA,EAAA,KAAA,EAAA,gBAEA,QV6+CF,QU7+CE,EXq+CH,UAAA,KWh+CC,YAAa,IACb,mBAAA,KACA,WAAA,KAEA,IACA,QAAA,MACA,QAAA,MACA,OAAA,EAAA,EAAA,KACA,UAAA,KACA,YAAA,WACA,MAAA,KACA,WAAA,UXk+CD,UAAA,WW7+CC,iBAAkB,QAehB,OAAA,IAAA,MAAA,KACA,cAAA,IAEA,SACA,QAAA,EACA,UAAA,QXi+CH,MAAA,QW59CC,YAAa,SACb,iBAAA,YACA,cAAA,EC1DF,gBCHE,WAAA,MACA,WAAA,OAEA,Wb8hDD,cAAA,KYxhDC,aAAA,KAqEA,aAAc,KAvEZ,YAAA,KZ+hDH,yBY1hDC,WAkEE,MAAO,OZ69CV,yBY5hDC,WA+DE,MAAO,OZk+CV,0BYzhDC,WCvBA,MAAA,QAGA,iBbmjDD,cAAA,KYthDC,aAAc,KCvBd,aAAA,KACA,YAAA,KCAE,KACE,aAAA,MAEA,YAAA,MAGA,UAAA,WAAA,WAAA,WAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,WAAA,WAAA,WAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,WAAA,WAAA,WAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,WAAA,WAAA,WAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UdgjDL,SAAA,SchiDG,WAAA,IACE,cAAA,KdkiDL,aAAA,Kc1hDG,UAAA,WAAA,WAAA,WAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,Ud6hDH,MAAA,Kc7hDG,WdgiDH,MAAA,KchiDG,WdmiDH,MAAA,acniDG,WdsiDH,MAAA,actiDG,UdyiDH,MAAA,IcziDG,Ud4iDH,MAAA,ac5iDG,Ud+iDH,MAAA,ac/iDG,UdkjDH,MAAA,IcljDG,UdqjDH,MAAA,acrjDG,UdwjDH,MAAA,acxjDG,Ud2jDH,MAAA,Ic3jDG,Ud8jDH,MAAA,ac/iDG,UdkjDH,MAAA,YcljDG,gBdqjDH,MAAA,KcrjDG,gBdwjDH,MAAA,acxjDG,gBd2jDH,MAAA,ac3jDG,ed8jDH,MAAA,Ic9jDG,edikDH,MAAA,acjkDG,edokDH,MAAA,acpkDG,edukDH,MAAA,IcvkDG,ed0kDH,MAAA,ac1kDG,ed6kDH,MAAA,ac7kDG,edglDH,MAAA,IchlDG,edmlDH,MAAA,ac9kDG,edilDH,MAAA,YchmDG,edmmDH,MAAA,KcnmDG,gBdsmDH,KAAA,KctmDG,gBdymDH,KAAA,aczmDG,gBd4mDH,KAAA,ac5mDG,ed+mDH,KAAA,Ic/mDG,edknDH,KAAA,aclnDG,edqnDH,KAAA,acrnDG,edwnDH,KAAA,IcxnDG,ed2nDH,KAAA,ac3nDG,ed8nDH,KAAA,ac9nDG,edioDH,KAAA,IcjoDG,edooDH,KAAA,ac/nDG,edkoDH,KAAA,YcnnDG,edsnDH,KAAA,KctnDG,kBdynDH,YAAA,KcznDG,kBd4nDH,YAAA,ac5nDG,kBd+nDH,YAAA,ac/nDG,iBdkoDH,YAAA,IcloDG,iBdqoDH,YAAA,acroDG,iBdwoDH,YAAA,acxoDG,iBd2oDH,YAAA,Ic3oDG,iBd8oDH,YAAA,ac9oDG,iBdipDH,YAAA,acjpDG,iBdopDH,YAAA,IcppDG,iBdupDH,YAAA,acvpDG,iBd0pDH,YAAA,Yc5rDG,iBACE,YAAA,EAOJ,yBACE,UAAA,WAAA,WAAA,WAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,Ud0rDD,MAAA,Kc1rDC,Wd6rDD,MAAA,Kc7rDC,WdgsDD,MAAA,achsDC,WdmsDD,MAAA,acnsDC,UdssDD,MAAA,IctsDC,UdysDD,MAAA,aczsDC,Ud4sDD,MAAA,ac5sDC,Ud+sDD,MAAA,Ic/sDC,UdktDD,MAAA,acltDC,UdqtDD,MAAA,acrtDC,UdwtDD,MAAA,IcxtDC,Ud2tDD,MAAA,ac5sDC,Ud+sDD,MAAA,Yc/sDC,gBdktDD,MAAA,KcltDC,gBdqtDD,MAAA,acrtDC,gBdwtDD,MAAA,acxtDC,ed2tDD,MAAA,Ic3tDC,ed8tDD,MAAA,ac9tDC,ediuDD,MAAA,acjuDC,edouDD,MAAA,IcpuDC,eduuDD,MAAA,acvuDC,ed0uDD,MAAA,ac1uDC,ed6uDD,MAAA,Ic7uDC,edgvDD,MAAA,ac3uDC,ed8uDD,MAAA,Yc7vDC,edgwDD,MAAA,KchwDC,gBdmwDD,KAAA,KcnwDC,gBdswDD,KAAA,actwDC,gBdywDD,KAAA,aczwDC,ed4wDD,KAAA,Ic5wDC,ed+wDD,KAAA,ac/wDC,edkxDD,KAAA,aclxDC,edqxDD,KAAA,IcrxDC,edwxDD,KAAA,acxxDC,ed2xDD,KAAA,ac3xDC,ed8xDD,KAAA,Ic9xDC,ediyDD,KAAA,ac5xDC,ed+xDD,KAAA,YchxDC,edmxDD,KAAA,KcnxDC,kBdsxDD,YAAA,KctxDC,kBdyxDD,YAAA,aczxDC,kBd4xDD,YAAA,ac5xDC,iBd+xDD,YAAA,Ic/xDC,iBdkyDD,YAAA,aclyDC,iBdqyDD,YAAA,acryDC,iBdwyDD,YAAA,IcxyDC,iBd2yDD,YAAA,ac3yDC,iBd8yDD,YAAA,ac9yDC,iBdizDD,YAAA,IcjzDC,iBdozDD,YAAA,acpz
DC,iBduzDD,YAAA,YY9yDD,iBE3CE,YAAA,GAQF,yBACE,UAAA,WAAA,WAAA,WAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,Udw1DD,MAAA,Kcx1DC,Wd21DD,MAAA,Kc31DC,Wd81DD,MAAA,ac91DC,Wdi2DD,MAAA,acj2DC,Udo2DD,MAAA,Icp2DC,Udu2DD,MAAA,acv2DC,Ud02DD,MAAA,ac12DC,Ud62DD,MAAA,Ic72DC,Udg3DD,MAAA,ach3DC,Udm3DD,MAAA,acn3DC,Uds3DD,MAAA,Ict3DC,Udy3DD,MAAA,ac12DC,Ud62DD,MAAA,Yc72DC,gBdg3DD,MAAA,Kch3DC,gBdm3DD,MAAA,acn3DC,gBds3DD,MAAA,act3DC,edy3DD,MAAA,Icz3DC,ed43DD,MAAA,ac53DC,ed+3DD,MAAA,ac/3DC,edk4DD,MAAA,Icl4DC,edq4DD,MAAA,acr4DC,edw4DD,MAAA,acx4DC,ed24DD,MAAA,Ic34DC,ed84DD,MAAA,acz4DC,ed44DD,MAAA,Yc35DC,ed85DD,MAAA,Kc95DC,gBdi6DD,KAAA,Kcj6DC,gBdo6DD,KAAA,acp6DC,gBdu6DD,KAAA,acv6DC,ed06DD,KAAA,Ic16DC,ed66DD,KAAA,ac76DC,edg7DD,KAAA,ach7DC,edm7DD,KAAA,Icn7DC,eds7DD,KAAA,act7DC,edy7DD,KAAA,acz7DC,ed47DD,KAAA,Ic57DC,ed+7DD,KAAA,ac17DC,ed67DD,KAAA,Yc96DC,edi7DD,KAAA,Kcj7DC,kBdo7DD,YAAA,Kcp7DC,kBdu7DD,YAAA,acv7DC,kBd07DD,YAAA,ac17DC,iBd67DD,YAAA,Ic77DC,iBdg8DD,YAAA,ach8DC,iBdm8DD,YAAA,acn8DC,iBds8DD,YAAA,Ict8DC,iBdy8DD,YAAA,acz8DC,iBd48DD,YAAA,ac58DC,iBd+8DD,YAAA,Ic/8DC,iBdk9DD,YAAA,acl9DC,iBdq9DD,YAAA,YYz8DD,iBE9CE,YAAA,GAQF,0BACE,UAAA,WAAA,WAAA,WAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,Uds/DD,MAAA,Kct/DC,Wdy/DD,MAAA,Kcz/DC,Wd4/DD,MAAA,ac5/DC,Wd+/DD,MAAA,ac//DC,UdkgED,MAAA,IclgEC,UdqgED,MAAA,acrgEC,UdwgED,MAAA,acxgEC,Ud2gED,MAAA,Ic3gEC,Ud8gED,MAAA,ac9gEC,UdihED,MAAA,acjhEC,UdohED,MAAA,IcphEC,UduhED,MAAA,acxgEC,Ud2gED,MAAA,Yc3gEC,gBd8gED,MAAA,Kc9gEC,gBdihED,MAAA,acjhEC,gBdohED,MAAA,acphEC,eduhED,MAAA,IcvhEC,ed0hED,MAAA,ac1hEC,ed6hED,MAAA,ac7hEC,edgiED,MAAA,IchiEC,edmiED,MAAA,acniEC,edsiED,MAAA,actiEC,edyiED,MAAA,IcziEC,ed4iED,MAAA,acviEC,ed0iED,MAAA,YczjEC,ed4jED,MAAA,Kc5jEC,gBd+jED,KAAA,Kc/jEC,gBdkkED,KAAA,aclkEC,gBdqkED,KAAA,acrkEC,edwkED,KAAA,IcxkEC,ed2kED,KAAA,ac3kEC,ed8kED,KAAA,ac9kEC,edilED,KAAA,IcjlEC,edolED,KAAA,acplEC,edulED,KAAA,acvlEC,ed0lED,KAAA,Ic1lEC,ed6lED,KAAA,acxlEC,ed2lED,KAAA,Yc5kEC,ed+kED,KAAA,Kc/kEC,kBdklED,YAAA,KcllEC,kBdqlED,YAAA,acrlEC,kBdwlED,YAAA,acxlEC,iBd2lED,YAAA,Ic3lEC,iBd8lED,YAAA,ac9lEC,iBdimED,YAAA,acjmEC,iBdomED,YAAA,IcpmEC,iBdumED,YAAA,acvmEC,iBd0mED,YAAA,ac1mEC,iBd6mED,YAAA,Ic7mEC,iBdgnED,YAAA,achnEC,iBdmnED,YAAA,YetrED,iBACA,YAAA,GAGA,MACA,iBAAA,YAEA,QfyrED,YAAA,IevrEC,eAAgB,IAChB,MAAA,KfyrED,WAAA,KelrEC,GACA,WAAA,KfsrED,OexrEC,MAAO,KdmsEP,UAAW,KACX,cAAe,KcvrET,mBd0rER,mBczrEQ,mBAHA,mBACA,mBd0rER,mBDHC,QAAA,IensEC,YAAa,WAoBX,eAAA,IACA,WAAA,IAAA,MAAA,KArBJ,mBdktEE,eAAgB,OAChB,cAAe,IAAI,MAAM,KDJ1B,uCCMD,uCcrtEA,wCdstEA,wCclrEI,2CANI,2CforEP,WAAA,EezqEG,mBf4qEH,WAAA,IAAA,MAAA,KCWD,cACE,iBAAkB,Kc/pEpB,6BdkqEA,6BcjqEE,6BAZM,6BfsqEP,6BCMD,6BDHC,QAAA,ICWD,gBACE,OAAQ,IAAI,MAAM,Kc1qEpB,4Bd6qEA,4Bc7qEA,4BAQQ,4Bf8pEP,4BCMD,4Bc7pEM,OAAA,IAAA,MAAA,KAYF,4BAFJ,4BfopEC,oBAAA,IevoEG,yCf0oEH,iBAAA,QehoEC,4BACA,iBAAA,QfooED,uBe9nEG,SAAA,OdyoEF,QAAS,acxoEL,MAAA,KAEA,sBfioEL,sBgB7wEC,SAAA,OfwxEA,QAAS,WACT,MAAO,KAST,0BerxEE,0Bf+wEF,0BAGA,0BexxEM,0BAMJ,0BfgxEF,0BAGA,0BACA,0BDNC,0BCAD,0BAGA,0BASE,iBAAkB,QDLnB,sCgBlyEC,sCAAA,oCfyyEF,sCetxEM,sCf2xEJ,iBAAkB,QASpB,2Be1yEE,2BfoyEF,2BAGA,2Be7yEM,2BAMJ,2BfqyEF,2BAGA,2BACA,2BDNC,2BCAD,2BAGA,2BASE,iBAAkB,QDLnB,uCgBvzEC,uCAAA,qCf8zEF,uCe3yEM,uCfgzEJ,iBAAkB,QASpB,wBe/zEE,wBfyzEF,wBAGA,wBel0EM,wBAMJ,wBf0zEF,wBAGA,wBACA,wBDNC,wBCAD,wBAGA,wBASE,iBAAkB,QDLnB,oCgB50EC,oCAAA,kCfm1EF,oCeh0EM,oCfq0EJ,iBAAkB,QASpB,2Bep1EE,2Bf80EF,2BAGA,2Bev1EM,2BAMJ,2Bf+0EF,2BAGA,2BACA,2BDNC,2BCAD,2BAGA,2BASE,iBAAkB,QDLnB,uCgBj2EC,uCAAA,qCfw2EF,uCer1EM,uCf01EJ,iBAAkB,QASpB,0Bez2EE,0Bfm2EF,0BAGA,0Be52EM,0BAMJ,0Bfo2EF,0BAGA,0BACA,0BDNC,0BCAD,0BAGA,0BASE,iBAAkB,QDLnB,sCehtEC,sCADF,oCdwtEA,sCe12EM,sCDoJJ,iBA
AA,QA6DF,kBACE,WAAY,KA3DV,WAAA,KAEA,oCACA,kBACA,MAAA,KfotED,cAAA,Ke7pEC,WAAY,OAnDV,mBAAA,yBfmtEH,OAAA,IAAA,MAAA,KCWD,yBACE,cAAe,Ec5qEjB,qCd+qEA,qCcjtEI,qCARM,qCfktET,qCCMD,qCDHC,YAAA,OCWD,kCACE,OAAQ,EcvrEV,0Dd0rEA,0Dc1rEA,0DAzBU,0Df4sET,0DCMD,0DAME,YAAa,Ec/rEf,yDdksEA,yDclsEA,yDArBU,yDfgtET,yDCMD,yDAME,aAAc,EDLjB,yDe1sEW,yDEzNV,yDjBk6EC,yDiBj6ED,cAAA,GAMA,SjBk6ED,UAAA,EiB/5EC,QAAS,EACT,OAAA,EACA,OAAA,EAEA,OACA,QAAA,MACA,MAAA,KACA,QAAA,EACA,cAAA,KACA,UAAA,KjBi6ED,YAAA,QiB95EC,MAAO,KACP,OAAA,EACA,cAAA,IAAA,MAAA,QAEA,MjBg6ED,QAAA,aiBr5EC,UAAW,Kb4BX,cAAA,IACG,YAAA,IJ63EJ,mBiBr5EC,mBAAoB,WhBg6EjB,gBAAiB,WgB95EpB,WAAA,WjBy5ED,qBiBv5EC,kBAGA,OAAQ,IAAI,EAAE,EACd,WAAA,MjBs5ED,YAAA,OiBj5EC,iBACA,QAAA,MAIF,kBhB25EE,QAAS,MgBz5ET,MAAA,KAIF,iBAAA,ahB05EE,OAAQ,KI99ER,uBY2EF,2BjB64EC,wBiB54EC,QAAA,IAAA,KAAA,yBACA,eAAA,KAEA,OACA,QAAA,MjB+4ED,YAAA,IiBr3EC,UAAW,KACX,YAAA,WACA,MAAA,KAEA,cACA,QAAA,MACA,MAAA,KACA,OAAA,KACA,QAAA,IAAA,KACA,UAAA,KACA,YAAA,WACA,MAAA,KbxDA,iBAAA,KACQ,iBAAA,KAyHR,OAAA,IAAA,MAAA,KACK,cAAA,IACG,mBAAA,MAAA,EAAA,IAAA,IAAA,iBJwzET,WAAA,MAAA,EAAA,IAAA,IAAA,iBkBh8EC,mBAAA,aAAA,YAAA,KAAA,mBAAA,YAAA,KACE,cAAA,aAAA,YAAA,KAAA,WAAA,YAAA,KACA,WAAA,aAAA,YAAA,KAAA,WAAA,YAAA,KdWM,oBJy7ET,aAAA,QIx5EC,QAAA,EACE,mBAAA,MAAA,EAAA,IAAA,IAAA,iBAAA,EAAA,EAAA,IAAA,qBACA,WAAA,MAAA,EAAA,IAAA,IAAA,iBAAA,EAAA,EAAA,IAAA,qBAEF,gCAA0B,MAAA,KJ25E3B,QAAA,EI15EiC,oCJ65EjC,MAAA,KiBh4EG,yCACA,MAAA,KAQF,0BhBs4EA,iBAAkB,YAClB,OAAQ,EgBn4EN,wBjB63EH,wBiB13EC,iChBq4EA,iBAAkB,KgBn4EhB,QAAA,EAIF,wBACE,iCjB03EH,OAAA,YiB72EC,sBjBg3ED,OAAA,KiB91EG,mBhB02EF,mBAAoB,KAEtB,qDgB32EM,8BjBo2EH,8BiBj2EC,wCAAA,+BhB62EA,YAAa,KgB32EX,iCjBy2EH,iCiBt2EC,2CAAA,kChB02EF,0BACA,0BACA,oCACA,2BAKE,YAAa,KgBh3EX,iCjB82EH,iCACF,2CiBp2EC,kChBu2EA,0BACA,0BACA,oCACA,2BgBz2EA,YAAA,MhBi3EF,YgBv2EE,cAAA,KAGA,UADA,OjBi2ED,SAAA,SiBr2EC,QAAS,MhBg3ET,WAAY,KgBx2EV,cAAA,KAGA,gBADA,aAEA,WAAA,KjBi2EH,aAAA,KiB91EC,cAAe,EhBy2Ef,YAAa,IACb,OAAQ,QgBp2ER,+BjBg2ED,sCiBl2EC,yBACA,gCAIA,SAAU,ShBw2EV,WAAY,MgBt2EZ,YAAA,MAIF,oBAAA,cAEE,WAAA,KAGA,iBADA,cAEA,SAAA,SACA,QAAA,aACA,aAAA,KjB61ED,cAAA,EiB31EC,YAAa,IhBs2Eb,eAAgB,OgBp2EhB,OAAA,QAUA,kCjBo1ED,4BCWC,WAAY,EACZ,YAAa,KgBv1Eb,wCAAA,qCjBm1ED,8BCOD,+BgBh2EI,2BhB+1EJ,4BAME,OAAQ,YDNT,0BiBv1EG,uBAMF,oCAAA,iChB61EA,OAAQ,YDNT,yBiBp1EK,sBAaJ,mCAFF,gCAGE,OAAA,YAGA,qBjBy0ED,WAAA,KiBv0EC,YAAA,IhBk1EA,eAAgB,IgBh1Ed,cAAA,EjB00EH,8BiB5zED,8BCnQE,cAAA,EACA,aAAA,EAEA,UACA,OAAA,KlBkkFD,QAAA,IAAA,KkBhkFC,UAAA,KACE,YAAA,IACA,cAAA,IAGF,gBjB0kFA,OAAQ,KiBxkFN,YAAA,KD2PA,0BAFJ,kBAGI,OAAA,KAEA,6BACA,OAAA,KjBy0EH,QAAA,IAAA,KiB/0EC,UAAW,KAST,YAAA,IACA,cAAA,IAVJ,mChB81EE,OAAQ,KgBh1EN,YAAA,KAGA,6CAjBJ,qCAkBI,OAAA,KAEA,oCACA,OAAA,KjBy0EH,WAAA,KiBr0EC,QAAS,IAAI,KC/Rb,UAAA,KACA,YAAA,IAEA,UACA,OAAA,KlBumFD,QAAA,KAAA,KkBrmFC,UAAA,KACE,YAAA,UACA,cAAA,IAGF,gBjB+mFA,OAAQ,KiB7mFN,YAAA,KDuRA,0BAFJ,kBAGI,OAAA,KAEA,6BACA,OAAA,KjBk1EH,QAAA,KAAA,KiBx1EC,UAAW,KAST,YAAA,UACA,cAAA,IAVJ,mChBu2EE,OAAQ,KgBz1EN,YAAA,KAGA,6CAjBJ,qCAkBI,OAAA,KAEA,oCACA,OAAA,KjBk1EH,WAAA,KiBz0EC,QAAS,KAAK,KAEd,UAAA,KjB00ED,YAAA,UiBt0EG,cjBy0EH,SAAA,SiBp0EC,4BACA,cAAA,OAEA,uBACA,SAAA,SACA,IAAA,EACA,MAAA,EACA,QAAA,EACA,QAAA,MACA,MAAA,KjBu0ED,OAAA,KiBr0EC,YAAa,KhBg1Eb,WAAY,OACZ,eAAgB,KDLjB,oDiBv0EC,uCADA,iCAGA,MAAO,KhBg1EP,OAAQ,KACR,YAAa,KDLd,oDiBv0EC,uCADA,iCAKA,MAAO,KhB80EP,OAAQ,KACR,YAAa,KAKf,uBAEA,8BAJA,4BADA,yBAEA,oBAEA,2BDNC,4BkBruFG,mCAJA,yBD0ZJ,gCbvWE,MAAA,QJ2rFD,2BkBxuFG,aAAA,QACE,mBAAA,MAAA,EAAA,IAAA,IAAA,iBd4CJ,WAAA,MAAA,EAAA,IAAA,IAAA,iBJgsFD,iCiBz1EC,aAAc,QC5YZ,mBAAA,MAAA,EAAA,IAAA,IAAA,iBAAA,EAAA,EAAA,IAAA,QACA,WAAA,MAAA,EAAA,IAAA,IAAA,iBAAA,EAAA,EAAA,IAAA,QlByuFH,gCiB91EC,MAAO,QCtYL,
iBAAA,QlBuuFH,aAAA,QCWD,oCACE,MAAO,QAKT,uBAEA,8BAJA,4BADA,yBAEA,oBAEA,2BDNC,4BkBnwFG,mCAJA,yBD6ZJ,gCb1WE,MAAA,QJytFD,2BkBtwFG,aAAA,QACE,mBAAA,MAAA,EAAA,IAAA,IAAA,iBd4CJ,WAAA,MAAA,EAAA,IAAA,IAAA,iBJ8tFD,iCiBp3EC,aAAc,QC/YZ,mBAAA,MAAA,EAAA,IAAA,IAAA,iBAAA,EAAA,EAAA,IAAA,QACA,WAAA,MAAA,EAAA,IAAA,IAAA,iBAAA,EAAA,EAAA,IAAA,QlBuwFH,gCiBz3EC,MAAO,QCzYL,iBAAA,QlBqwFH,aAAA,QCWD,oCACE,MAAO,QAKT,qBAEA,4BAJA,0BADA,uBAEA,kBAEA,yBDNC,0BkBjyFG,iCAJA,uBDgaJ,8Bb7WE,MAAA,QJuvFD,yBkBpyFG,aAAA,QACE,mBAAA,MAAA,EAAA,IAAA,IAAA,iBd4CJ,WAAA,MAAA,EAAA,IAAA,IAAA,iBJ4vFD,+BiB/4EC,aAAc,QClZZ,mBAAA,MAAA,EAAA,IAAA,IAAA,iBAAA,EAAA,EAAA,IAAA,QACA,WAAA,MAAA,EAAA,IAAA,IAAA,iBAAA,EAAA,EAAA,IAAA,QlBqyFH,8BiBp5EC,MAAO,QC5YL,iBAAA,QlBmyFH,aAAA,QiB/4EG,kCjBk5EH,MAAA,QiB/4EG,2CjBk5EH,IAAA,KiBv4EC,mDACA,IAAA,EAEA,YjB04ED,QAAA,MiBvzEC,WAAY,IAwEZ,cAAe,KAtIX,MAAA,QAEA,yBjBy3EH,yBiBrvEC,QAAS,aA/HP,cAAA,EACA,eAAA,OjBw3EH,2BiB1vEC,QAAS,aAxHP,MAAA,KjBq3EH,eAAA,OiBj3EG,kCACA,QAAA,aAmHJ,0BhB4wEE,QAAS,aACT,eAAgB,OgBr3Ed,wCjB82EH,6CiBtwED,2CjBywEC,MAAA,KiB72EG,wCACA,MAAA,KAmGJ,4BhBwxEE,cAAe,EgBp3Eb,eAAA,OAGA,uBADA,oBjB82EH,QAAA,aiBpxEC,WAAY,EhB+xEZ,cAAe,EgBr3EX,eAAA,OAsFN,6BAAA,0BAjFI,aAAA,EAiFJ,4CjB6xEC,sCiBx2EG,SAAA,SjB22EH,YAAA,EiBh2ED,kDhB42EE,IAAK,GgBl2EL,2BjB+1EH,kCiBh2EG,wBAEA,+BAXF,YAAa,IhBo3Eb,WAAY,EgBn2EV,cAAA,EJviBF,2BIshBF,wBJrhBE,WAAA,KI4jBA,6BAyBA,aAAc,MAnCV,YAAA,MAEA,yBjBw1EH,gCACF,YAAA,IiBx3EG,cAAe,EAwCf,WAAA,OAwBJ,sDAdQ,MAAA,KjB80EL,yBACF,+CiBn0EC,YAAA,KAEE,UAAW,MjBs0EZ,yBACF,+CmBp6FG,YAAa,IACf,UAAA,MAGA,KACA,QAAA,aACA,QAAA,IAAA,KAAA,cAAA,EACA,UAAA,KACA,YAAA,IACA,YAAA,WACA,WAAA,OC0CA,YAAA,OACA,eAAA,OACA,iBAAA,aACA,aAAA,ahB+JA,OAAA,QACG,oBAAA,KACC,iBAAA,KACI,gBAAA,KJ+tFT,YAAA,KmBv6FG,iBAAA,KlBm7FF,OAAQ,IAAI,MAAM,YAClB,cAAe,IkB96Ff,kBdzBA,kBACA,WLk8FD,kBCOD,kBADA,WAME,QAAS,IAAI,KAAK,yBAClB,eAAgB,KkBh7FhB,WnBy6FD,WmB56FG,WlBw7FF,MAAO,KkBn7FL,gBAAA,Kf6BM,YADR,YJk5FD,iBAAA,KmBz6FC,QAAA,ElBq7FA,mBAAoB,MAAM,EAAE,IAAI,IAAI,iBAC5B,WAAY,MAAM,EAAE,IAAI,IAAI,iBoBh+FpC,cAGA,ejB8DA,wBACQ,OAAA,YJ05FT,OAAA,kBmBz6FG,mBAAA,KlBq7FM,WAAY,KkBn7FhB,QAAA,IASN,eC3DE,yBACA,eAAA,KpBi+FD,aoB99FC,MAAA,KnB0+FA,iBAAkB,KmBx+FhB,aAAA,KpBk+FH,mBoBh+FO,mBAEN,MAAA,KACE,iBAAA,QACA,aAAA,QpBi+FH,mBoB99FC,MAAA,KnB0+FA,iBAAkB,QAClB,aAAc,QmBt+FR,oBADJ,oBpBi+FH,mCoB99FG,MAAA,KnB0+FF,iBAAkB,QAClB,aAAc,QmBt+FN,0BnB4+FV,0BAHA,0BmB1+FM,0BnB4+FN,0BAHA,0BDFC,yCoBx+FK,yCnB4+FN,yCmBv+FE,MAAA,KnB++FA,iBAAkB,QAClB,aAAc,QmBx+FZ,oBpBg+FH,oBoBh+FG,mCnB6+FF,iBAAkB,KmBz+FV,4BnB8+FV,4BAHA,4BDHC,6BCOD,6BAHA,6BkB39FA,sCClBM,sCnB8+FN,sCmBx+FI,iBAAA,KACA,aAAA,KDcJ,oBC9DE,MAAA,KACA,iBAAA,KpB0hGD,aoBvhGC,MAAA,KnBmiGA,iBAAkB,QmBjiGhB,aAAA,QpB2hGH,mBoBzhGO,mBAEN,MAAA,KACE,iBAAA,QACA,aAAA,QpB0hGH,mBoBvhGC,MAAA,KnBmiGA,iBAAkB,QAClB,aAAc,QmB/hGR,oBADJ,oBpB0hGH,mCoBvhGG,MAAA,KnBmiGF,iBAAkB,QAClB,aAAc,QmB/hGN,0BnBqiGV,0BAHA,0BmBniGM,0BnBqiGN,0BAHA,0BDFC,yCoBjiGK,yCnBqiGN,yCmBhiGE,MAAA,KnBwiGA,iBAAkB,QAClB,aAAc,QmBjiGZ,oBpByhGH,oBoBzhGG,mCnBsiGF,iBAAkB,KmBliGV,4BnBuiGV,4BAHA,4BDHC,6BCOD,6BAHA,6BkBjhGA,sCCrBM,sCnBuiGN,sCmBjiGI,iBAAA,QACA,aAAA,QDkBJ,oBClEE,MAAA,QACA,iBAAA,KpBmlGD,aoBhlGC,MAAA,KnB4lGA,iBAAkB,QmB1lGhB,aAAA,QpBolGH,mBoBllGO,mBAEN,MAAA,KACE,iBAAA,QACA,aAAA,QpBmlGH,mBoBhlGC,MAAA,KnB4lGA,iBAAkB,QAClB,aAAc,QmBxlGR,oBADJ,oBpBmlGH,mCoBhlGG,MAAA,KnB4lGF,iBAAkB,QAClB,aAAc,QmBxlGN,0BnB8lGV,0BAHA,0BmB5lGM,0BnB8lGN,0BAHA,0BDFC,yCoB1lGK,yCnB8lGN,yCmBzlGE,MAAA,KnBimGA,iBAAkB,QAClB,aAAc,QmB1lGZ,oBpBklGH,oBoBllGG,mCnB+lGF,iBAAkB,KmB3lGV,4BnBgmGV,4BAHA,4BDHC,6BCOD,6BAHA,6BkBtkGA,sCCzBM,sCnBgmGN,sCmB1lGI,iBAAA,QACA,aAAA,QDsBJ,oBCtEE,MAAA,QACA,iBAAA,KpB4oGD,UoBzoGC,MAAA,KnBqpGA,iBAAkB,QmBnpGhB,aAAA,QpB
6oGH,gBoB3oGO,gBAEN,MAAA,KACE,iBAAA,QACA,aAAA,QpB4oGH,gBoBzoGC,MAAA,KnBqpGA,iBAAkB,QAClB,aAAc,QmBjpGR,iBADJ,iBpB4oGH,gCoBzoGG,MAAA,KnBqpGF,iBAAkB,QAClB,aAAc,QmBjpGN,uBnBupGV,uBAHA,uBmBrpGM,uBnBupGN,uBAHA,uBDFC,sCoBnpGK,sCnBupGN,sCmBlpGE,MAAA,KnB0pGA,iBAAkB,QAClB,aAAc,QmBnpGZ,iBpB2oGH,iBoB3oGG,gCnBwpGF,iBAAkB,KmBppGV,yBnBypGV,yBAHA,yBDHC,0BCOD,0BAHA,0BkB3nGA,mCC7BM,mCnBypGN,mCmBnpGI,iBAAA,QACA,aAAA,QD0BJ,iBC1EE,MAAA,QACA,iBAAA,KpBqsGD,aoBlsGC,MAAA,KnB8sGA,iBAAkB,QmB5sGhB,aAAA,QpBssGH,mBoBpsGO,mBAEN,MAAA,KACE,iBAAA,QACA,aAAA,QpBqsGH,mBoBlsGC,MAAA,KnB8sGA,iBAAkB,QAClB,aAAc,QmB1sGR,oBADJ,oBpBqsGH,mCoBlsGG,MAAA,KnB8sGF,iBAAkB,QAClB,aAAc,QmB1sGN,0BnBgtGV,0BAHA,0BmB9sGM,0BnBgtGN,0BAHA,0BDFC,yCoB5sGK,yCnBgtGN,yCmB3sGE,MAAA,KnBmtGA,iBAAkB,QAClB,aAAc,QmB5sGZ,oBpBosGH,oBoBpsGG,mCnBitGF,iBAAkB,KmB7sGV,4BnBktGV,4BAHA,4BDHC,6BCOD,6BAHA,6BkBhrGA,sCCjCM,sCnBktGN,sCmB5sGI,iBAAA,QACA,aAAA,QD8BJ,oBC9EE,MAAA,QACA,iBAAA,KpB8vGD,YoB3vGC,MAAA,KnBuwGA,iBAAkB,QmBrwGhB,aAAA,QpB+vGH,kBoB7vGO,kBAEN,MAAA,KACE,iBAAA,QACA,aAAA,QpB8vGH,kBoB3vGC,MAAA,KnBuwGA,iBAAkB,QAClB,aAAc,QmBnwGR,mBADJ,mBpB8vGH,kCoB3vGG,MAAA,KnBuwGF,iBAAkB,QAClB,aAAc,QmBnwGN,yBnBywGV,yBAHA,yBmBvwGM,yBnBywGN,yBAHA,yBDFC,wCoBrwGK,wCnBywGN,wCmBpwGE,MAAA,KnB4wGA,iBAAkB,QAClB,aAAc,QmBrwGZ,mBpB6vGH,mBoB7vGG,kCnB0wGF,iBAAkB,KmBtwGV,2BnB2wGV,2BAHA,2BDHC,4BCOD,4BAHA,4BkBruGA,qCCrCM,qCnB2wGN,qCmBrwGI,iBAAA,QACA,aAAA,QDuCJ,mBACE,MAAA,QACA,iBAAA,KnB+tGD,UmB5tGC,YAAA,IlBwuGA,MAAO,QACP,cAAe,EAEjB,UGzwGE,iBemCE,iBflCM,oBJkwGT,6BmB7tGC,iBAAA,YlByuGA,mBAAoB,KACZ,WAAY,KkBtuGlB,UAEF,iBAAA,gBnB6tGD,gBmB3tGG,aAAA,YnBiuGH,gBmB/tGG,gBAIA,MAAA,QlBuuGF,gBAAiB,UACjB,iBAAkB,YDNnB,0BmBhuGK,0BAUN,mCATM,mClB2uGJ,MAAO,KmB1yGP,gBAAA,KAGA,mBADA,QpBmyGD,QAAA,KAAA,KmBztGC,UAAW,KlBquGX,YAAa,UmBjzGb,cAAA,IAGA,mBADA,QpB0yGD,QAAA,IAAA,KmB5tGC,UAAW,KlBwuGX,YAAa,ImBxzGb,cAAA,IAGA,mBADA,QpBizGD,QAAA,IAAA,ImB3tGC,UAAW,KACX,YAAA,IACA,cAAA,IAIF,WACE,QAAA,MnB2tGD,MAAA,KCYD,sBACE,WAAY,IqBz3GZ,6BADF,4BtBk3GC,6BI7rGC,MAAA,KAEQ,MJisGT,QAAA,EsBr3GC,mBAAA,QAAA,KAAA,OACE,cAAA,QAAA,KAAA,OtBu3GH,WAAA,QAAA,KAAA,OsBl3GC,StBq3GD,QAAA,EsBn3Ga,UtBs3Gb,QAAA,KsBr3Ga,atBw3Gb,QAAA,MsBv3Ga,etB03Gb,QAAA,UsBt3GC,kBACA,QAAA,gBlBwKA,YACQ,SAAA,SAAA,OAAA,EAOR,SAAA,OACQ,mCAAA,KAAA,8BAAA,KAGR,2BAAA,KACQ,4BAAA,KAAA,uBAAA,KJ2sGT,oBAAA,KuBr5GC,4BAA6B,OAAQ,WACrC,uBAAA,OAAA,WACA,oBAAA,OAAA,WAEA,OACA,QAAA,aACA,MAAA,EACA,OAAA,EACA,YAAA,IACA,eAAA,OvBu5GD,WAAA,IAAA,OuBn5GC,WAAY,IAAI,QtBk6GhB,aAAc,IAAI,MAAM,YsBh6GxB,YAAA,IAAA,MAAA,YAKA,UADF,QvBo5GC,SAAA,SuB94GC,uBACA,QAAA,EAEA,eACA,SAAA,SACA,IAAA,KACA,KAAA,EACA,QAAA,KACA,QAAA,KACA,MAAA,KACA,UAAA,MACA,QAAA,IAAA,EACA,OAAA,IAAA,EAAA,EACA,UAAA,KACA,WAAA,KACA,WAAA,KnBsBA,iBAAA,KACQ,wBAAA,YmBrBR,gBAAA,YtB+5GA,OsB/5GA,IAAA,MAAA,KvBk5GD,OAAA,IAAA,MAAA,gBuB74GC,cAAA,IACE,mBAAA,EAAA,IAAA,KAAA,iBACA,WAAA,EAAA,IAAA,KAAA,iBAzBJ,0BCzBE,MAAA,EACA,KAAA,KAEA,wBxBo8GD,OAAA,IuB96GC,OAAQ,IAAI,EAmCV,SAAA,OACA,iBAAA,QAEA,oBACA,QAAA,MACA,QAAA,IAAA,KACA,MAAA,KvB84GH,YAAA,IuBx4GC,YAAA,WtBw5GA,MAAO,KsBt5GL,YAAA,OvB44GH,0BuB14GG,0BAMF,MAAA,QtBo5GA,gBAAiB,KACjB,iBAAkB,QsBj5GhB,yBAEA,+BADA,+BvBu4GH,MAAA,KuB73GC,gBAAA,KtB64GA,iBAAkB,QAClB,QAAS,EDZV,2BuB33GC,iCAAA,iCAEE,MAAA,KEzGF,iCF2GE,iCAEA,gBAAA,KvB63GH,OAAA,YuBx3GC,iBAAkB,YAGhB,iBAAA,KvBw3GH,OAAA,0DuBn3GG,qBvBs3GH,QAAA,MuB72GC,QACA,QAAA,EAQF,qBACE,MAAA,EACA,KAAA,KAIF,oBACE,MAAA,KACA,KAAA,EAEA,iBACA,QAAA,MACA,QAAA,IAAA,KvBw2GD,UAAA,KuBp2GC,YAAa,WACb,MAAA,KACA,YAAA,OAEA,mBACA,SAAA,MACA,IAAA,EvBs2GD,MAAA,EuBl2GC,OAAQ,EACR,KAAA,EACA,QAAA,IAQF,2BtB42GE,MAAO,EsBx2GL,KAAA,KAEA,eACA,sCvB41GH,QAAA,GuBn2GC,WAAY,EtBm3GZ,cAAe,IAAI,OsBx2GjB,cAAA,IAAA,QAEA,uBvB41GH,8CuBv0G
C,IAAK,KAXL,OAAA,KApEA,cAAA,IvB25GC,yBuBv1GD,6BA1DA,MAAA,EACA,KAAA,KvBq5GD,kC0BpiHG,MAAO,KzBojHP,KAAM,GyBhjHR,W1BsiHD,oB0B1iHC,SAAU,SzB0jHV,QAAS,ayBpjHP,eAAA,OAGA,yB1BsiHH,gBCgBC,SAAU,SACV,MAAO,KyB7iHT,gC1BsiHC,gCCYD,+BAFA,+ByBhjHA,uBANM,uBzBujHN,sBAFA,sBAQE,QAAS,EyBljHP,qB1BuiHH,2B0BliHD,2BACE,iC1BoiHD,YAAA,KCgBD,aACE,YAAa,KDZd,kB0B1iHD,wBAAA,0BzB2jHE,MAAO,KDZR,kB0B/hHD,wBACE,0B1BiiHD,YAAA,I0B5hHC,yE1B+hHD,cAAA,E2BhlHC,4BACG,YAAA,EDsDL,mEzB6iHE,wBAAyB,E0B5lHzB,2BAAA,E3BilHD,6C0B5hHD,8CACE,uBAAA,E1B8hHD,0BAAA,E0B3hHC,sB1B8hHD,MAAA,KCgBD,8D0B/mHE,cAAA,E3BomHD,mE0B3hHD,oECjEE,wBAAA,EACG,2BAAA,EDqEL,oEzB0iHE,uBAAwB,EyBxiHxB,0BAAA,EAiBF,mCACE,iCACA,QAAA,EAEF,iCACE,cAAA,IACA,aAAA,IAKF,oCtB/CE,cAAA,KACQ,aAAA,KsBkDR,iCtBnDA,mBAAA,MAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,MAAA,EAAA,IAAA,IAAA,iBsByDV,0CACE,mBAAA,K1BugHD,WAAA,K0BngHC,YACA,YAAA,EAGF,eACE,aAAA,IAAA,IAAA,E1BqgHD,oBAAA,ECgBD,uBACE,aAAc,EAAE,IAAI,IyB1gHlB,yBACA,+BACA,oC1B+/GH,QAAA,M0BtgHC,MAAO,KAcH,MAAA,K1B2/GL,UAAA,KCgBD,oCACE,MAAO,KyBpgHL,8BACA,oC1By/GH,oC0Bp/GC,0CACE,WAAA,K1Bs/GH,YAAA,E2B/pHC,4DACC,cAAA,EAQA,sD3B4pHF,uBAAA,I0Bt/GC,wBAAA,IC/KA,2BAAA,EACC,0BAAA,EAQA,sD3BkqHF,uBAAA,E0Bv/GC,wBAAyB,EACzB,2BAAA,I1By/GD,0BAAA,ICgBD,uE0BtrHE,cAAA,E3B2qHD,4E0Bt/GD,6EC7LE,2BAAA,EACC,0BAAA,EDoMH,6EACE,uBAAA,EACA,wBAAA,EAEA,qB1Bo/GD,QAAA,M0Bx/GC,MAAO,KzBwgHP,aAAc,MyBjgHZ,gBAAA,SAEA,0B1Bq/GH,gC0B9/GC,QAAS,WAYP,MAAA,K1Bq/GH,MAAA,G0Bj/GG,qC1Bo/GH,MAAA,KCgBD,+CACE,KAAM,KyB7+GF,gDAFA,6C1Bs+GL,2D0Br+GK,wDEzOJ,SAAU,SACV,KAAA,cACA,eAAA,K5BitHD,a4B7sHC,SAAA,SACE,QAAA,MACA,gBAAA,S5BgtHH,0B4BxtHC,MAAO,KAeL,cAAA,EACA,aAAA,EAOA,2BACA,SAAA,S5BusHH,QAAA,E4BrsHG,MAAA,KACE,MAAA,K5BusHL,cAAA,ECgBD,iCACE,QAAS,EiBnrHT,8BACA,mCACA,sCACA,OAAA,KlBwqHD,QAAA,KAAA,KkBtqHC,UAAA,KjBsrHA,YAAa,UACb,cAAe,IiBrrHb,oClB0qHH,yCkBvqHC,4CjBurHA,OAAQ,KACR,YAAa,KDTd,8C4B/sHD,mDAAA,sD3B0tHA,sCACA,2CiBzrHI,8CjB8rHF,OAAQ,KiB1sHR,8BACA,mCACA,sCACA,OAAA,KlB+rHD,QAAA,IAAA,KkB7rHC,UAAA,KjB6sHA,YAAa,IACb,cAAe,IiB5sHb,oClBisHH,yCkB9rHC,4CjB8sHA,OAAQ,KACR,YAAa,KDTd,8C4B7tHD,mDAAA,sD3BwuHA,sCACA,2CiBhtHI,8CjBqtHF,OAAQ,K2BzuHR,2B5B6tHD,mB4B7tHC,iB3B8uHA,QAAS,W2BzuHX,8D5B6tHC,sD4B7tHD,oDAEE,cAAA,EAEA,mB5B+tHD,iB4B1tHC,MAAO,GACP,YAAA,OACA,eAAA,OAEA,mBACA,QAAA,IAAA,KACA,UAAA,KACA,YAAA,IACA,YAAA,EACA,MAAA,K5B4tHD,WAAA,O4BztHC,iBAAA,KACE,OAAA,IAAA,MAAA,KACA,cAAA,I5B4tHH,4B4BztHC,QAAA,IAAA,KACE,UAAA,KACA,cAAA,I5B4tHH,4B4B/uHC,QAAS,KAAK,K3B+vHd,UAAW,K2BruHT,cAAA,IAKJ,wCAAA,qC3BquHE,WAAY,EAEd,uCACA,+BACA,kC0B70HE,6CACG,8CC4GL,6D5BqtHC,wE4BptHC,wBAAA,E5ButHD,2BAAA,ECgBD,+BACE,aAAc,EAEhB,sCACA,8B2BhuHA,+D5BstHC,oDCWD,iC0Bl1HE,4CACG,6CCiHH,uBAAA,E5BwtHD,0BAAA,E4BltHC,8BAGA,YAAA,E5BotHD,iB4BxtHC,SAAU,SAUR,UAAA,E5BitHH,YAAA,O4B/sHK,sB5BktHL,SAAA,SCgBD,2BACE,YAAa,K2BxtHb,6BAAA,4B5B4sHD,4B4BzsHK,QAAA,EAGJ,kCAAA,wCAGI,aAAA,K5B4sHL,iC6B12HD,uCACE,QAAA,EACA,YAAA,K7B62HD,K6B/2HC,aAAc,EAOZ,cAAA,EACA,WAAA,KARJ,QAWM,SAAA,SACA,QAAA,M7B42HL,U6B12HK,SAAA,S5B03HJ,QAAS,M4Bx3HH,QAAA,KAAA,KAMJ,gB7Bu2HH,gB6Bt2HK,gBAAA,K7By2HL,iBAAA,KCgBD,mB4Br3HQ,MAAA,KAGA,yBADA,yB7B02HP,MAAA,K6Bl2HG,gBAAA,K5Bk3HF,OAAQ,YACR,iBAAkB,Y4B/2Hd,aAzCN,mB7B64HC,mBwBh5HC,iBAAA,KACA,aAAA,QAEA,kBxBm5HD,OAAA,I6Bn5HC,OAAQ,IAAI,EA0DV,SAAA,O7B41HH,iBAAA,Q6Bl1HC,c7Bq1HD,UAAA,K6Bn1HG,UAEA,cAAA,IAAA,MAAA,KALJ,aASM,MAAA,KACA,cAAA,KAEA,e7Bo1HL,aAAA,I6Bn1HK,YAAA,WACE,OAAA,IAAA,MAAA,Y7Bq1HP,cAAA,IAAA,IAAA,EAAA,ECgBD,qBACE,aAAc,KAAK,KAAK,K4B51HlB,sBAEA,4BADA,4BAEA,MAAA,K7Bi1HP,OAAA,Q6B50HC,iBAAA,KAqDA,OAAA,IAAA,MAAA,KA8BA,oBAAA,YAnFA,wBAwDE,MAAA,K7B2xHH,cAAA,E6BzxHK,2BACA,MAAA,KA3DJ,6BAgEE,cAAA,IACA,WAAA,OAYJ,iDA0DE,IAAK,KAjED,KAAA,K7B0xHH,yB6BztHD,2BA9DM,QAAA,W7B0xHL,MAAA,G6B
n2HD,6BAuFE,cAAA,GAvFF,6B5Bw3HA,aAAc,EACd,cAAe,IDZhB,kC6BtuHD,wCA3BA,wCATM,OAAA,IAAA,MAAA,K7B+wHH,yB6B3uHD,6B5B2vHE,cAAe,IAAI,MAAM,KACzB,cAAe,IAAI,IAAI,EAAE,EDZ1B,kC6B92HD,wC7B+2HD,wC6B72HG,oBAAA,MAIE,c7B+2HL,MAAA,K6B52HK,gB7B+2HL,cAAA,ICgBD,iBACE,YAAa,I4Bv3HP,uBAQR,6B7Bo2HC,6B6Bl2HG,MAAA,K7Bq2HH,iBAAA,Q6Bn2HK,gBACA,MAAA,KAYN,mBACE,WAAA,I7B41HD,YAAA,E6Bz1HG,e7B41HH,MAAA,K6B11HK,kBACA,MAAA,KAPN,oBAYI,cAAA,IACA,WAAA,OAYJ,wCA0DE,IAAK,KAjED,KAAA,K7B21HH,yB6B1xHD,kBA9DM,QAAA,W7B21HL,MAAA,G6Bl1HD,oBACA,cAAA,GAIE,oBACA,cAAA,EANJ,yB5B02HE,aAAc,EACd,cAAe,IDZhB,8B6B1yHD,oCA3BA,oCATM,OAAA,IAAA,MAAA,K7Bm1HH,yB6B/yHD,yB5B+zHE,cAAe,IAAI,MAAM,KACzB,cAAe,IAAI,IAAI,EAAE,EDZ1B,8B6Bx0HD,oC7By0HD,oC6Bv0HG,oBAAA,MAGA,uB7B00HH,QAAA,K6B/zHC,qBF3OA,QAAA,M3B+iID,yB8BxiIC,WAAY,KACZ,uBAAA,EACA,wBAAA,EAEA,Q9B0iID,SAAA,S8BliIC,WAAY,KA8nBZ,cAAe,KAhoBb,OAAA,IAAA,MAAA,Y9ByiIH,yB8BzhIC,QAgnBE,cAAe,K9B86GlB,yB8BjhIC,eACA,MAAA,MAGA,iBACA,cAAA,KAAA,aAAA,KAEA,WAAA,Q9BkhID,2BAAA,M8BhhIC,WAAA,IAAA,MAAA,YACE,mBAAA,MAAA,EAAA,IAAA,EAAA,qB9BkhIH,WAAA,MAAA,EAAA,IAAA,EAAA,qB8Bz7GD,oBArlBI,WAAA,KAEA,yBAAA,iB9BkhID,MAAA,K8BhhIC,WAAA,EACE,mBAAA,KACA,WAAA,KAEA,0B9BkhIH,QAAA,gB8B/gIC,OAAA,eACE,eAAA,E9BihIH,SAAA,kBCkBD,oBACE,WAAY,QDZf,sC8B/gIK,mC9B8gIH,oC8BzgIC,cAAe,E7B4hIf,aAAc,G6Bj+GlB,sCAnjBE,mC7ByhIA,WAAY,MDdX,4D8BngID,sC9BogID,mCCkBG,WAAY,O6B3gId,kCANE,gC9BsgIH,4B8BvgIG,0BAuiBF,aAAc,M7Bm/Gd,YAAa,MAEf,yBDZC,kC8B3gIK,gC9B0gIH,4B8B3gIG,0BAcF,aAAc,EAChB,YAAA,GAMF,mBA8gBE,QAAS,KAhhBP,aAAA,EAAA,EAAA,I9BkgIH,yB8B7/HC,mB7B+gIE,cAAe,G6B1gIjB,qBADA,kB9BggID,SAAA,M8Bz/HC,MAAO,EAggBP,KAAM,E7B4gHN,QAAS,KDdR,yB8B7/HD,qB9B8/HD,kB8B7/HC,cAAA,GAGF,kBACE,IAAA,EACA,aAAA,EAAA,EAAA,I9BigID,qB8B1/HC,OAAQ,EACR,cAAA,EACA,aAAA,IAAA,EAAA,EAEA,cACA,MAAA,K9B4/HD,OAAA,K8B1/HC,QAAA,KAAA,K7B4gIA,UAAW,K6B1gIT,YAAA,KAIA,oBAbJ,oB9BwgIC,gBAAA,K8Bv/HG,kB7B0gIF,QAAS,MDdR,yBACF,iC8Bh/HC,uCACA,YAAA,OAGA,eC9LA,SAAA,SACA,MAAA,MD+LA,QAAA,IAAA,KACA,WAAA,IACA,aAAA,KACA,cAAA,I9Bm/HD,iBAAA,Y8B/+HC,iBAAA,KACE,OAAA,IAAA,MAAA,Y9Bi/HH,cAAA,I8B5+HG,qBACA,QAAA,EAEA,yB9B++HH,QAAA,M8BrgIC,MAAO,KAyBL,OAAA,I9B++HH,cAAA,I8BpjHD,mCAvbI,WAAA,I9Bg/HH,yB8Bt+HC,eACA,QAAA,MAGE,YACA,OAAA,MAAA,M9By+HH,iB8B58HC,YAAA,KA2YA,eAAgB,KAjaZ,YAAA,KAEA,yBACA,iCACA,SAAA,OACA,MAAA,KACA,MAAA,KAAA,WAAA,E9Bs+HH,iBAAA,Y8B3kHC,OAAQ,E7B8lHR,mBAAoB,K6Bt/HhB,WAAA,KAGA,kDAqZN,sC9BklHC,QAAA,IAAA,KAAA,IAAA,KCmBD,sC6Bv/HQ,YAAA,KAmBR,4C9Bs9HD,4C8BvlHG,iBAAkB,M9B4lHnB,yB8B5lHD,YAtYI,MAAA,K9Bq+HH,OAAA,E8Bn+HK,eACA,MAAA,K9Bu+HP,iB8B39HG,YAAa,KACf,eAAA,MAGA,aACA,QAAA,KAAA,K1B9NA,WAAA,IACQ,aAAA,M2B/DR,cAAA,IACA,YAAA,M/B4vID,WAAA,IAAA,MAAA,YiBtuHC,cAAe,IAAI,MAAM,YAwEzB,mBAAoB,MAAM,EAAE,IAAI,EAAE,qBAAyB,EAAE,IAAI,EAAE,qBAtI/D,WAAA,MAAA,EAAA,IAAA,EAAA,qBAAA,EAAA,IAAA,EAAA,qBAEA,yBjBwyHH,yBiBpqHC,QAAS,aA/HP,cAAA,EACA,eAAA,OjBuyHH,2BiBzqHC,QAAS,aAxHP,MAAA,KjBoyHH,eAAA,OiBhyHG,kCACA,QAAA,aAmHJ,0BhBmsHE,QAAS,aACT,eAAgB,OgB5yHd,wCjB6xHH,6CiBrrHD,2CjBwrHC,MAAA,KiB5xHG,wCACA,MAAA,KAmGJ,4BhB+sHE,cAAe,EgB3yHb,eAAA,OAGA,uBADA,oBjB6xHH,QAAA,aiBnsHC,WAAY,EhBstHZ,cAAe,EgB5yHX,eAAA,OAsFN,6BAAA,0BAjFI,aAAA,EAiFJ,4CjB4sHC,sCiBvxHG,SAAA,SjB0xHH,YAAA,E8BngID,kDAmWE,IAAK,GAvWH,yBACE,yB9B8gIL,cAAA,I8B5/HD,oCAoVE,cAAe,GA1Vf,yBACA,aACA,MAAA,KACA,YAAA,E1BzPF,eAAA,EACQ,aAAA,EJmwIP,YAAA,EACF,OAAA,E8BngIG,mBAAoB,KACtB,WAAA,M9BugID,8B8BngIC,WAAY,EACZ,uBAAA,EHzUA,wBAAA,EAQA,mDACC,cAAA,E3By0IF,uBAAA,I8B//HC,wBAAyB,IChVzB,2BAAA,EACA,0BAAA,EDkVA,YCnVA,WAAA,IACA,cAAA,IDqVA,mBCtVA,WAAA,KACA,cAAA,KD+VF,mBChWE,WAAA,KACA,cAAA,KDuWF,aAsSE,WAAY,KA1SV,cAAA,KAEA,yB9B+/HD,aACF,MAAA,K8Bl+HG,aAAc,KAhBhB,YAAA,MACA,yBE5WA,aF8WE,MAAA,eAFF,cAKI,MAAA,gB9Bu/HH,aAAA,M8B7+HD,4BACA,
aAAA,GADF,gBAKI,iBAAA,Q9Bg/HH,aAAA,QCmBD,8B6BhgIM,MAAA,KARN,oC9B0/HC,oC8B5+HG,MAAA,Q9B++HH,iBAAA,Y8B1+HK,6B9B6+HL,MAAA,KCmBD,iC6B5/HQ,MAAA,KAKF,uC9By+HL,uCCmBC,MAAO,KACP,iBAAkB,Y6Bz/HZ,sCAIF,4C9Bu+HL,4CCmBC,MAAO,KACP,iBAAkB,Q6Bv/HZ,wCAxCR,8C9BihIC,8C8Bn+HG,MAAA,K9Bs+HH,iBAAA,YCmBD,+B6Bt/HM,aAAA,KAGA,qCApDN,qC9B2hIC,iBAAA,KCmBD,yC6Bp/HI,iBAAA,KAOE,iCAAA,6B7Bk/HJ,aAAc,Q6B9+HR,oCAiCN,0C9B+7HD,0C8B3xHC,MAAO,KA7LC,iBAAA,QACA,yB7B8+HR,sD6B5+HU,MAAA,KAKF,4D9By9HP,4DCmBC,MAAO,KACP,iBAAkB,Y6Bz+HV,2DAIF,iE9Bu9HP,iECmBC,MAAO,KACP,iBAAkB,Q6Bv+HV,6D9B09HX,mEADE,mE8B1jIC,MAAO,KA8GP,iBAAA,aAEE,6B9Bi9HL,MAAA,K8B58HG,mC9B+8HH,MAAA,KCmBD,0B6B/9HM,MAAA,KAIA,gCAAA,gC7Bg+HJ,MAAO,K6Bt9HT,0CARQ,0CASN,mD9Bu8HD,mD8Bt8HC,MAAA,KAFF,gBAKI,iBAAA,K9B08HH,aAAA,QCmBD,8B6B19HM,MAAA,QARN,oC9Bo9HC,oC8Bt8HG,MAAA,K9By8HH,iBAAA,Y8Bp8HK,6B9Bu8HL,MAAA,QCmBD,iC6Bt9HQ,MAAA,QAKF,uC9Bm8HL,uCCmBC,MAAO,KACP,iBAAkB,Y6Bn9HZ,sCAIF,4C9Bi8HL,4CCmBC,MAAO,KACP,iBAAkB,Q6Bj9HZ,wCAxCR,8C9B2+HC,8C8B57HG,MAAA,K9B+7HH,iBAAA,YCmBD,+B6B/8HM,aAAA,KAGA,qCArDN,qC9Bq/HC,iBAAA,KCmBD,yC6B78HI,iBAAA,KAME,iCAAA,6B7B48HJ,aAAc,Q6Bx8HR,oCAuCN,0C9Bm5HD,0C8B33HC,MAAO,KAvDC,iBAAA,QAuDV,yBApDU,kE9Bs7HP,aAAA,Q8Bn7HO,0D9Bs7HP,iBAAA,QCmBD,sD6Bt8HU,MAAA,QAKF,4D9Bm7HP,4DCmBC,MAAO,KACP,iBAAkB,Y6Bn8HV,2DAIF,iE9Bi7HP,iECmBC,MAAO,KACP,iBAAkB,Q6Bj8HV,6D9Bo7HX,mEADE,mE8B1hIC,MAAO,KA+GP,iBAAA,aAEE,6B9Bg7HL,MAAA,Q8B36HG,mC9B86HH,MAAA,KCmBD,0B6B97HM,MAAA,QAIA,gCAAA,gC7B+7HJ,MAAO,KgCvkJT,0CH0oBQ,0CGzoBN,mDjCwjJD,mDiCvjJC,MAAA,KAEA,YACA,QAAA,IAAA,KjC2jJD,cAAA,KiChkJC,WAAY,KAQV,iBAAA,QjC2jJH,cAAA,IiCxjJK,eACA,QAAA,ajC4jJL,yBiCxkJC,QAAS,EAAE,IAkBT,MAAA,KjCyjJH,QAAA,SkC5kJC,oBACA,MAAA,KAEA,YlC+kJD,QAAA,akCnlJC,aAAc,EAOZ,OAAA,KAAA,ElC+kJH,cAAA,ICmBD,eiC/lJM,QAAA,OAEA,iBACA,oBACA,SAAA,SACA,MAAA,KACA,QAAA,IAAA,KACA,YAAA,KACA,YAAA,WlCglJL,MAAA,QkC9kJG,gBAAA,KjCimJF,iBAAkB,KiC9lJZ,OAAA,IAAA,MAAA,KPVH,6B3B2lJJ,gCkC7kJG,YAAA,EjCgmJF,uBAAwB,I0BvnJxB,0BAAA,I3BymJD,4BkCxkJG,+BjC2lJF,wBAAyB,IACzB,2BAA4B,IiCxlJxB,uBAFA,uBAGA,0BAFA,0BlC8kJL,QAAA,EkCtkJG,MAAA,QjCylJF,iBAAkB,KAClB,aAAc,KAEhB,sBiCvlJM,4BAFA,4BjC0lJN,yBiCvlJM,+BAFA,+BAGA,QAAA,ElC2kJL,MAAA,KkCloJC,OAAQ,QjCqpJR,iBAAkB,QAClB,aAAc,QiCnlJV,wBAEA,8BADA,8BjColJN,2BiCtlJM,iCjCulJN,iCDZC,MAAA,KkC/jJC,OAAQ,YjCklJR,iBAAkB,KkC7pJd,aAAA,KAEA,oBnC8oJL,uBmC5oJG,QAAA,KAAA,KlC+pJF,UAAW,K0B1pJX,YAAA,U3B4oJD,gCmC3oJG,mClC8pJF,uBAAwB,I0BvqJxB,0BAAA,I3BypJD,+BkC1kJD,kCjC6lJE,wBAAyB,IkC7qJrB,2BAAA,IAEA,oBnC8pJL,uBmC5pJG,QAAA,IAAA,KlC+qJF,UAAW,K0B1qJX,YAAA,I3B4pJD,gCmC3pJG,mClC8qJF,uBAAwB,I0BvrJxB,0BAAA,I3ByqJD,+BoC3qJD,kCACE,wBAAA,IACA,2BAAA,IAEA,OpC6qJD,aAAA,EoCjrJC,OAAQ,KAAK,EAOX,WAAA,OpC6qJH,WAAA,KCmBD,UmC7rJM,QAAA,OAEA,YACA,eACA,QAAA,apC8qJL,QAAA,IAAA,KoC5rJC,iBAAkB,KnC+sJlB,OAAQ,IAAI,MAAM,KmC5rJd,cAAA,KAnBN,kBpCisJC,kBCmBC,gBAAiB,KmCzrJb,iBAAA,KA3BN,eAAA,kBAkCM,MAAA,MAlCN,mBAAA,sBnC6tJE,MAAO,KmClrJH,mBAEA,yBADA,yBpCqqJL,sBqCltJC,MAAO,KACP,OAAA,YACA,iBAAA,KAEA,OACA,QAAA,OACA,QAAA,KAAA,KAAA,KACA,UAAA,IACA,YAAA,IACA,YAAA,EACA,MAAA,KrCotJD,WAAA,OqChtJG,YAAA,OpCmuJF,eAAgB,SoCjuJZ,cAAA,MrCotJL,cqCltJK,cAKJ,MAAA,KACE,gBAAA,KrC+sJH,OAAA,QqC1sJG,aACA,QAAA,KAOJ,YCtCE,SAAA,StC+uJD,IAAA,KCmBD,eqC7vJM,iBAAA,KALJ,2BD0CF,2BrC4sJC,iBAAA,QCmBD,eqCpwJM,iBAAA,QALJ,2BD8CF,2BrC+sJC,iBAAA,QCmBD,eqC3wJM,iBAAA,QALJ,2BDkDF,2BrCktJC,iBAAA,QCmBD,YqClxJM,iBAAA,QALJ,wBDsDF,wBrCqtJC,iBAAA,QCmBD,eqCzxJM,iBAAA,QALJ,2BD0DF,2BrCwtJC,iBAAA,QCmBD,cqChyJM,iBAAA,QCDJ,0BADF,0BAEE,iBAAA,QAEA,OACA,QAAA,aACA,UAAA,KACA,QAAA,IAAA,IACA,UAAA,KACA,YAAA,IACA,YAAA,EACA,MAAA,KACA,WAAA,OvCqxJD,YAAA,OuClxJC,eAAA,OACE,iBAAA,KvCoxJH,cAAA,KuC/wJG,aACA,QAAA,KAGF,YtCkyJA,SAAU,SsChyJR,IAAA,KAMA,0BvC4wJH,eCm
BC,IAAK,EsC7xJD,QAAA,IAAA,IvCgxJL,cuC9wJK,cAKJ,MAAA,KtC4xJA,gBAAiB,KsC1xJf,OAAA,QvC4wJH,+BuCxwJC,4BACE,MAAA,QvC0wJH,iBAAA,KuCtwJG,wBvCywJH,MAAA,MuCrwJG,+BvCwwJH,aAAA,IwCj0JC,uBACA,YAAA,IAEA,WACA,YAAA,KxCo0JD,eAAA,KwCz0JC,cAAe,KvC41Jf,MAAO,QuCn1JL,iBAAA,KAIA,eAbJ,cAcI,MAAA,QxCo0JH,awCl1JC,cAAe,KAmBb,UAAA,KxCk0JH,YAAA,ICmBD,cuCh1JI,iBAAA,QAEA,sBxCi0JH,4BwC31JC,cAAe,KA8Bb,aAAA,KxCg0JH,cAAA,IwC7yJD,sBAfI,UAAA,KxCi0JD,oCwC9zJC,WvCi1JA,YAAa,KuC/0JX,eAAA,KxCi0JH,sBwCvzJD,4BvC00JE,cAAe,KuC90Jb,aAAA,KC5CJ,ezC42JD,cyC32JC,UAAA,MAGA,WACA,QAAA,MACA,QAAA,IACA,cAAA,KrCiLA,YAAA,WACK,iBAAA,KACG,OAAA,IAAA,MAAA,KJ8rJT,cAAA,IyCx3JC,mBAAoB,OAAO,IAAI,YxC24J1B,cAAe,OAAO,IAAI,YwC93J7B,WAAA,OAAA,IAAA,YAKF,iBzC22JD,eCmBC,aAAc,KACd,YAAa,KwCv3JX,mBA1BJ,kBzCk4JC,kByCv2JG,aAAA,QCzBJ,oBACE,QAAA,IACA,MAAA,KAEA,O1Cs4JD,QAAA,K0C14JC,cAAe,KAQb,OAAA,IAAA,MAAA,YAEA,cAAA,IAVJ,UAeI,WAAA,E1Ck4JH,MAAA,QCmBD,mByC/4JI,YAAA,IArBJ,SAyBI,U1C+3JH,cAAA,ECmBD,WyCx4JE,WAAA,IAFF,mBAAA,mBAMI,cAAA,KAEA,0BACA,0B1Cy3JH,SAAA,S0Cj3JC,IAAK,KCvDL,MAAA,MACA,MAAA,Q3C46JD,e0Ct3JC,MAAO,QClDL,iBAAA,Q3C26JH,aAAA,Q2Cx6JG,kB3C26JH,iBAAA,Q2Cn7JC,2BACA,MAAA,Q3Cu7JD,Y0C73JC,MAAO,QCtDL,iBAAA,Q3Cs7JH,aAAA,Q2Cn7JG,e3Cs7JH,iBAAA,Q2C97JC,wBACA,MAAA,Q3Ck8JD,e0Cp4JC,MAAO,QC1DL,iBAAA,Q3Ci8JH,aAAA,Q2C97JG,kB3Ci8JH,iBAAA,Q2Cz8JC,2BACA,MAAA,Q3C68JD,c0C34JC,MAAO,QC9DL,iBAAA,Q3C48JH,aAAA,Q2Cz8JG,iB3C48JH,iBAAA,Q4C78JC,0BAAQ,MAAA,QACR,wCAAQ,K5Cm9JP,oBAAA,KAAA,E4C/8JD,GACA,oBAAA,EAAA,GACA,mCAAQ,K5Cq9JP,oBAAA,KAAA,E4Cv9JD,GACA,oBAAA,EAAA,GACA,gCAAQ,K5Cq9JP,oBAAA,KAAA,E4C78JD,GACA,oBAAA,EAAA,GAGA,UACA,OAAA,KxCsCA,cAAA,KACQ,SAAA,OJ26JT,iBAAA,Q4C78JC,cAAe,IACf,mBAAA,MAAA,EAAA,IAAA,IAAA,eACA,WAAA,MAAA,EAAA,IAAA,IAAA,eAEA,cACA,MAAA,KACA,MAAA,EACA,OAAA,KACA,UAAA,KxCyBA,YAAA,KACQ,MAAA,KAyHR,WAAA,OACK,iBAAA,QACG,mBAAA,MAAA,EAAA,KAAA,EAAA,gBJ+zJT,WAAA,MAAA,EAAA,KAAA,EAAA,gB4C18JC,mBAAoB,MAAM,IAAI,K3Cq+JzB,cAAe,MAAM,IAAI,K4Cp+J5B,WAAA,MAAA,IAAA,KDEF,sBCAE,gCDAF,iBAAA,yK5C88JD,iBAAA,oK4Cv8JC,iBAAiB,iK3Cm+JjB,wBAAyB,KAAK,KG/gK9B,gBAAA,KAAA,KJy/JD,qBIv/JS,+BwCmDR,kBAAmB,qBAAqB,GAAG,OAAO,SErElD,aAAA,qBAAA,GAAA,OAAA,S9C4gKD,UAAA,qBAAA,GAAA,OAAA,S6Cz9JG,sBACA,iBAAA,Q7C69JH,wC4Cx8JC,iBAAkB,yKEzElB,iBAAA,oK9CohKD,iBAAA,iK6Cj+JG,mBACA,iBAAA,Q7Cq+JH,qC4C58JC,iBAAkB,yKE7ElB,iBAAA,oK9C4hKD,iBAAA,iK6Cz+JG,sBACA,iBAAA,Q7C6+JH,wC4Ch9JC,iBAAkB,yKEjFlB,iBAAA,oK9CoiKD,iBAAA,iK6Cj/JG,qBACA,iBAAA,Q7Cq/JH,uC+C5iKC,iBAAkB,yKAElB,iBAAA,oK/C6iKD,iBAAA,iK+C1iKG,O/C6iKH,WAAA,KC4BD,mB8CnkKE,WAAA,E/C4iKD,O+CxiKD,YACE,SAAA,O/C0iKD,KAAA,E+CtiKC,Y/CyiKD,MAAA,Q+CriKG,c/CwiKH,QAAA,MC4BD,4B8C9jKE,UAAA,KAGF,aAAA,mBAEE,aAAA,KAGF,YAAA,kB9C+jKE,cAAe,K8CxjKjB,YAHE,Y/CoiKD,a+ChiKC,QAAA,W/CmiKD,eAAA,I+C/hKC,c/CkiKD,eAAA,O+C7hKC,cACA,eAAA,OAMF,eACE,WAAA,EACA,cAAA,ICvDF,YAEE,aAAA,EACA,WAAA,KAQF,YACE,aAAA,EACA,cAAA,KAGA,iBACA,SAAA,SACA,QAAA,MhD6kKD,QAAA,KAAA,KgD1kKC,cAAA,KrB3BA,iBAAA,KACC,OAAA,IAAA,MAAA,KqB6BD,6BACE,uBAAA,IrBvBF,wBAAA,I3BsmKD,4BgDpkKC,cAAe,E/CgmKf,2BAA4B,I+C9lK5B,0BAAA,IAFF,kBAAA,uBAKI,MAAA,KAIF,2CAAA,gD/CgmKA,MAAO,K+C5lKL,wBAFA,wBhDykKH,6BgDxkKG,6BAKF,MAAO,KACP,gBAAA,KACA,iBAAA,QAKA,uB/C4lKA,MAAO,KACP,WAAY,K+CzlKV,0BhDmkKH,gCgDlkKG,gCALF,MAAA,K/CmmKA,OAAQ,YACR,iBAAkB,KDxBnB,mDgD5kKC,yDAAA,yD/CymKA,MAAO,QDxBR,gDgDhkKC,sDAAA,sD/C6lKA,MAAO,K+CzlKL,wBAEA,8BADA,8BhDmkKH,QAAA,EgDxkKC,MAAA,K/ComKA,iBAAkB,QAClB,aAAc,QAEhB,iDDpBC,wDCuBD,uDADA,uD+CzmKE,8DAYI,6D/C4lKN,uD+CxmKE,8D/C2mKF,6DAKE,MAAO,QDxBR,8CiD1qKG,oDADF,oDAEE,MAAA,QAEA,yBhDusKF,MAAO,QgDrsKH,iBAAA,QAFF,0BAAA,+BAKI,MAAA,QAGF,mDAAA,wDhDwsKJ,MAAO,QDtBR,gCiDhrKO,gCAGF,qCAFE,qChD2sKN,MAAO,QACP,iBAAkB,QAEpB,iCgDvsKQ,uCAFA,uChD0sKR,sCDtBC,4CiDnrKO,4CArBN,MAAA,KACE
,iBAAA,QACA,aAAA,QAEA,sBhDouKF,MAAO,QgDluKH,iBAAA,QAFF,uBAAA,4BAKI,MAAA,QAGF,gDAAA,qDhDquKJ,MAAO,QDtBR,6BiD7sKO,6BAGF,kCAFE,kChDwuKN,MAAO,QACP,iBAAkB,QAEpB,8BgDpuKQ,oCAFA,oChDuuKR,mCDtBC,yCiDhtKO,yCArBN,MAAA,KACE,iBAAA,QACA,aAAA,QAEA,yBhDiwKF,MAAO,QgD/vKH,iBAAA,QAFF,0BAAA,+BAKI,MAAA,QAGF,mDAAA,wDhDkwKJ,MAAO,QDtBR,gCiD1uKO,gCAGF,qCAFE,qChDqwKN,MAAO,QACP,iBAAkB,QAEpB,iCgDjwKQ,uCAFA,uChDowKR,sCDtBC,4CiD7uKO,4CArBN,MAAA,KACE,iBAAA,QACA,aAAA,QAEA,wBhD8xKF,MAAO,QgD5xKH,iBAAA,QAFF,yBAAA,8BAKI,MAAA,QAGF,kDAAA,uDhD+xKJ,MAAO,QDtBR,+BiDvwKO,+BAGF,oCAFE,oChDkyKN,MAAO,QACP,iBAAkB,QAEpB,gCgD9xKQ,sCAFA,sChDiyKR,qCDtBC,2CiD1wKO,2CDkGN,MAAO,KACP,iBAAA,QACA,aAAA,QAEF,yBACE,WAAA,EACA,cAAA,IE1HF,sBACE,cAAA,EACA,YAAA,IAEA,O9C0DA,cAAA,KACQ,iBAAA,KJ6uKT,OAAA,IAAA,MAAA,YkDnyKC,cAAe,IACf,mBAAA,EAAA,IAAA,IAAA,gBlDqyKD,WAAA,EAAA,IAAA,IAAA,gBkD/xKC,YACA,QAAA,KvBnBC,e3BuzKF,QAAA,KAAA,KkDtyKC,cAAe,IAAI,MAAM,YAMvB,uBAAA,IlDmyKH,wBAAA,IkD7xKC,0CACA,MAAA,QAEA,alDgyKD,WAAA,EkDpyKC,cAAe,EjDg0Kf,UAAW,KACX,MAAO,QDtBR,oBkD1xKC,sBjDkzKF,eiDxzKI,mBAKJ,qBAEE,MAAA,QvBvCA,cACC,QAAA,KAAA,K3Bs0KF,iBAAA,QkDrxKC,WAAY,IAAI,MAAM,KjDizKtB,2BAA4B,IiD9yK1B,0BAAA,IAHJ,mBAAA,mCAMM,cAAA,ElDwxKL,oCkDnxKG,oDjD+yKF,aAAc,IAAI,EiD7yKZ,cAAA,EvBtEL,4D3B61KF,4EkDjxKG,WAAA,EjD6yKF,uBAAwB,IiD3yKlB,wBAAA,IvBtEL,0D3B21KF,0EkD1yKC,cAAe,EvB1Df,2BAAA,IACC,0BAAA,IuB0FH,+EAEI,uBAAA,ElD8wKH,wBAAA,EkD1wKC,wDlD6wKD,iBAAA,EC4BD,0BACE,iBAAkB,EiDlyKpB,8BlD0wKC,ckD1wKD,gCjDuyKE,cAAe,EiDvyKjB,sCAQM,sBlDwwKL,wCC4BC,cAAe,K0Br5Kf,aAAA,KuByGF,wDlDqxKC,0BC4BC,uBAAwB,IACxB,wBAAyB,IiDlzK3B,yFAoBQ,yFlDwwKP,2DkDzwKO,2DjDqyKN,uBAAwB,IACxB,wBAAyB,IAK3B,wGiD9zKA,wGjD4zKA,wGDtBC,wGCuBD,0EiD7zKA,0EjD2zKA,0EiDnyKU,0EjD2yKR,uBAAwB,IAK1B,uGiDx0KA,uGjDs0KA,uGDtBC,uGCuBD,yEiDv0KA,yEjDq0KA,yEiDzyKU,yEvB7HR,wBAAA,IuBiGF,sDlDqzKC,yBC4BC,2BAA4B,IAC5B,0BAA2B,IiDxyKrB,qFA1CR,qFAyCQ,wDlDmxKP,wDC4BC,2BAA4B,IAC5B,0BAA2B,IAG7B,oGDtBC,oGCwBD,oGiD91KA,oGjD21KA,uEiD7yKU,uEjD+yKV,uEiD71KA,uEjDm2KE,0BAA2B,IAG7B,mGDtBC,mGCwBD,mGiDx2KA,mGjDq2KA,sEiDnzKU,sEjDqzKV,sEiDv2KA,sEjD62KE,2BAA4B,IiDlzK1B,0BlD2xKH,qCkDt1KD,0BAAA,qCA+DI,WAAA,IAAA,MAAA,KA/DJ,kDAAA,kDAmEI,WAAA,EAnEJ,uBAAA,yCjD23KE,OAAQ,EiDjzKA,+CjDqzKV,+CiD/3KA,+CjDi4KA,+CAEA,+CANA,+CDjBC,iECoBD,iEiDh4KA,iEjDk4KA,iEAEA,iEANA,iEAWE,YAAa,EiD3zKL,8CjD+zKV,8CiD74KA,8CjD+4KA,8CAEA,8CANA,8CDjBC,gECoBD,gEiD94KA,gEjDg5KA,gEAEA,gEANA,gEAWE,aAAc,EAIhB,+CiD35KA,+CjDy5KA,+CiDl0KU,+CjDq0KV,iEiD55KA,iEjD05KA,iEDtBC,iEC6BC,cAAe,EAEjB,8CiDn0KU,8CjDq0KV,8CiDr6KA,8CjDo6KA,gEDtBC,gECwBD,gEiDh0KI,gEACA,cAAA,EAUJ,yBACE,cAAA,ElDmyKD,OAAA,EkD/xKG,aACA,cAAA,KANJ,oBASM,cAAA,ElDkyKL,cAAA,IkD7xKG,2BlDgyKH,WAAA,IC4BD,4BiDxzKM,cAAA,EAKF,wDAvBJ,wDlDqzKC,WAAA,IAAA,MAAA,KkD5xKK,2BlD+xKL,WAAA,EmDlhLC,uDnDqhLD,cAAA,IAAA,MAAA,KmDlhLG,eACA,aAAA,KnDshLH,8BmDxhLC,MAAA,KAMI,iBAAA,QnDqhLL,aAAA,KmDlhLK,0DACA,iBAAA,KAGJ,qCAEI,MAAA,QnDmhLL,iBAAA,KmDpiLC,yDnDuiLD,oBAAA,KmDpiLG,eACA,aAAA,QnDwiLH,8BmD1iLC,MAAA,KAMI,iBAAA,QnDuiLL,aAAA,QmDpiLK,0DACA,iBAAA,QAGJ,qCAEI,MAAA,QnDqiLL,iBAAA,KmDtjLC,yDnDyjLD,oBAAA,QmDtjLG,eACA,aAAA,QnD0jLH,8BmD5jLC,MAAA,QAMI,iBAAA,QnDyjLL,aAAA,QmDtjLK,0DACA,iBAAA,QAGJ,qCAEI,MAAA,QnDujLL,iBAAA,QmDxkLC,yDnD2kLD,oBAAA,QmDxkLG,YACA,aAAA,QnD4kLH,2BmD9kLC,MAAA,QAMI,iBAAA,QnD2kLL,aAAA,QmDxkLK,uDACA,iBAAA,QAGJ,kCAEI,MAAA,QnDykLL,iBAAA,QmD1lLC,sDnD6lLD,oBAAA,QmD1lLG,eACA,aAAA,QnD8lLH,8BmDhmLC,MAAA,QAMI,iBAAA,QnD6lLL,aAAA,QmD1lLK,0DACA,iBAAA,QAGJ,qCAEI,MAAA,QnD2lLL,iBAAA,QmD5mLC,yDnD+mLD,oBAAA,QmD5mLG,cACA,aAAA,QnDgnLH,6BmDlnLC,MAAA,QAMI,iBAAA,QnD+mLL,aAAA,QmD5mLK,yDACA,iBAAA,QAGJ,oCAEI,MAAA,QnD6mLL,iBAAA,QoD5nLC,wDACA,oBAAA,QAEA,kBACA,SAAA,SpD+nLD,QAAA,MoDpoLC,OAAQ,EnDgqLR,QAAS,E
ACT,SAAU,OAEZ,yCmDtpLI,wBADA,yBAEA,yBACA,wBACA,SAAA,SACA,IAAA,EACA,OAAA,EpD+nLH,KAAA,EoD1nLC,MAAO,KACP,OAAA,KpD4nLD,OAAA,EoDvnLC,wBpD0nLD,eAAA,OqDppLC,uBACA,eAAA,IAEA,MACA,WAAA,KACA,QAAA,KjDwDA,cAAA,KACQ,iBAAA,QJgmLT,OAAA,IAAA,MAAA,QqD/pLC,cAAe,IASb,mBAAA,MAAA,EAAA,IAAA,IAAA,gBACA,WAAA,MAAA,EAAA,IAAA,IAAA,gBAKJ,iBACE,aAAA,KACA,aAAA,gBAEF,SACE,QAAA,KACA,cAAA,ICtBF,SACE,QAAA,IACA,cAAA,IAEA,OACA,MAAA,MACA,UAAA,KjCRA,YAAA,IAGA,YAAA,ErBqrLD,MAAA,KsD7qLC,YAAA,EAAA,IAAA,EAAA,KrDysLA,OAAQ,kBqDvsLN,QAAA,GjCbF,aiCeE,ajCZF,MAAA,KrB6rLD,gBAAA,KsDzqLC,OAAA,QACE,OAAA,kBACA,QAAA,GAEA,aACA,mBAAA,KtD2qLH,QAAA,EuDhsLC,OAAQ,QACR,WAAA,IvDksLD,OAAA,EuD7rLC,YACA,SAAA,OAEA,OACA,SAAA,MACA,IAAA,EACA,MAAA,EACA,OAAA,EACA,KAAA,EAIA,QAAA,KvD6rLD,QAAA,KuD1rLC,SAAA,OnD+GA,2BAAA,MACI,QAAA,EAEI,0BAkER,mBAAA,kBAAA,IAAA,SAEK,cAAA,aAAA,IAAA,SACG,WAAA,UAAA,IAAA,SJ6gLT,kBAAA,kBuDhsLC,cAAA,kBnD2GA,aAAA,kBACI,UAAA,kBAEI,wBJwlLT,kBAAA,euDpsLK,cAAe,eACnB,aAAA,eACA,UAAA,eAIF,mBACE,WAAA,OACA,WAAA,KvDqsLD,cuDhsLC,SAAU,SACV,MAAA,KACA,OAAA,KAEA,eACA,SAAA,SnDaA,iBAAA,KACQ,wBAAA,YmDZR,gBAAA,YtD4tLA,OsD5tLA,IAAA,MAAA,KAEA,OAAA,IAAA,MAAA,evDksLD,cAAA,IuD9rLC,QAAS,EACT,mBAAA,EAAA,IAAA,IAAA,eACA,WAAA,EAAA,IAAA,IAAA,eAEA,gBACA,SAAA,MACA,IAAA,EACA,MAAA,EvDgsLD,OAAA,EuD9rLC,KAAA,ElCrEA,QAAA,KAGA,iBAAA,KkCmEA,qBlCtEA,OAAA,iBAGA,QAAA,EkCwEF,mBACE,OAAA,kBACA,QAAA,GAIF,cACE,QAAA,KvDgsLD,cAAA,IAAA,MAAA,QuD3rLC,qBACA,WAAA,KAKF,aACE,OAAA,EACA,YAAA,WAIF,YACE,SAAA,SACA,QAAA,KvD0rLD,cuD5rLC,QAAS,KAQP,WAAA,MACA,WAAA,IAAA,MAAA,QATJ,wBAaI,cAAA,EvDsrLH,YAAA,IuDlrLG,mCvDqrLH,YAAA,KuD/qLC,oCACA,YAAA,EAEA,yBACA,SAAA,SvDkrLD,IAAA,QuDhqLC,MAAO,KAZP,OAAA,KACE,SAAA,OvDgrLD,yBuD7qLD,cnDvEA,MAAA,MACQ,OAAA,KAAA,KmD2ER,eAAY,mBAAA,EAAA,IAAA,KAAA,evD+qLX,WAAA,EAAA,IAAA,KAAA,euDzqLD,UAFA,MAAA,OvDirLD,yBwD/zLC,UACA,MAAA,OCNA,SAEA,SAAA,SACA,QAAA,KACA,QAAA,MACA,YAAA,iBAAA,UAAA,MAAA,WACA,UAAA,KACA,WAAA,OACA,YAAA,IACA,YAAA,WACA,WAAA,KACA,WAAA,MACA,gBAAA,KACA,YAAA,KACA,eAAA,KACA,eAAA,ODHA,WAAA,OnCVA,aAAA,OAGA,UAAA,OrBs1LD,YAAA,OwD30LC,OAAA,iBnCdA,QAAA,ErB61LD,WAAA,KwD90LY,YAAmB,OAAA,kBxDk1L/B,QAAA,GwDj1LY,aAAmB,QAAA,IAAA,ExDq1L/B,WAAA,KwDp1LY,eAAmB,QAAA,EAAA,IxDw1L/B,YAAA,IwDv1LY,gBAAmB,QAAA,IAAA,ExD21L/B,WAAA,IwDt1LC,cACA,QAAA,EAAA,IACA,YAAA,KAEA,eACA,UAAA,MxDy1LD,QAAA,IAAA,IwDr1LC,MAAO,KACP,WAAA,OACA,iBAAA,KACA,cAAA,IAEA,exDu1LD,SAAA,SwDn1LC,MAAA,EACE,OAAA,EACA,aAAA,YACA,aAAA,MAEA,4BxDq1LH,OAAA,EwDn1LC,KAAA,IACE,YAAA,KACA,aAAA,IAAA,IAAA,EACA,iBAAA,KAEA,iCxDq1LH,MAAA,IwDn1LC,OAAA,EACE,cAAA,KACA,aAAA,IAAA,IAAA,EACA,iBAAA,KAEA,kCxDq1LH,OAAA,EwDn1LC,KAAA,IACE,cAAA,KACA,aAAA,IAAA,IAAA,EACA,iBAAA,KAEA,8BxDq1LH,IAAA,IwDn1LC,KAAA,EACE,WAAA,KACA,aAAA,IAAA,IAAA,IAAA,EACA,mBAAA,KAEA,6BxDq1LH,IAAA,IwDn1LC,MAAA,EACE,WAAA,KACA,aAAA,IAAA,EAAA,IAAA,IACA,kBAAA,KAEA,+BxDq1LH,IAAA,EwDn1LC,KAAA,IACE,YAAA,KACA,aAAA,EAAA,IAAA,IACA,oBAAA,KAEA,oCxDq1LH,IAAA,EwDn1LC,MAAA,IACE,WAAA,KACA,aAAA,EAAA,IAAA,IACA,oBAAA,KAEA,qCxDq1LH,IAAA,E0Dl7LC,KAAM,IACN,WAAA,KACA,aAAA,EAAA,IAAA,IACA,oBAAA,KAEA,SACA,SAAA,SACA,IAAA,EDXA,KAAA,EAEA,QAAA,KACA,QAAA,KACA,UAAA,MACA,QAAA,IACA,YAAA,iBAAA,UAAA,MAAA,WACA,UAAA,KACA,WAAA,OACA,YAAA,IACA,YAAA,WACA,WAAA,KACA,WAAA,MACA,gBAAA,KACA,YAAA,KACA,eAAA,KCAA,eAAA,OAEA,WAAA,OACA,aAAA,OAAA,UAAA,OACA,YAAA,OACA,iBAAA,KACA,wBAAA,YtD8CA,gBAAA,YACQ,OAAA,IAAA,MAAA,KJk5LT,OAAA,IAAA,MAAA,e0D77LC,cAAA,IAAY,mBAAA,EAAA,IAAA,KAAA,e1Dg8Lb,WAAA,EAAA,IAAA,KAAA,e0D/7La,WAAA,KACZ,aAAY,WAAA,MACZ,eAAY,YAAA,KAGd,gBACE,WAAA,KAEA,cACA,YAAA,MAEA,e1Dq8LD,QAAA,IAAA,K0Dl8LC,OAAQ,EACR,UAAA,K1Do8LD,iBAAA,Q0D57LC,cAAA,IAAA,MAAA,QzDy9LA,cAAe,IAAI,IAAI,EAAE,EyDt9LvB,iBACA,QAAA,IAAA,KAEA,gBACA,sB1D87LH,SAAA,S0D37LC,Q
AAS,MACT,MAAA,E1D67LD,OAAA,E0D37LC,aAAc,YACd,aAAA,M1D87LD,gB0Dz7LC,aAAA,KAEE,sBACA,QAAA,GACA,aAAA,KAEA,oB1D27LH,OAAA,M0D17LG,KAAA,IACE,YAAA,MACA,iBAAA,KACA,iBAAA,gBACA,oBAAA,E1D67LL,0B0Dz7LC,OAAA,IACE,YAAA,MACA,QAAA,IACA,iBAAA,KACA,oBAAA,EAEA,sB1D27LH,IAAA,I0D17LG,KAAA,MACE,WAAA,MACA,mBAAA,KACA,mBAAA,gBACA,kBAAA,E1D67LL,4B0Dz7LC,OAAA,MACE,KAAA,IACA,QAAA,IACA,mBAAA,KACA,kBAAA,EAEA,uB1D27LH,IAAA,M0D17LG,KAAA,IACE,YAAA,MACA,iBAAA,EACA,oBAAA,KACA,oBAAA,gB1D67LL,6B0Dx7LC,IAAA,IACE,YAAA,MACA,QAAA,IACA,iBAAA,EACA,oBAAA,KAEA,qB1D07LH,IAAA,I0Dz7LG,MAAA,MACE,WAAA,MACA,mBAAA,EACA,kBAAA,KACA,kBAAA,gB1D47LL,2B2DpjMC,MAAO,IACP,OAAA,M3DsjMD,QAAA,I2DnjMC,mBAAoB,EACpB,kBAAA,KAEA,U3DqjMD,SAAA,S2DljMG,gBACA,SAAA,SvD6KF,MAAA,KACK,SAAA,OJ04LN,sB2D/jMC,SAAU,S1D4lMV,QAAS,K0D9kML,mBAAA,IAAA,YAAA,K3DqjML,cAAA,IAAA,YAAA,K2D3hMC,WAAA,IAAA,YAAA,KvDmKK,4BAFL,0BAGQ,YAAA,EA3JA,qDA+GR,sBAEQ,mBAAA,kBAAA,IAAA,YJ86LP,cAAA,aAAA,IAAA,Y2DzjMG,WAAA,UAAA,IAAA,YvDmHJ,4BAAA,OACQ,oBAAA,OuDjHF,oBAAA,O3D4jML,YAAA,OI58LD,mCHs+LA,2BGr+LQ,KAAA,EuD5GF,kBAAA,sB3D6jML,UAAA,sBC2BD,kCADA,2BG5+LA,KAAA,EACQ,kBAAA,uBuDtGF,UAAA,uBArCN,6B3DomMD,gC2DpmMC,iC1D+nME,KAAM,E0DllMN,kBAAA,mB3D4jMH,UAAA,oBAGA,wB2D5mMD,sBAAA,sBAsDI,QAAA,MAEA,wB3D0jMH,KAAA,E2DtjMG,sB3DyjMH,sB2DrnMC,SAAU,SA+DR,IAAA,E3DyjMH,MAAA,KC0BD,sB0D/kMI,KAAA,KAnEJ,sBAuEI,KAAA,MAvEJ,2BA0EI,4B3DwjMH,KAAA,E2D/iMC,6BACA,KAAA,MAEA,8BACA,KAAA,KtC3FA,kBsC6FA,SAAA,SACA,IAAA,EACA,OAAA,EACA,KAAA,EACA,MAAA,I3DmjMD,UAAA,K2D9iMC,MAAA,KdnGE,WAAA,OACA,YAAA,EAAA,IAAA,IAAA,eACA,iBAAA,cAAA,OAAA,kBACA,QAAA,G7CqpMH,uB2DljMC,iBAAA,sEACE,iBAAA,iEACA,iBAAA,uFdxGA,iBAAA,kEACA,OAAA,+GACA,kBAAA,SACA,wBACA,MAAA,E7C6pMH,KAAA,K2DpjMC,iBAAA,sE1DglMA,iBAAiB,iE0D9kMf,iBAAA,uFACA,iBAAA,kEACA,OAAA,+GtCvHF,kBAAA,SsCyFF,wB3DslMC,wBC4BC,MAAO,KACP,gBAAiB,KACjB,OAAQ,kB0D7kMN,QAAA,EACA,QAAA,G3DwjMH,0C2DhmMD,2CA2CI,6BADA,6B1DklMF,SAAU,S0D7kMR,IAAA,IACA,QAAA,E3DqjMH,QAAA,a2DrmMC,WAAY,MAqDV,0CADA,6B3DsjMH,KAAA,I2D1mMC,YAAa,MA0DX,2CADA,6BAEA,MAAA,IACA,aAAA,MAME,6BADF,6B3DmjMH,MAAA,K2D9iMG,OAAA,KACE,YAAA,M3DgjML,YAAA,E2DriMC,oCACA,QAAA,QAEA,oCACA,QAAA,QAEA,qBACA,SAAA,SACA,OAAA,K3DwiMD,KAAA,I2DjjMC,QAAS,GAYP,MAAA,IACA,aAAA,EACA,YAAA,KACA,WAAA,OACA,WAAA,KAEA,wBACA,QAAA,aAWA,MAAA,KACA,OAAA,K3D8hMH,OAAA,I2D7jMC,YAAa,OAkCX,OAAA,QACA,iBAAA,OACA,iBAAA,cACA,OAAA,IAAA,MAAA,K3D8hMH,cAAA,K2DthMC,6BACA,MAAA,KACA,OAAA,KACA,OAAA,EACA,iBAAA,KAEA,kBACA,SAAA,SACA,MAAA,IACA,OAAA,K3DyhMD,KAAA,I2DxhMC,QAAA,GACE,YAAA,K3D0hMH,eAAA,K2Dj/LC,MAAO,KAhCP,WAAA,O1D8iMA,YAAa,EAAE,IAAI,IAAI,eAEzB,uB0D3iMM,YAAA,KAEA,oCACA,0C3DmhMH,2C2D3hMD,6BAAA,6BAYI,MAAA,K3DmhMH,OAAA,K2D/hMD,WAAA,M1D2jME,UAAW,KDxBZ,0C2D9gMD,6BACE,YAAA,MAEA,2C3DghMD,6B2D5gMD,aAAA,M3D+gMC,kBACF,MAAA,I4D7wMC,KAAA,I3DyyME,eAAgB,KAElB,qBACE,OAAQ,MAkBZ,qCADA,sCADA,mBADA,oBAXA,gBADA,iBAOA,uBADA,wBADA,iBADA,kBADA,wBADA,yBASA,mCADA,oC2DpzME,oBAAA,qBAAA,oBAAA,qB3D2zMF,WADA,YAOA,uBADA,wBADA,qBADA,sBADA,cADA,e2D/zMI,a3Dq0MJ,cDvBC,kB4D7yMG,mB3DqzMJ,WADA,YAwBE,QAAS,MACT,QAAS,IASX,qCADA,mBANA,gBAGA,uBADA,iBADA,wBAIA,mCDhBC,oB6D/0MC,oB5Dk2MF,W+B51MA,uBhCo0MC,qB4D5zMG,cChBF,aACA,kB5D+1MF,W+Br1ME,MAAO,KhCy0MR,cgCt0MC,QAAS,MACT,aAAA,KhCw0MD,YAAA,KgC/zMC,YhCk0MD,MAAA,gBgC/zMC,WhCk0MD,MAAA,egC/zMC,MhCk0MD,QAAA,e8Dz1MC,MACA,QAAA,gBAEA,WACA,WAAA,O9B8BF,WACE,KAAA,EAAA,EAAA,EhCg0MD,MAAA,YgCzzMC,YAAa,KACb,iBAAA,YhC2zMD,OAAA,E+D31MC,Q/D81MD,QAAA,eC4BD,OACE,SAAU,M+Dn4MV,chE42MD,MAAA,aC+BD,YADA,YADA,YADA,YAIE,QAAS,e+Dp5MT,kBhEs4MC,mBgEr4MD,yBhEi4MD,kB+Dl1MD,mBA6IA,yB9D4tMA,kBACA,mB8Dj3ME,yB9D62MF,kBACA,mBACA,yB+Dv5MY,QAAA,eACV,yBAAU,YhE04MT,QAAA,gBC4BD,iB+Dp6MU,QAAA,gBhE64MX,c+D51MG,QAAS,oB/Dg2MV,c+Dl2MC,c/Dm2MH,QAAA,sB+D
91MG,yB/Dk2MD,kBACF,QAAA,iB+D91MG,yB/Dk2MD,mBACF,QAAA,kBgEh6MC,yBhEo6MC,yBgEn6MD,QAAA,wBACA,+CAAU,YhEw6MT,QAAA,gBC4BD,iB+Dl8MU,QAAA,gBhE26MX,c+Dr2MG,QAAS,oB/Dy2MV,c+D32MC,c/D42MH,QAAA,sB+Dv2MG,+C/D22MD,kBACF,QAAA,iB+Dv2MG,+C/D22MD,mBACF,QAAA,kBgE97MC,+ChEk8MC,yBgEj8MD,QAAA,wBACA,gDAAU,YhEs8MT,QAAA,gBC4BD,iB+Dh+MU,QAAA,gBhEy8MX,c+D92MG,QAAS,oB/Dk3MV,c+Dp3MC,c/Dq3MH,QAAA,sB+Dh3MG,gD/Do3MD,kBACF,QAAA,iB+Dh3MG,gD/Do3MD,mBACF,QAAA,kBgE59MC,gDhEg+MC,yBgE/9MD,QAAA,wBACA,0BAAU,YhEo+MT,QAAA,gBC4BD,iB+D9/MU,QAAA,gBhEu+MX,c+Dv3MG,QAAS,oB/D23MV,c+D73MC,c/D83MH,QAAA,sB+Dz3MG,0B/D63MD,kBACF,QAAA,iB+Dz3MG,0B/D63MD,mBACF,QAAA,kBgEl/MC,0BhEs/MC,yBACF,QAAA,wBgEv/MC,yBhE2/MC,WACF,QAAA,gBgE5/MC,+ChEggNC,WACF,QAAA,gBgEjgNC,gDhEqgNC,WACF,QAAA,gBAGA,0B+Dh3MC,WA4BE,QAAS,gBC5LX,eAAU,QAAA,eACV,aAAU,ehEyhNT,QAAA,gBC4BD,oB+DnjNU,QAAA,gBhE4hNX,iB+D93MG,QAAS,oBAMX,iB/D23MD,iB+Dt2MG,QAAS,sB/D22MZ,qB+D/3MC,QAAS,e/Dk4MV,a+D53MC,qBAcE,QAAS,iB/Dm3MZ,sB+Dh4MC,QAAS,e/Dm4MV,a+D73MC,sBAOE,QAAS,kB/D23MZ,4B+D53MC,QAAS,eCpLT,ahEojNC,4BACF,QAAA,wBC6BD,aACE,cACE,QAAS","sourcesContent":["/*! normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */\n\n//\n// 1. Set default font family to sans-serif.\n// 2. Prevent iOS and IE text size adjust after device orientation change,\n// without disabling user zoom.\n//\n\nhtml {\n font-family: sans-serif; // 1\n -ms-text-size-adjust: 100%; // 2\n -webkit-text-size-adjust: 100%; // 2\n}\n\n//\n// Remove default margin.\n//\n\nbody {\n margin: 0;\n}\n\n// HTML5 display definitions\n// ==========================================================================\n\n//\n// Correct `block` display not defined for any HTML5 element in IE 8/9.\n// Correct `block` display not defined for `details` or `summary` in IE 10/11\n// and Firefox.\n// Correct `block` display not defined for `main` in IE 11.\n//\n\narticle,\naside,\ndetails,\nfigcaption,\nfigure,\nfooter,\nheader,\nhgroup,\nmain,\nmenu,\nnav,\nsection,\nsummary {\n display: block;\n}\n\n//\n// 1. Correct `inline-block` display not defined in IE 8/9.\n// 2. 
Normalize vertical alignment of `progress` in Chrome, Firefox, and Opera.\n//\n\naudio,\ncanvas,\nprogress,\nvideo {\n display: inline-block; // 1\n vertical-align: baseline; // 2\n}\n\n//\n// Prevent modern browsers from displaying `audio` without controls.\n// Remove excess height in iOS 5 devices.\n//\n\naudio:not([controls]) {\n display: none;\n height: 0;\n}\n\n//\n// Address `[hidden]` styling not present in IE 8/9/10.\n// Hide the `template` element in IE 8/9/10/11, Safari, and Firefox < 22.\n//\n\n[hidden],\ntemplate {\n display: none;\n}\n\n// Links\n// ==========================================================================\n\n//\n// Remove the gray background color from active links in IE 10.\n//\n\na {\n background-color: transparent;\n}\n\n//\n// Improve readability of focused elements when they are also in an\n// active/hover state.\n//\n\na:active,\na:hover {\n outline: 0;\n}\n\n// Text-level semantics\n// ==========================================================================\n\n//\n// Address styling not present in IE 8/9/10/11, Safari, and Chrome.\n//\n\nabbr[title] {\n border-bottom: 1px dotted;\n}\n\n//\n// Address style set to `bolder` in Firefox 4+, Safari, and Chrome.\n//\n\nb,\nstrong {\n font-weight: bold;\n}\n\n//\n// Address styling not present in Safari and Chrome.\n//\n\ndfn {\n font-style: italic;\n}\n\n//\n// Address variable `h1` font-size and margin within `section` and `article`\n// contexts in Firefox 4+, Safari, and Chrome.\n//\n\nh1 {\n font-size: 2em;\n margin: 0.67em 0;\n}\n\n//\n// Address styling not present in IE 8/9.\n//\n\nmark {\n background: #ff0;\n color: #000;\n}\n\n//\n// Address inconsistent and variable font size in all browsers.\n//\n\nsmall {\n font-size: 80%;\n}\n\n//\n// Prevent `sub` and `sup` affecting `line-height` in all browsers.\n//\n\nsub,\nsup {\n font-size: 75%;\n line-height: 0;\n position: relative;\n vertical-align: baseline;\n}\n\nsup {\n top: -0.5em;\n}\n\nsub {\n bottom: -0.25em;\n}\n\n// Embedded content\n// ==========================================================================\n\n//\n// Remove border when inside `a` element in IE 8/9/10.\n//\n\nimg {\n border: 0;\n}\n\n//\n// Correct overflow not hidden in IE 9/10/11.\n//\n\nsvg:not(:root) {\n overflow: hidden;\n}\n\n// Grouping content\n// ==========================================================================\n\n//\n// Address margin not present in IE 8/9 and Safari.\n//\n\nfigure {\n margin: 1em 40px;\n}\n\n//\n// Address differences between Firefox and other browsers.\n//\n\nhr {\n box-sizing: content-box;\n height: 0;\n}\n\n//\n// Contain overflow in all browsers.\n//\n\npre {\n overflow: auto;\n}\n\n//\n// Address odd `em`-unit font size rendering in all browsers.\n//\n\ncode,\nkbd,\npre,\nsamp {\n font-family: monospace, monospace;\n font-size: 1em;\n}\n\n// Forms\n// ==========================================================================\n\n//\n// Known limitation: by default, Chrome and Safari on OS X allow very limited\n// styling of `select`, unless a `border` property is set.\n//\n\n//\n// 1. Correct color not being inherited.\n// Known issue: affects color of disabled elements.\n// 2. Correct font properties not being inherited.\n// 3. 
Address margins set differently in Firefox 4+, Safari, and Chrome.\n//\n\nbutton,\ninput,\noptgroup,\nselect,\ntextarea {\n color: inherit; // 1\n font: inherit; // 2\n margin: 0; // 3\n}\n\n//\n// Address `overflow` set to `hidden` in IE 8/9/10/11.\n//\n\nbutton {\n overflow: visible;\n}\n\n//\n// Address inconsistent `text-transform` inheritance for `button` and `select`.\n// All other form control elements do not inherit `text-transform` values.\n// Correct `button` style inheritance in Firefox, IE 8/9/10/11, and Opera.\n// Correct `select` style inheritance in Firefox.\n//\n\nbutton,\nselect {\n text-transform: none;\n}\n\n//\n// 1. Avoid the WebKit bug in Android 4.0.* where (2) destroys native `audio`\n// and `video` controls.\n// 2. Correct inability to style clickable `input` types in iOS.\n// 3. Improve usability and consistency of cursor style between image-type\n// `input` and others.\n//\n\nbutton,\nhtml input[type=\"button\"], // 1\ninput[type=\"reset\"],\ninput[type=\"submit\"] {\n -webkit-appearance: button; // 2\n cursor: pointer; // 3\n}\n\n//\n// Re-set default cursor for disabled elements.\n//\n\nbutton[disabled],\nhtml input[disabled] {\n cursor: default;\n}\n\n//\n// Remove inner padding and border in Firefox 4+.\n//\n\nbutton::-moz-focus-inner,\ninput::-moz-focus-inner {\n border: 0;\n padding: 0;\n}\n\n//\n// Address Firefox 4+ setting `line-height` on `input` using `!important` in\n// the UA stylesheet.\n//\n\ninput {\n line-height: normal;\n}\n\n//\n// It's recommended that you don't attempt to style these elements.\n// Firefox's implementation doesn't respect box-sizing, padding, or width.\n//\n// 1. Address box sizing set to `content-box` in IE 8/9/10.\n// 2. Remove excess padding in IE 8/9/10.\n//\n\ninput[type=\"checkbox\"],\ninput[type=\"radio\"] {\n box-sizing: border-box; // 1\n padding: 0; // 2\n}\n\n//\n// Fix the cursor style for Chrome's increment/decrement buttons. For certain\n// `font-size` values of the `input`, it causes the cursor style of the\n// decrement button to change from `default` to `text`.\n//\n\ninput[type=\"number\"]::-webkit-inner-spin-button,\ninput[type=\"number\"]::-webkit-outer-spin-button {\n height: auto;\n}\n\n//\n// 1. Address `appearance` set to `searchfield` in Safari and Chrome.\n// 2. Address `box-sizing` set to `border-box` in Safari and Chrome.\n//\n\ninput[type=\"search\"] {\n -webkit-appearance: textfield; // 1\n box-sizing: content-box; //2\n}\n\n//\n// Remove inner padding and search cancel button in Safari and Chrome on OS X.\n// Safari (but not Chrome) clips the cancel button when the search input has\n// padding (and `textfield` appearance).\n//\n\ninput[type=\"search\"]::-webkit-search-cancel-button,\ninput[type=\"search\"]::-webkit-search-decoration {\n -webkit-appearance: none;\n}\n\n//\n// Define consistent border, margin, and padding.\n//\n\nfieldset {\n border: 1px solid #c0c0c0;\n margin: 0 2px;\n padding: 0.35em 0.625em 0.75em;\n}\n\n//\n// 1. Correct `color` not being inherited in IE 8/9/10/11.\n// 2. 
Remove padding so people aren't caught out if they zero out fieldsets.\n//\n\nlegend {\n border: 0; // 1\n padding: 0; // 2\n}\n\n//\n// Remove default vertical scrollbar in IE 8/9/10/11.\n//\n\ntextarea {\n overflow: auto;\n}\n\n//\n// Don't inherit the `font-weight` (applied by a rule above).\n// NOTE: the default cannot safely be changed in Chrome and Safari on OS X.\n//\n\noptgroup {\n font-weight: bold;\n}\n\n// Tables\n// ==========================================================================\n\n//\n// Remove most spacing between table cells.\n//\n\ntable {\n border-collapse: collapse;\n border-spacing: 0;\n}\n\ntd,\nth {\n padding: 0;\n}\n","/*! Source: https://github.com/h5bp/html5-boilerplate/blob/master/src/css/main.css */\n\n// ==========================================================================\n// Print styles.\n// Inlined to avoid the additional HTTP request: h5bp.com/r\n// ==========================================================================\n\n@media print {\n *,\n *:before,\n *:after {\n background: transparent !important;\n color: #000 !important; // Black prints faster: h5bp.com/s\n box-shadow: none !important;\n text-shadow: none !important;\n }\n\n a,\n a:visited {\n text-decoration: underline;\n }\n\n a[href]:after {\n content: \" (\" attr(href) \")\";\n }\n\n abbr[title]:after {\n content: \" (\" attr(title) \")\";\n }\n\n // Don't show links that are fragment identifiers,\n // or use the `javascript:` pseudo protocol\n a[href^=\"#\"]:after,\n a[href^=\"javascript:\"]:after {\n content: \"\";\n }\n\n pre,\n blockquote {\n border: 1px solid #999;\n page-break-inside: avoid;\n }\n\n thead {\n display: table-header-group; // h5bp.com/t\n }\n\n tr,\n img {\n page-break-inside: avoid;\n }\n\n img {\n max-width: 100% !important;\n }\n\n p,\n h2,\n h3 {\n orphans: 3;\n widows: 3;\n }\n\n h2,\n h3 {\n page-break-after: avoid;\n }\n\n // Bootstrap specific changes start\n\n // Bootstrap components\n .navbar {\n display: none;\n }\n .btn,\n .dropup > .btn {\n > .caret {\n border-top-color: #000 !important;\n }\n }\n .label {\n border: 1px solid #000;\n }\n\n .table {\n border-collapse: collapse !important;\n\n td,\n th {\n background-color: #fff !important;\n }\n }\n .table-bordered {\n th,\n td {\n border: 1px solid #ddd !important;\n }\n }\n\n // Bootstrap specific changes end\n}\n","/*!\n * Bootstrap v3.3.7 (http://getbootstrap.com)\n * Copyright 2011-2016 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n */\n/*! 
normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */\nhtml {\n font-family: sans-serif;\n -ms-text-size-adjust: 100%;\n -webkit-text-size-adjust: 100%;\n}\nbody {\n margin: 0;\n}\narticle,\naside,\ndetails,\nfigcaption,\nfigure,\nfooter,\nheader,\nhgroup,\nmain,\nmenu,\nnav,\nsection,\nsummary {\n display: block;\n}\naudio,\ncanvas,\nprogress,\nvideo {\n display: inline-block;\n vertical-align: baseline;\n}\naudio:not([controls]) {\n display: none;\n height: 0;\n}\n[hidden],\ntemplate {\n display: none;\n}\na {\n background-color: transparent;\n}\na:active,\na:hover {\n outline: 0;\n}\nabbr[title] {\n border-bottom: 1px dotted;\n}\nb,\nstrong {\n font-weight: bold;\n}\ndfn {\n font-style: italic;\n}\nh1 {\n font-size: 2em;\n margin: 0.67em 0;\n}\nmark {\n background: #ff0;\n color: #000;\n}\nsmall {\n font-size: 80%;\n}\nsub,\nsup {\n font-size: 75%;\n line-height: 0;\n position: relative;\n vertical-align: baseline;\n}\nsup {\n top: -0.5em;\n}\nsub {\n bottom: -0.25em;\n}\nimg {\n border: 0;\n}\nsvg:not(:root) {\n overflow: hidden;\n}\nfigure {\n margin: 1em 40px;\n}\nhr {\n box-sizing: content-box;\n height: 0;\n}\npre {\n overflow: auto;\n}\ncode,\nkbd,\npre,\nsamp {\n font-family: monospace, monospace;\n font-size: 1em;\n}\nbutton,\ninput,\noptgroup,\nselect,\ntextarea {\n color: inherit;\n font: inherit;\n margin: 0;\n}\nbutton {\n overflow: visible;\n}\nbutton,\nselect {\n text-transform: none;\n}\nbutton,\nhtml input[type=\"button\"],\ninput[type=\"reset\"],\ninput[type=\"submit\"] {\n -webkit-appearance: button;\n cursor: pointer;\n}\nbutton[disabled],\nhtml input[disabled] {\n cursor: default;\n}\nbutton::-moz-focus-inner,\ninput::-moz-focus-inner {\n border: 0;\n padding: 0;\n}\ninput {\n line-height: normal;\n}\ninput[type=\"checkbox\"],\ninput[type=\"radio\"] {\n box-sizing: border-box;\n padding: 0;\n}\ninput[type=\"number\"]::-webkit-inner-spin-button,\ninput[type=\"number\"]::-webkit-outer-spin-button {\n height: auto;\n}\ninput[type=\"search\"] {\n -webkit-appearance: textfield;\n box-sizing: content-box;\n}\ninput[type=\"search\"]::-webkit-search-cancel-button,\ninput[type=\"search\"]::-webkit-search-decoration {\n -webkit-appearance: none;\n}\nfieldset {\n border: 1px solid #c0c0c0;\n margin: 0 2px;\n padding: 0.35em 0.625em 0.75em;\n}\nlegend {\n border: 0;\n padding: 0;\n}\ntextarea {\n overflow: auto;\n}\noptgroup {\n font-weight: bold;\n}\ntable {\n border-collapse: collapse;\n border-spacing: 0;\n}\ntd,\nth {\n padding: 0;\n}\n/*! 
Source: https://github.com/h5bp/html5-boilerplate/blob/master/src/css/main.css */\n@media print {\n *,\n *:before,\n *:after {\n background: transparent !important;\n color: #000 !important;\n box-shadow: none !important;\n text-shadow: none !important;\n }\n a,\n a:visited {\n text-decoration: underline;\n }\n a[href]:after {\n content: \" (\" attr(href) \")\";\n }\n abbr[title]:after {\n content: \" (\" attr(title) \")\";\n }\n a[href^=\"#\"]:after,\n a[href^=\"javascript:\"]:after {\n content: \"\";\n }\n pre,\n blockquote {\n border: 1px solid #999;\n page-break-inside: avoid;\n }\n thead {\n display: table-header-group;\n }\n tr,\n img {\n page-break-inside: avoid;\n }\n img {\n max-width: 100% !important;\n }\n p,\n h2,\n h3 {\n orphans: 3;\n widows: 3;\n }\n h2,\n h3 {\n page-break-after: avoid;\n }\n .navbar {\n display: none;\n }\n .btn > .caret,\n .dropup > .btn > .caret {\n border-top-color: #000 !important;\n }\n .label {\n border: 1px solid #000;\n }\n .table {\n border-collapse: collapse !important;\n }\n .table td,\n .table th {\n background-color: #fff !important;\n }\n .table-bordered th,\n .table-bordered td {\n border: 1px solid #ddd !important;\n }\n}\n@font-face {\n font-family: 'Glyphicons Halflings';\n src: url('../fonts/glyphicons-halflings-regular.eot');\n src: url('../fonts/glyphicons-halflings-regular.eot?#iefix') format('embedded-opentype'), url('../fonts/glyphicons-halflings-regular.woff2') format('woff2'), url('../fonts/glyphicons-halflings-regular.woff') format('woff'), url('../fonts/glyphicons-halflings-regular.ttf') format('truetype'), url('../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular') format('svg');\n}\n.glyphicon {\n position: relative;\n top: 1px;\n display: inline-block;\n font-family: 'Glyphicons Halflings';\n font-style: normal;\n font-weight: normal;\n line-height: 1;\n -webkit-font-smoothing: antialiased;\n -moz-osx-font-smoothing: grayscale;\n}\n.glyphicon-asterisk:before {\n content: \"\\002a\";\n}\n.glyphicon-plus:before {\n content: \"\\002b\";\n}\n.glyphicon-euro:before,\n.glyphicon-eur:before {\n content: \"\\20ac\";\n}\n.glyphicon-minus:before {\n content: \"\\2212\";\n}\n.glyphicon-cloud:before {\n content: \"\\2601\";\n}\n.glyphicon-envelope:before {\n content: \"\\2709\";\n}\n.glyphicon-pencil:before {\n content: \"\\270f\";\n}\n.glyphicon-glass:before {\n content: \"\\e001\";\n}\n.glyphicon-music:before {\n content: \"\\e002\";\n}\n.glyphicon-search:before {\n content: \"\\e003\";\n}\n.glyphicon-heart:before {\n content: \"\\e005\";\n}\n.glyphicon-star:before {\n content: \"\\e006\";\n}\n.glyphicon-star-empty:before {\n content: \"\\e007\";\n}\n.glyphicon-user:before {\n content: \"\\e008\";\n}\n.glyphicon-film:before {\n content: \"\\e009\";\n}\n.glyphicon-th-large:before {\n content: \"\\e010\";\n}\n.glyphicon-th:before {\n content: \"\\e011\";\n}\n.glyphicon-th-list:before {\n content: \"\\e012\";\n}\n.glyphicon-ok:before {\n content: \"\\e013\";\n}\n.glyphicon-remove:before {\n content: \"\\e014\";\n}\n.glyphicon-zoom-in:before {\n content: \"\\e015\";\n}\n.glyphicon-zoom-out:before {\n content: \"\\e016\";\n}\n.glyphicon-off:before {\n content: \"\\e017\";\n}\n.glyphicon-signal:before {\n content: \"\\e018\";\n}\n.glyphicon-cog:before {\n content: \"\\e019\";\n}\n.glyphicon-trash:before {\n content: \"\\e020\";\n}\n.glyphicon-home:before {\n content: \"\\e021\";\n}\n.glyphicon-file:before {\n content: \"\\e022\";\n}\n.glyphicon-time:before {\n content: \"\\e023\";\n}\n.glyphicon-road:before {\n content: 
\"\\e024\";\n}\n.glyphicon-download-alt:before {\n content: \"\\e025\";\n}\n.glyphicon-download:before {\n content: \"\\e026\";\n}\n.glyphicon-upload:before {\n content: \"\\e027\";\n}\n.glyphicon-inbox:before {\n content: \"\\e028\";\n}\n.glyphicon-play-circle:before {\n content: \"\\e029\";\n}\n.glyphicon-repeat:before {\n content: \"\\e030\";\n}\n.glyphicon-refresh:before {\n content: \"\\e031\";\n}\n.glyphicon-list-alt:before {\n content: \"\\e032\";\n}\n.glyphicon-lock:before {\n content: \"\\e033\";\n}\n.glyphicon-flag:before {\n content: \"\\e034\";\n}\n.glyphicon-headphones:before {\n content: \"\\e035\";\n}\n.glyphicon-volume-off:before {\n content: \"\\e036\";\n}\n.glyphicon-volume-down:before {\n content: \"\\e037\";\n}\n.glyphicon-volume-up:before {\n content: \"\\e038\";\n}\n.glyphicon-qrcode:before {\n content: \"\\e039\";\n}\n.glyphicon-barcode:before {\n content: \"\\e040\";\n}\n.glyphicon-tag:before {\n content: \"\\e041\";\n}\n.glyphicon-tags:before {\n content: \"\\e042\";\n}\n.glyphicon-book:before {\n content: \"\\e043\";\n}\n.glyphicon-bookmark:before {\n content: \"\\e044\";\n}\n.glyphicon-print:before {\n content: \"\\e045\";\n}\n.glyphicon-camera:before {\n content: \"\\e046\";\n}\n.glyphicon-font:before {\n content: \"\\e047\";\n}\n.glyphicon-bold:before {\n content: \"\\e048\";\n}\n.glyphicon-italic:before {\n content: \"\\e049\";\n}\n.glyphicon-text-height:before {\n content: \"\\e050\";\n}\n.glyphicon-text-width:before {\n content: \"\\e051\";\n}\n.glyphicon-align-left:before {\n content: \"\\e052\";\n}\n.glyphicon-align-center:before {\n content: \"\\e053\";\n}\n.glyphicon-align-right:before {\n content: \"\\e054\";\n}\n.glyphicon-align-justify:before {\n content: \"\\e055\";\n}\n.glyphicon-list:before {\n content: \"\\e056\";\n}\n.glyphicon-indent-left:before {\n content: \"\\e057\";\n}\n.glyphicon-indent-right:before {\n content: \"\\e058\";\n}\n.glyphicon-facetime-video:before {\n content: \"\\e059\";\n}\n.glyphicon-picture:before {\n content: \"\\e060\";\n}\n.glyphicon-map-marker:before {\n content: \"\\e062\";\n}\n.glyphicon-adjust:before {\n content: \"\\e063\";\n}\n.glyphicon-tint:before {\n content: \"\\e064\";\n}\n.glyphicon-edit:before {\n content: \"\\e065\";\n}\n.glyphicon-share:before {\n content: \"\\e066\";\n}\n.glyphicon-check:before {\n content: \"\\e067\";\n}\n.glyphicon-move:before {\n content: \"\\e068\";\n}\n.glyphicon-step-backward:before {\n content: \"\\e069\";\n}\n.glyphicon-fast-backward:before {\n content: \"\\e070\";\n}\n.glyphicon-backward:before {\n content: \"\\e071\";\n}\n.glyphicon-play:before {\n content: \"\\e072\";\n}\n.glyphicon-pause:before {\n content: \"\\e073\";\n}\n.glyphicon-stop:before {\n content: \"\\e074\";\n}\n.glyphicon-forward:before {\n content: \"\\e075\";\n}\n.glyphicon-fast-forward:before {\n content: \"\\e076\";\n}\n.glyphicon-step-forward:before {\n content: \"\\e077\";\n}\n.glyphicon-eject:before {\n content: \"\\e078\";\n}\n.glyphicon-chevron-left:before {\n content: \"\\e079\";\n}\n.glyphicon-chevron-right:before {\n content: \"\\e080\";\n}\n.glyphicon-plus-sign:before {\n content: \"\\e081\";\n}\n.glyphicon-minus-sign:before {\n content: \"\\e082\";\n}\n.glyphicon-remove-sign:before {\n content: \"\\e083\";\n}\n.glyphicon-ok-sign:before {\n content: \"\\e084\";\n}\n.glyphicon-question-sign:before {\n content: \"\\e085\";\n}\n.glyphicon-info-sign:before {\n content: \"\\e086\";\n}\n.glyphicon-screenshot:before {\n content: \"\\e087\";\n}\n.glyphicon-remove-circle:before {\n content: 
\"\\e088\";\n}\n.glyphicon-ok-circle:before {\n content: \"\\e089\";\n}\n.glyphicon-ban-circle:before {\n content: \"\\e090\";\n}\n.glyphicon-arrow-left:before {\n content: \"\\e091\";\n}\n.glyphicon-arrow-right:before {\n content: \"\\e092\";\n}\n.glyphicon-arrow-up:before {\n content: \"\\e093\";\n}\n.glyphicon-arrow-down:before {\n content: \"\\e094\";\n}\n.glyphicon-share-alt:before {\n content: \"\\e095\";\n}\n.glyphicon-resize-full:before {\n content: \"\\e096\";\n}\n.glyphicon-resize-small:before {\n content: \"\\e097\";\n}\n.glyphicon-exclamation-sign:before {\n content: \"\\e101\";\n}\n.glyphicon-gift:before {\n content: \"\\e102\";\n}\n.glyphicon-leaf:before {\n content: \"\\e103\";\n}\n.glyphicon-fire:before {\n content: \"\\e104\";\n}\n.glyphicon-eye-open:before {\n content: \"\\e105\";\n}\n.glyphicon-eye-close:before {\n content: \"\\e106\";\n}\n.glyphicon-warning-sign:before {\n content: \"\\e107\";\n}\n.glyphicon-plane:before {\n content: \"\\e108\";\n}\n.glyphicon-calendar:before {\n content: \"\\e109\";\n}\n.glyphicon-random:before {\n content: \"\\e110\";\n}\n.glyphicon-comment:before {\n content: \"\\e111\";\n}\n.glyphicon-magnet:before {\n content: \"\\e112\";\n}\n.glyphicon-chevron-up:before {\n content: \"\\e113\";\n}\n.glyphicon-chevron-down:before {\n content: \"\\e114\";\n}\n.glyphicon-retweet:before {\n content: \"\\e115\";\n}\n.glyphicon-shopping-cart:before {\n content: \"\\e116\";\n}\n.glyphicon-folder-close:before {\n content: \"\\e117\";\n}\n.glyphicon-folder-open:before {\n content: \"\\e118\";\n}\n.glyphicon-resize-vertical:before {\n content: \"\\e119\";\n}\n.glyphicon-resize-horizontal:before {\n content: \"\\e120\";\n}\n.glyphicon-hdd:before {\n content: \"\\e121\";\n}\n.glyphicon-bullhorn:before {\n content: \"\\e122\";\n}\n.glyphicon-bell:before {\n content: \"\\e123\";\n}\n.glyphicon-certificate:before {\n content: \"\\e124\";\n}\n.glyphicon-thumbs-up:before {\n content: \"\\e125\";\n}\n.glyphicon-thumbs-down:before {\n content: \"\\e126\";\n}\n.glyphicon-hand-right:before {\n content: \"\\e127\";\n}\n.glyphicon-hand-left:before {\n content: \"\\e128\";\n}\n.glyphicon-hand-up:before {\n content: \"\\e129\";\n}\n.glyphicon-hand-down:before {\n content: \"\\e130\";\n}\n.glyphicon-circle-arrow-right:before {\n content: \"\\e131\";\n}\n.glyphicon-circle-arrow-left:before {\n content: \"\\e132\";\n}\n.glyphicon-circle-arrow-up:before {\n content: \"\\e133\";\n}\n.glyphicon-circle-arrow-down:before {\n content: \"\\e134\";\n}\n.glyphicon-globe:before {\n content: \"\\e135\";\n}\n.glyphicon-wrench:before {\n content: \"\\e136\";\n}\n.glyphicon-tasks:before {\n content: \"\\e137\";\n}\n.glyphicon-filter:before {\n content: \"\\e138\";\n}\n.glyphicon-briefcase:before {\n content: \"\\e139\";\n}\n.glyphicon-fullscreen:before {\n content: \"\\e140\";\n}\n.glyphicon-dashboard:before {\n content: \"\\e141\";\n}\n.glyphicon-paperclip:before {\n content: \"\\e142\";\n}\n.glyphicon-heart-empty:before {\n content: \"\\e143\";\n}\n.glyphicon-link:before {\n content: \"\\e144\";\n}\n.glyphicon-phone:before {\n content: \"\\e145\";\n}\n.glyphicon-pushpin:before {\n content: \"\\e146\";\n}\n.glyphicon-usd:before {\n content: \"\\e148\";\n}\n.glyphicon-gbp:before {\n content: \"\\e149\";\n}\n.glyphicon-sort:before {\n content: \"\\e150\";\n}\n.glyphicon-sort-by-alphabet:before {\n content: \"\\e151\";\n}\n.glyphicon-sort-by-alphabet-alt:before {\n content: \"\\e152\";\n}\n.glyphicon-sort-by-order:before {\n content: \"\\e153\";\n}\n.glyphicon-sort-by-order-alt:before {\n 
content: \"\\e154\";\n}\n.glyphicon-sort-by-attributes:before {\n content: \"\\e155\";\n}\n.glyphicon-sort-by-attributes-alt:before {\n content: \"\\e156\";\n}\n.glyphicon-unchecked:before {\n content: \"\\e157\";\n}\n.glyphicon-expand:before {\n content: \"\\e158\";\n}\n.glyphicon-collapse-down:before {\n content: \"\\e159\";\n}\n.glyphicon-collapse-up:before {\n content: \"\\e160\";\n}\n.glyphicon-log-in:before {\n content: \"\\e161\";\n}\n.glyphicon-flash:before {\n content: \"\\e162\";\n}\n.glyphicon-log-out:before {\n content: \"\\e163\";\n}\n.glyphicon-new-window:before {\n content: \"\\e164\";\n}\n.glyphicon-record:before {\n content: \"\\e165\";\n}\n.glyphicon-save:before {\n content: \"\\e166\";\n}\n.glyphicon-open:before {\n content: \"\\e167\";\n}\n.glyphicon-saved:before {\n content: \"\\e168\";\n}\n.glyphicon-import:before {\n content: \"\\e169\";\n}\n.glyphicon-export:before {\n content: \"\\e170\";\n}\n.glyphicon-send:before {\n content: \"\\e171\";\n}\n.glyphicon-floppy-disk:before {\n content: \"\\e172\";\n}\n.glyphicon-floppy-saved:before {\n content: \"\\e173\";\n}\n.glyphicon-floppy-remove:before {\n content: \"\\e174\";\n}\n.glyphicon-floppy-save:before {\n content: \"\\e175\";\n}\n.glyphicon-floppy-open:before {\n content: \"\\e176\";\n}\n.glyphicon-credit-card:before {\n content: \"\\e177\";\n}\n.glyphicon-transfer:before {\n content: \"\\e178\";\n}\n.glyphicon-cutlery:before {\n content: \"\\e179\";\n}\n.glyphicon-header:before {\n content: \"\\e180\";\n}\n.glyphicon-compressed:before {\n content: \"\\e181\";\n}\n.glyphicon-earphone:before {\n content: \"\\e182\";\n}\n.glyphicon-phone-alt:before {\n content: \"\\e183\";\n}\n.glyphicon-tower:before {\n content: \"\\e184\";\n}\n.glyphicon-stats:before {\n content: \"\\e185\";\n}\n.glyphicon-sd-video:before {\n content: \"\\e186\";\n}\n.glyphicon-hd-video:before {\n content: \"\\e187\";\n}\n.glyphicon-subtitles:before {\n content: \"\\e188\";\n}\n.glyphicon-sound-stereo:before {\n content: \"\\e189\";\n}\n.glyphicon-sound-dolby:before {\n content: \"\\e190\";\n}\n.glyphicon-sound-5-1:before {\n content: \"\\e191\";\n}\n.glyphicon-sound-6-1:before {\n content: \"\\e192\";\n}\n.glyphicon-sound-7-1:before {\n content: \"\\e193\";\n}\n.glyphicon-copyright-mark:before {\n content: \"\\e194\";\n}\n.glyphicon-registration-mark:before {\n content: \"\\e195\";\n}\n.glyphicon-cloud-download:before {\n content: \"\\e197\";\n}\n.glyphicon-cloud-upload:before {\n content: \"\\e198\";\n}\n.glyphicon-tree-conifer:before {\n content: \"\\e199\";\n}\n.glyphicon-tree-deciduous:before {\n content: \"\\e200\";\n}\n.glyphicon-cd:before {\n content: \"\\e201\";\n}\n.glyphicon-save-file:before {\n content: \"\\e202\";\n}\n.glyphicon-open-file:before {\n content: \"\\e203\";\n}\n.glyphicon-level-up:before {\n content: \"\\e204\";\n}\n.glyphicon-copy:before {\n content: \"\\e205\";\n}\n.glyphicon-paste:before {\n content: \"\\e206\";\n}\n.glyphicon-alert:before {\n content: \"\\e209\";\n}\n.glyphicon-equalizer:before {\n content: \"\\e210\";\n}\n.glyphicon-king:before {\n content: \"\\e211\";\n}\n.glyphicon-queen:before {\n content: \"\\e212\";\n}\n.glyphicon-pawn:before {\n content: \"\\e213\";\n}\n.glyphicon-bishop:before {\n content: \"\\e214\";\n}\n.glyphicon-knight:before {\n content: \"\\e215\";\n}\n.glyphicon-baby-formula:before {\n content: \"\\e216\";\n}\n.glyphicon-tent:before {\n content: \"\\26fa\";\n}\n.glyphicon-blackboard:before {\n content: \"\\e218\";\n}\n.glyphicon-bed:before {\n content: 
\"\\e219\";\n}\n.glyphicon-apple:before {\n content: \"\\f8ff\";\n}\n.glyphicon-erase:before {\n content: \"\\e221\";\n}\n.glyphicon-hourglass:before {\n content: \"\\231b\";\n}\n.glyphicon-lamp:before {\n content: \"\\e223\";\n}\n.glyphicon-duplicate:before {\n content: \"\\e224\";\n}\n.glyphicon-piggy-bank:before {\n content: \"\\e225\";\n}\n.glyphicon-scissors:before {\n content: \"\\e226\";\n}\n.glyphicon-bitcoin:before {\n content: \"\\e227\";\n}\n.glyphicon-btc:before {\n content: \"\\e227\";\n}\n.glyphicon-xbt:before {\n content: \"\\e227\";\n}\n.glyphicon-yen:before {\n content: \"\\00a5\";\n}\n.glyphicon-jpy:before {\n content: \"\\00a5\";\n}\n.glyphicon-ruble:before {\n content: \"\\20bd\";\n}\n.glyphicon-rub:before {\n content: \"\\20bd\";\n}\n.glyphicon-scale:before {\n content: \"\\e230\";\n}\n.glyphicon-ice-lolly:before {\n content: \"\\e231\";\n}\n.glyphicon-ice-lolly-tasted:before {\n content: \"\\e232\";\n}\n.glyphicon-education:before {\n content: \"\\e233\";\n}\n.glyphicon-option-horizontal:before {\n content: \"\\e234\";\n}\n.glyphicon-option-vertical:before {\n content: \"\\e235\";\n}\n.glyphicon-menu-hamburger:before {\n content: \"\\e236\";\n}\n.glyphicon-modal-window:before {\n content: \"\\e237\";\n}\n.glyphicon-oil:before {\n content: \"\\e238\";\n}\n.glyphicon-grain:before {\n content: \"\\e239\";\n}\n.glyphicon-sunglasses:before {\n content: \"\\e240\";\n}\n.glyphicon-text-size:before {\n content: \"\\e241\";\n}\n.glyphicon-text-color:before {\n content: \"\\e242\";\n}\n.glyphicon-text-background:before {\n content: \"\\e243\";\n}\n.glyphicon-object-align-top:before {\n content: \"\\e244\";\n}\n.glyphicon-object-align-bottom:before {\n content: \"\\e245\";\n}\n.glyphicon-object-align-horizontal:before {\n content: \"\\e246\";\n}\n.glyphicon-object-align-left:before {\n content: \"\\e247\";\n}\n.glyphicon-object-align-vertical:before {\n content: \"\\e248\";\n}\n.glyphicon-object-align-right:before {\n content: \"\\e249\";\n}\n.glyphicon-triangle-right:before {\n content: \"\\e250\";\n}\n.glyphicon-triangle-left:before {\n content: \"\\e251\";\n}\n.glyphicon-triangle-bottom:before {\n content: \"\\e252\";\n}\n.glyphicon-triangle-top:before {\n content: \"\\e253\";\n}\n.glyphicon-console:before {\n content: \"\\e254\";\n}\n.glyphicon-superscript:before {\n content: \"\\e255\";\n}\n.glyphicon-subscript:before {\n content: \"\\e256\";\n}\n.glyphicon-menu-left:before {\n content: \"\\e257\";\n}\n.glyphicon-menu-right:before {\n content: \"\\e258\";\n}\n.glyphicon-menu-down:before {\n content: \"\\e259\";\n}\n.glyphicon-menu-up:before {\n content: \"\\e260\";\n}\n* {\n -webkit-box-sizing: border-box;\n -moz-box-sizing: border-box;\n box-sizing: border-box;\n}\n*:before,\n*:after {\n -webkit-box-sizing: border-box;\n -moz-box-sizing: border-box;\n box-sizing: border-box;\n}\nhtml {\n font-size: 10px;\n -webkit-tap-highlight-color: rgba(0, 0, 0, 0);\n}\nbody {\n font-family: \"Helvetica Neue\", Helvetica, Arial, sans-serif;\n font-size: 14px;\n line-height: 1.42857143;\n color: #333333;\n background-color: #fff;\n}\ninput,\nbutton,\nselect,\ntextarea {\n font-family: inherit;\n font-size: inherit;\n line-height: inherit;\n}\na {\n color: #337ab7;\n text-decoration: none;\n}\na:hover,\na:focus {\n color: #23527c;\n text-decoration: underline;\n}\na:focus {\n outline: 5px auto -webkit-focus-ring-color;\n outline-offset: -2px;\n}\nfigure {\n margin: 0;\n}\nimg {\n vertical-align: middle;\n}\n.img-responsive,\n.thumbnail > img,\n.thumbnail a > img,\n.carousel-inner > .item 
> img,\n.carousel-inner > .item > a > img {\n display: block;\n max-width: 100%;\n height: auto;\n}\n.img-rounded {\n border-radius: 6px;\n}\n.img-thumbnail {\n padding: 4px;\n line-height: 1.42857143;\n background-color: #fff;\n border: 1px solid #ddd;\n border-radius: 4px;\n -webkit-transition: all 0.2s ease-in-out;\n -o-transition: all 0.2s ease-in-out;\n transition: all 0.2s ease-in-out;\n display: inline-block;\n max-width: 100%;\n height: auto;\n}\n.img-circle {\n border-radius: 50%;\n}\nhr {\n margin-top: 20px;\n margin-bottom: 20px;\n border: 0;\n border-top: 1px solid #eeeeee;\n}\n.sr-only {\n position: absolute;\n width: 1px;\n height: 1px;\n margin: -1px;\n padding: 0;\n overflow: hidden;\n clip: rect(0, 0, 0, 0);\n border: 0;\n}\n.sr-only-focusable:active,\n.sr-only-focusable:focus {\n position: static;\n width: auto;\n height: auto;\n margin: 0;\n overflow: visible;\n clip: auto;\n}\n[role=\"button\"] {\n cursor: pointer;\n}\nh1,\nh2,\nh3,\nh4,\nh5,\nh6,\n.h1,\n.h2,\n.h3,\n.h4,\n.h5,\n.h6 {\n font-family: inherit;\n font-weight: 500;\n line-height: 1.1;\n color: inherit;\n}\nh1 small,\nh2 small,\nh3 small,\nh4 small,\nh5 small,\nh6 small,\n.h1 small,\n.h2 small,\n.h3 small,\n.h4 small,\n.h5 small,\n.h6 small,\nh1 .small,\nh2 .small,\nh3 .small,\nh4 .small,\nh5 .small,\nh6 .small,\n.h1 .small,\n.h2 .small,\n.h3 .small,\n.h4 .small,\n.h5 .small,\n.h6 .small {\n font-weight: normal;\n line-height: 1;\n color: #777777;\n}\nh1,\n.h1,\nh2,\n.h2,\nh3,\n.h3 {\n margin-top: 20px;\n margin-bottom: 10px;\n}\nh1 small,\n.h1 small,\nh2 small,\n.h2 small,\nh3 small,\n.h3 small,\nh1 .small,\n.h1 .small,\nh2 .small,\n.h2 .small,\nh3 .small,\n.h3 .small {\n font-size: 65%;\n}\nh4,\n.h4,\nh5,\n.h5,\nh6,\n.h6 {\n margin-top: 10px;\n margin-bottom: 10px;\n}\nh4 small,\n.h4 small,\nh5 small,\n.h5 small,\nh6 small,\n.h6 small,\nh4 .small,\n.h4 .small,\nh5 .small,\n.h5 .small,\nh6 .small,\n.h6 .small {\n font-size: 75%;\n}\nh1,\n.h1 {\n font-size: 36px;\n}\nh2,\n.h2 {\n font-size: 30px;\n}\nh3,\n.h3 {\n font-size: 24px;\n}\nh4,\n.h4 {\n font-size: 18px;\n}\nh5,\n.h5 {\n font-size: 14px;\n}\nh6,\n.h6 {\n font-size: 12px;\n}\np {\n margin: 0 0 10px;\n}\n.lead {\n margin-bottom: 20px;\n font-size: 16px;\n font-weight: 300;\n line-height: 1.4;\n}\n@media (min-width: 768px) {\n .lead {\n font-size: 21px;\n }\n}\nsmall,\n.small {\n font-size: 85%;\n}\nmark,\n.mark {\n background-color: #fcf8e3;\n padding: .2em;\n}\n.text-left {\n text-align: left;\n}\n.text-right {\n text-align: right;\n}\n.text-center {\n text-align: center;\n}\n.text-justify {\n text-align: justify;\n}\n.text-nowrap {\n white-space: nowrap;\n}\n.text-lowercase {\n text-transform: lowercase;\n}\n.text-uppercase {\n text-transform: uppercase;\n}\n.text-capitalize {\n text-transform: capitalize;\n}\n.text-muted {\n color: #777777;\n}\n.text-primary {\n color: #337ab7;\n}\na.text-primary:hover,\na.text-primary:focus {\n color: #286090;\n}\n.text-success {\n color: #3c763d;\n}\na.text-success:hover,\na.text-success:focus {\n color: #2b542c;\n}\n.text-info {\n color: #31708f;\n}\na.text-info:hover,\na.text-info:focus {\n color: #245269;\n}\n.text-warning {\n color: #8a6d3b;\n}\na.text-warning:hover,\na.text-warning:focus {\n color: #66512c;\n}\n.text-danger {\n color: #a94442;\n}\na.text-danger:hover,\na.text-danger:focus {\n color: #843534;\n}\n.bg-primary {\n color: #fff;\n background-color: #337ab7;\n}\na.bg-primary:hover,\na.bg-primary:focus {\n background-color: #286090;\n}\n.bg-success {\n background-color: 
#dff0d8;\n}\na.bg-success:hover,\na.bg-success:focus {\n background-color: #c1e2b3;\n}\n.bg-info {\n background-color: #d9edf7;\n}\na.bg-info:hover,\na.bg-info:focus {\n background-color: #afd9ee;\n}\n.bg-warning {\n background-color: #fcf8e3;\n}\na.bg-warning:hover,\na.bg-warning:focus {\n background-color: #f7ecb5;\n}\n.bg-danger {\n background-color: #f2dede;\n}\na.bg-danger:hover,\na.bg-danger:focus {\n background-color: #e4b9b9;\n}\n.page-header {\n padding-bottom: 9px;\n margin: 40px 0 20px;\n border-bottom: 1px solid #eeeeee;\n}\nul,\nol {\n margin-top: 0;\n margin-bottom: 10px;\n}\nul ul,\nol ul,\nul ol,\nol ol {\n margin-bottom: 0;\n}\n.list-unstyled {\n padding-left: 0;\n list-style: none;\n}\n.list-inline {\n padding-left: 0;\n list-style: none;\n margin-left: -5px;\n}\n.list-inline > li {\n display: inline-block;\n padding-left: 5px;\n padding-right: 5px;\n}\ndl {\n margin-top: 0;\n margin-bottom: 20px;\n}\ndt,\ndd {\n line-height: 1.42857143;\n}\ndt {\n font-weight: bold;\n}\ndd {\n margin-left: 0;\n}\n@media (min-width: 768px) {\n .dl-horizontal dt {\n float: left;\n width: 160px;\n clear: left;\n text-align: right;\n overflow: hidden;\n text-overflow: ellipsis;\n white-space: nowrap;\n }\n .dl-horizontal dd {\n margin-left: 180px;\n }\n}\nabbr[title],\nabbr[data-original-title] {\n cursor: help;\n border-bottom: 1px dotted #777777;\n}\n.initialism {\n font-size: 90%;\n text-transform: uppercase;\n}\nblockquote {\n padding: 10px 20px;\n margin: 0 0 20px;\n font-size: 17.5px;\n border-left: 5px solid #eeeeee;\n}\nblockquote p:last-child,\nblockquote ul:last-child,\nblockquote ol:last-child {\n margin-bottom: 0;\n}\nblockquote footer,\nblockquote small,\nblockquote .small {\n display: block;\n font-size: 80%;\n line-height: 1.42857143;\n color: #777777;\n}\nblockquote footer:before,\nblockquote small:before,\nblockquote .small:before {\n content: '\\2014 \\00A0';\n}\n.blockquote-reverse,\nblockquote.pull-right {\n padding-right: 15px;\n padding-left: 0;\n border-right: 5px solid #eeeeee;\n border-left: 0;\n text-align: right;\n}\n.blockquote-reverse footer:before,\nblockquote.pull-right footer:before,\n.blockquote-reverse small:before,\nblockquote.pull-right small:before,\n.blockquote-reverse .small:before,\nblockquote.pull-right .small:before {\n content: '';\n}\n.blockquote-reverse footer:after,\nblockquote.pull-right footer:after,\n.blockquote-reverse small:after,\nblockquote.pull-right small:after,\n.blockquote-reverse .small:after,\nblockquote.pull-right .small:after {\n content: '\\00A0 \\2014';\n}\naddress {\n margin-bottom: 20px;\n font-style: normal;\n line-height: 1.42857143;\n}\ncode,\nkbd,\npre,\nsamp {\n font-family: Menlo, Monaco, Consolas, \"Courier New\", monospace;\n}\ncode {\n padding: 2px 4px;\n font-size: 90%;\n color: #c7254e;\n background-color: #f9f2f4;\n border-radius: 4px;\n}\nkbd {\n padding: 2px 4px;\n font-size: 90%;\n color: #fff;\n background-color: #333;\n border-radius: 3px;\n box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.25);\n}\nkbd kbd {\n padding: 0;\n font-size: 100%;\n font-weight: bold;\n box-shadow: none;\n}\npre {\n display: block;\n padding: 9.5px;\n margin: 0 0 10px;\n font-size: 13px;\n line-height: 1.42857143;\n word-break: break-all;\n word-wrap: break-word;\n color: #333333;\n background-color: #f5f5f5;\n border: 1px solid #ccc;\n border-radius: 4px;\n}\npre code {\n padding: 0;\n font-size: inherit;\n color: inherit;\n white-space: pre-wrap;\n background-color: transparent;\n border-radius: 0;\n}\n.pre-scrollable {\n max-height: 
340px;\n overflow-y: scroll;\n}\n.container {\n margin-right: auto;\n margin-left: auto;\n padding-left: 15px;\n padding-right: 15px;\n}\n@media (min-width: 768px) {\n .container {\n width: 750px;\n }\n}\n@media (min-width: 992px) {\n .container {\n width: 970px;\n }\n}\n@media (min-width: 1200px) {\n .container {\n width: 1170px;\n }\n}\n.container-fluid {\n margin-right: auto;\n margin-left: auto;\n padding-left: 15px;\n padding-right: 15px;\n}\n.row {\n margin-left: -15px;\n margin-right: -15px;\n}\n.col-xs-1, .col-sm-1, .col-md-1, .col-lg-1, .col-xs-2, .col-sm-2, .col-md-2, .col-lg-2, .col-xs-3, .col-sm-3, .col-md-3, .col-lg-3, .col-xs-4, .col-sm-4, .col-md-4, .col-lg-4, .col-xs-5, .col-sm-5, .col-md-5, .col-lg-5, .col-xs-6, .col-sm-6, .col-md-6, .col-lg-6, .col-xs-7, .col-sm-7, .col-md-7, .col-lg-7, .col-xs-8, .col-sm-8, .col-md-8, .col-lg-8, .col-xs-9, .col-sm-9, .col-md-9, .col-lg-9, .col-xs-10, .col-sm-10, .col-md-10, .col-lg-10, .col-xs-11, .col-sm-11, .col-md-11, .col-lg-11, .col-xs-12, .col-sm-12, .col-md-12, .col-lg-12 {\n position: relative;\n min-height: 1px;\n padding-left: 15px;\n padding-right: 15px;\n}\n.col-xs-1, .col-xs-2, .col-xs-3, .col-xs-4, .col-xs-5, .col-xs-6, .col-xs-7, .col-xs-8, .col-xs-9, .col-xs-10, .col-xs-11, .col-xs-12 {\n float: left;\n}\n.col-xs-12 {\n width: 100%;\n}\n.col-xs-11 {\n width: 91.66666667%;\n}\n.col-xs-10 {\n width: 83.33333333%;\n}\n.col-xs-9 {\n width: 75%;\n}\n.col-xs-8 {\n width: 66.66666667%;\n}\n.col-xs-7 {\n width: 58.33333333%;\n}\n.col-xs-6 {\n width: 50%;\n}\n.col-xs-5 {\n width: 41.66666667%;\n}\n.col-xs-4 {\n width: 33.33333333%;\n}\n.col-xs-3 {\n width: 25%;\n}\n.col-xs-2 {\n width: 16.66666667%;\n}\n.col-xs-1 {\n width: 8.33333333%;\n}\n.col-xs-pull-12 {\n right: 100%;\n}\n.col-xs-pull-11 {\n right: 91.66666667%;\n}\n.col-xs-pull-10 {\n right: 83.33333333%;\n}\n.col-xs-pull-9 {\n right: 75%;\n}\n.col-xs-pull-8 {\n right: 66.66666667%;\n}\n.col-xs-pull-7 {\n right: 58.33333333%;\n}\n.col-xs-pull-6 {\n right: 50%;\n}\n.col-xs-pull-5 {\n right: 41.66666667%;\n}\n.col-xs-pull-4 {\n right: 33.33333333%;\n}\n.col-xs-pull-3 {\n right: 25%;\n}\n.col-xs-pull-2 {\n right: 16.66666667%;\n}\n.col-xs-pull-1 {\n right: 8.33333333%;\n}\n.col-xs-pull-0 {\n right: auto;\n}\n.col-xs-push-12 {\n left: 100%;\n}\n.col-xs-push-11 {\n left: 91.66666667%;\n}\n.col-xs-push-10 {\n left: 83.33333333%;\n}\n.col-xs-push-9 {\n left: 75%;\n}\n.col-xs-push-8 {\n left: 66.66666667%;\n}\n.col-xs-push-7 {\n left: 58.33333333%;\n}\n.col-xs-push-6 {\n left: 50%;\n}\n.col-xs-push-5 {\n left: 41.66666667%;\n}\n.col-xs-push-4 {\n left: 33.33333333%;\n}\n.col-xs-push-3 {\n left: 25%;\n}\n.col-xs-push-2 {\n left: 16.66666667%;\n}\n.col-xs-push-1 {\n left: 8.33333333%;\n}\n.col-xs-push-0 {\n left: auto;\n}\n.col-xs-offset-12 {\n margin-left: 100%;\n}\n.col-xs-offset-11 {\n margin-left: 91.66666667%;\n}\n.col-xs-offset-10 {\n margin-left: 83.33333333%;\n}\n.col-xs-offset-9 {\n margin-left: 75%;\n}\n.col-xs-offset-8 {\n margin-left: 66.66666667%;\n}\n.col-xs-offset-7 {\n margin-left: 58.33333333%;\n}\n.col-xs-offset-6 {\n margin-left: 50%;\n}\n.col-xs-offset-5 {\n margin-left: 41.66666667%;\n}\n.col-xs-offset-4 {\n margin-left: 33.33333333%;\n}\n.col-xs-offset-3 {\n margin-left: 25%;\n}\n.col-xs-offset-2 {\n margin-left: 16.66666667%;\n}\n.col-xs-offset-1 {\n margin-left: 8.33333333%;\n}\n.col-xs-offset-0 {\n margin-left: 0%;\n}\n@media (min-width: 768px) {\n .col-sm-1, .col-sm-2, .col-sm-3, .col-sm-4, .col-sm-5, .col-sm-6, .col-sm-7, .col-sm-8, .col-sm-9, .col-sm-10, 
.col-sm-11, .col-sm-12 {\n float: left;\n }\n .col-sm-12 {\n width: 100%;\n }\n .col-sm-11 {\n width: 91.66666667%;\n }\n .col-sm-10 {\n width: 83.33333333%;\n }\n .col-sm-9 {\n width: 75%;\n }\n .col-sm-8 {\n width: 66.66666667%;\n }\n .col-sm-7 {\n width: 58.33333333%;\n }\n .col-sm-6 {\n width: 50%;\n }\n .col-sm-5 {\n width: 41.66666667%;\n }\n .col-sm-4 {\n width: 33.33333333%;\n }\n .col-sm-3 {\n width: 25%;\n }\n .col-sm-2 {\n width: 16.66666667%;\n }\n .col-sm-1 {\n width: 8.33333333%;\n }\n .col-sm-pull-12 {\n right: 100%;\n }\n .col-sm-pull-11 {\n right: 91.66666667%;\n }\n .col-sm-pull-10 {\n right: 83.33333333%;\n }\n .col-sm-pull-9 {\n right: 75%;\n }\n .col-sm-pull-8 {\n right: 66.66666667%;\n }\n .col-sm-pull-7 {\n right: 58.33333333%;\n }\n .col-sm-pull-6 {\n right: 50%;\n }\n .col-sm-pull-5 {\n right: 41.66666667%;\n }\n .col-sm-pull-4 {\n right: 33.33333333%;\n }\n .col-sm-pull-3 {\n right: 25%;\n }\n .col-sm-pull-2 {\n right: 16.66666667%;\n }\n .col-sm-pull-1 {\n right: 8.33333333%;\n }\n .col-sm-pull-0 {\n right: auto;\n }\n .col-sm-push-12 {\n left: 100%;\n }\n .col-sm-push-11 {\n left: 91.66666667%;\n }\n .col-sm-push-10 {\n left: 83.33333333%;\n }\n .col-sm-push-9 {\n left: 75%;\n }\n .col-sm-push-8 {\n left: 66.66666667%;\n }\n .col-sm-push-7 {\n left: 58.33333333%;\n }\n .col-sm-push-6 {\n left: 50%;\n }\n .col-sm-push-5 {\n left: 41.66666667%;\n }\n .col-sm-push-4 {\n left: 33.33333333%;\n }\n .col-sm-push-3 {\n left: 25%;\n }\n .col-sm-push-2 {\n left: 16.66666667%;\n }\n .col-sm-push-1 {\n left: 8.33333333%;\n }\n .col-sm-push-0 {\n left: auto;\n }\n .col-sm-offset-12 {\n margin-left: 100%;\n }\n .col-sm-offset-11 {\n margin-left: 91.66666667%;\n }\n .col-sm-offset-10 {\n margin-left: 83.33333333%;\n }\n .col-sm-offset-9 {\n margin-left: 75%;\n }\n .col-sm-offset-8 {\n margin-left: 66.66666667%;\n }\n .col-sm-offset-7 {\n margin-left: 58.33333333%;\n }\n .col-sm-offset-6 {\n margin-left: 50%;\n }\n .col-sm-offset-5 {\n margin-left: 41.66666667%;\n }\n .col-sm-offset-4 {\n margin-left: 33.33333333%;\n }\n .col-sm-offset-3 {\n margin-left: 25%;\n }\n .col-sm-offset-2 {\n margin-left: 16.66666667%;\n }\n .col-sm-offset-1 {\n margin-left: 8.33333333%;\n }\n .col-sm-offset-0 {\n margin-left: 0%;\n }\n}\n@media (min-width: 992px) {\n .col-md-1, .col-md-2, .col-md-3, .col-md-4, .col-md-5, .col-md-6, .col-md-7, .col-md-8, .col-md-9, .col-md-10, .col-md-11, .col-md-12 {\n float: left;\n }\n .col-md-12 {\n width: 100%;\n }\n .col-md-11 {\n width: 91.66666667%;\n }\n .col-md-10 {\n width: 83.33333333%;\n }\n .col-md-9 {\n width: 75%;\n }\n .col-md-8 {\n width: 66.66666667%;\n }\n .col-md-7 {\n width: 58.33333333%;\n }\n .col-md-6 {\n width: 50%;\n }\n .col-md-5 {\n width: 41.66666667%;\n }\n .col-md-4 {\n width: 33.33333333%;\n }\n .col-md-3 {\n width: 25%;\n }\n .col-md-2 {\n width: 16.66666667%;\n }\n .col-md-1 {\n width: 8.33333333%;\n }\n .col-md-pull-12 {\n right: 100%;\n }\n .col-md-pull-11 {\n right: 91.66666667%;\n }\n .col-md-pull-10 {\n right: 83.33333333%;\n }\n .col-md-pull-9 {\n right: 75%;\n }\n .col-md-pull-8 {\n right: 66.66666667%;\n }\n .col-md-pull-7 {\n right: 58.33333333%;\n }\n .col-md-pull-6 {\n right: 50%;\n }\n .col-md-pull-5 {\n right: 41.66666667%;\n }\n .col-md-pull-4 {\n right: 33.33333333%;\n }\n .col-md-pull-3 {\n right: 25%;\n }\n .col-md-pull-2 {\n right: 16.66666667%;\n }\n .col-md-pull-1 {\n right: 8.33333333%;\n }\n .col-md-pull-0 {\n right: auto;\n }\n .col-md-push-12 {\n left: 100%;\n }\n .col-md-push-11 {\n left: 91.66666667%;\n }\n 
.col-md-push-10 {\n left: 83.33333333%;\n }\n .col-md-push-9 {\n left: 75%;\n }\n .col-md-push-8 {\n left: 66.66666667%;\n }\n .col-md-push-7 {\n left: 58.33333333%;\n }\n .col-md-push-6 {\n left: 50%;\n }\n .col-md-push-5 {\n left: 41.66666667%;\n }\n .col-md-push-4 {\n left: 33.33333333%;\n }\n .col-md-push-3 {\n left: 25%;\n }\n .col-md-push-2 {\n left: 16.66666667%;\n }\n .col-md-push-1 {\n left: 8.33333333%;\n }\n .col-md-push-0 {\n left: auto;\n }\n .col-md-offset-12 {\n margin-left: 100%;\n }\n .col-md-offset-11 {\n margin-left: 91.66666667%;\n }\n .col-md-offset-10 {\n margin-left: 83.33333333%;\n }\n .col-md-offset-9 {\n margin-left: 75%;\n }\n .col-md-offset-8 {\n margin-left: 66.66666667%;\n }\n .col-md-offset-7 {\n margin-left: 58.33333333%;\n }\n .col-md-offset-6 {\n margin-left: 50%;\n }\n .col-md-offset-5 {\n margin-left: 41.66666667%;\n }\n .col-md-offset-4 {\n margin-left: 33.33333333%;\n }\n .col-md-offset-3 {\n margin-left: 25%;\n }\n .col-md-offset-2 {\n margin-left: 16.66666667%;\n }\n .col-md-offset-1 {\n margin-left: 8.33333333%;\n }\n .col-md-offset-0 {\n margin-left: 0%;\n }\n}\n@media (min-width: 1200px) {\n .col-lg-1, .col-lg-2, .col-lg-3, .col-lg-4, .col-lg-5, .col-lg-6, .col-lg-7, .col-lg-8, .col-lg-9, .col-lg-10, .col-lg-11, .col-lg-12 {\n float: left;\n }\n .col-lg-12 {\n width: 100%;\n }\n .col-lg-11 {\n width: 91.66666667%;\n }\n .col-lg-10 {\n width: 83.33333333%;\n }\n .col-lg-9 {\n width: 75%;\n }\n .col-lg-8 {\n width: 66.66666667%;\n }\n .col-lg-7 {\n width: 58.33333333%;\n }\n .col-lg-6 {\n width: 50%;\n }\n .col-lg-5 {\n width: 41.66666667%;\n }\n .col-lg-4 {\n width: 33.33333333%;\n }\n .col-lg-3 {\n width: 25%;\n }\n .col-lg-2 {\n width: 16.66666667%;\n }\n .col-lg-1 {\n width: 8.33333333%;\n }\n .col-lg-pull-12 {\n right: 100%;\n }\n .col-lg-pull-11 {\n right: 91.66666667%;\n }\n .col-lg-pull-10 {\n right: 83.33333333%;\n }\n .col-lg-pull-9 {\n right: 75%;\n }\n .col-lg-pull-8 {\n right: 66.66666667%;\n }\n .col-lg-pull-7 {\n right: 58.33333333%;\n }\n .col-lg-pull-6 {\n right: 50%;\n }\n .col-lg-pull-5 {\n right: 41.66666667%;\n }\n .col-lg-pull-4 {\n right: 33.33333333%;\n }\n .col-lg-pull-3 {\n right: 25%;\n }\n .col-lg-pull-2 {\n right: 16.66666667%;\n }\n .col-lg-pull-1 {\n right: 8.33333333%;\n }\n .col-lg-pull-0 {\n right: auto;\n }\n .col-lg-push-12 {\n left: 100%;\n }\n .col-lg-push-11 {\n left: 91.66666667%;\n }\n .col-lg-push-10 {\n left: 83.33333333%;\n }\n .col-lg-push-9 {\n left: 75%;\n }\n .col-lg-push-8 {\n left: 66.66666667%;\n }\n .col-lg-push-7 {\n left: 58.33333333%;\n }\n .col-lg-push-6 {\n left: 50%;\n }\n .col-lg-push-5 {\n left: 41.66666667%;\n }\n .col-lg-push-4 {\n left: 33.33333333%;\n }\n .col-lg-push-3 {\n left: 25%;\n }\n .col-lg-push-2 {\n left: 16.66666667%;\n }\n .col-lg-push-1 {\n left: 8.33333333%;\n }\n .col-lg-push-0 {\n left: auto;\n }\n .col-lg-offset-12 {\n margin-left: 100%;\n }\n .col-lg-offset-11 {\n margin-left: 91.66666667%;\n }\n .col-lg-offset-10 {\n margin-left: 83.33333333%;\n }\n .col-lg-offset-9 {\n margin-left: 75%;\n }\n .col-lg-offset-8 {\n margin-left: 66.66666667%;\n }\n .col-lg-offset-7 {\n margin-left: 58.33333333%;\n }\n .col-lg-offset-6 {\n margin-left: 50%;\n }\n .col-lg-offset-5 {\n margin-left: 41.66666667%;\n }\n .col-lg-offset-4 {\n margin-left: 33.33333333%;\n }\n .col-lg-offset-3 {\n margin-left: 25%;\n }\n .col-lg-offset-2 {\n margin-left: 16.66666667%;\n }\n .col-lg-offset-1 {\n margin-left: 8.33333333%;\n }\n .col-lg-offset-0 {\n margin-left: 0%;\n }\n}\ntable {\n 
background-color: transparent;\n}\ncaption {\n padding-top: 8px;\n padding-bottom: 8px;\n color: #777777;\n text-align: left;\n}\nth {\n text-align: left;\n}\n.table {\n width: 100%;\n max-width: 100%;\n margin-bottom: 20px;\n}\n.table > thead > tr > th,\n.table > tbody > tr > th,\n.table > tfoot > tr > th,\n.table > thead > tr > td,\n.table > tbody > tr > td,\n.table > tfoot > tr > td {\n padding: 8px;\n line-height: 1.42857143;\n vertical-align: top;\n border-top: 1px solid #ddd;\n}\n.table > thead > tr > th {\n vertical-align: bottom;\n border-bottom: 2px solid #ddd;\n}\n.table > caption + thead > tr:first-child > th,\n.table > colgroup + thead > tr:first-child > th,\n.table > thead:first-child > tr:first-child > th,\n.table > caption + thead > tr:first-child > td,\n.table > colgroup + thead > tr:first-child > td,\n.table > thead:first-child > tr:first-child > td {\n border-top: 0;\n}\n.table > tbody + tbody {\n border-top: 2px solid #ddd;\n}\n.table .table {\n background-color: #fff;\n}\n.table-condensed > thead > tr > th,\n.table-condensed > tbody > tr > th,\n.table-condensed > tfoot > tr > th,\n.table-condensed > thead > tr > td,\n.table-condensed > tbody > tr > td,\n.table-condensed > tfoot > tr > td {\n padding: 5px;\n}\n.table-bordered {\n border: 1px solid #ddd;\n}\n.table-bordered > thead > tr > th,\n.table-bordered > tbody > tr > th,\n.table-bordered > tfoot > tr > th,\n.table-bordered > thead > tr > td,\n.table-bordered > tbody > tr > td,\n.table-bordered > tfoot > tr > td {\n border: 1px solid #ddd;\n}\n.table-bordered > thead > tr > th,\n.table-bordered > thead > tr > td {\n border-bottom-width: 2px;\n}\n.table-striped > tbody > tr:nth-of-type(odd) {\n background-color: #f9f9f9;\n}\n.table-hover > tbody > tr:hover {\n background-color: #f5f5f5;\n}\ntable col[class*=\"col-\"] {\n position: static;\n float: none;\n display: table-column;\n}\ntable td[class*=\"col-\"],\ntable th[class*=\"col-\"] {\n position: static;\n float: none;\n display: table-cell;\n}\n.table > thead > tr > td.active,\n.table > tbody > tr > td.active,\n.table > tfoot > tr > td.active,\n.table > thead > tr > th.active,\n.table > tbody > tr > th.active,\n.table > tfoot > tr > th.active,\n.table > thead > tr.active > td,\n.table > tbody > tr.active > td,\n.table > tfoot > tr.active > td,\n.table > thead > tr.active > th,\n.table > tbody > tr.active > th,\n.table > tfoot > tr.active > th {\n background-color: #f5f5f5;\n}\n.table-hover > tbody > tr > td.active:hover,\n.table-hover > tbody > tr > th.active:hover,\n.table-hover > tbody > tr.active:hover > td,\n.table-hover > tbody > tr:hover > .active,\n.table-hover > tbody > tr.active:hover > th {\n background-color: #e8e8e8;\n}\n.table > thead > tr > td.success,\n.table > tbody > tr > td.success,\n.table > tfoot > tr > td.success,\n.table > thead > tr > th.success,\n.table > tbody > tr > th.success,\n.table > tfoot > tr > th.success,\n.table > thead > tr.success > td,\n.table > tbody > tr.success > td,\n.table > tfoot > tr.success > td,\n.table > thead > tr.success > th,\n.table > tbody > tr.success > th,\n.table > tfoot > tr.success > th {\n background-color: #dff0d8;\n}\n.table-hover > tbody > tr > td.success:hover,\n.table-hover > tbody > tr > th.success:hover,\n.table-hover > tbody > tr.success:hover > td,\n.table-hover > tbody > tr:hover > .success,\n.table-hover > tbody > tr.success:hover > th {\n background-color: #d0e9c6;\n}\n.table > thead > tr > td.info,\n.table > tbody > tr > td.info,\n.table > tfoot > tr > td.info,\n.table > thead > tr > 
th.info,\n.table > tbody > tr > th.info,\n.table > tfoot > tr > th.info,\n.table > thead > tr.info > td,\n.table > tbody > tr.info > td,\n.table > tfoot > tr.info > td,\n.table > thead > tr.info > th,\n.table > tbody > tr.info > th,\n.table > tfoot > tr.info > th {\n background-color: #d9edf7;\n}\n.table-hover > tbody > tr > td.info:hover,\n.table-hover > tbody > tr > th.info:hover,\n.table-hover > tbody > tr.info:hover > td,\n.table-hover > tbody > tr:hover > .info,\n.table-hover > tbody > tr.info:hover > th {\n background-color: #c4e3f3;\n}\n.table > thead > tr > td.warning,\n.table > tbody > tr > td.warning,\n.table > tfoot > tr > td.warning,\n.table > thead > tr > th.warning,\n.table > tbody > tr > th.warning,\n.table > tfoot > tr > th.warning,\n.table > thead > tr.warning > td,\n.table > tbody > tr.warning > td,\n.table > tfoot > tr.warning > td,\n.table > thead > tr.warning > th,\n.table > tbody > tr.warning > th,\n.table > tfoot > tr.warning > th {\n background-color: #fcf8e3;\n}\n.table-hover > tbody > tr > td.warning:hover,\n.table-hover > tbody > tr > th.warning:hover,\n.table-hover > tbody > tr.warning:hover > td,\n.table-hover > tbody > tr:hover > .warning,\n.table-hover > tbody > tr.warning:hover > th {\n background-color: #faf2cc;\n}\n.table > thead > tr > td.danger,\n.table > tbody > tr > td.danger,\n.table > tfoot > tr > td.danger,\n.table > thead > tr > th.danger,\n.table > tbody > tr > th.danger,\n.table > tfoot > tr > th.danger,\n.table > thead > tr.danger > td,\n.table > tbody > tr.danger > td,\n.table > tfoot > tr.danger > td,\n.table > thead > tr.danger > th,\n.table > tbody > tr.danger > th,\n.table > tfoot > tr.danger > th {\n background-color: #f2dede;\n}\n.table-hover > tbody > tr > td.danger:hover,\n.table-hover > tbody > tr > th.danger:hover,\n.table-hover > tbody > tr.danger:hover > td,\n.table-hover > tbody > tr:hover > .danger,\n.table-hover > tbody > tr.danger:hover > th {\n background-color: #ebcccc;\n}\n.table-responsive {\n overflow-x: auto;\n min-height: 0.01%;\n}\n@media screen and (max-width: 767px) {\n .table-responsive {\n width: 100%;\n margin-bottom: 15px;\n overflow-y: hidden;\n -ms-overflow-style: -ms-autohiding-scrollbar;\n border: 1px solid #ddd;\n }\n .table-responsive > .table {\n margin-bottom: 0;\n }\n .table-responsive > .table > thead > tr > th,\n .table-responsive > .table > tbody > tr > th,\n .table-responsive > .table > tfoot > tr > th,\n .table-responsive > .table > thead > tr > td,\n .table-responsive > .table > tbody > tr > td,\n .table-responsive > .table > tfoot > tr > td {\n white-space: nowrap;\n }\n .table-responsive > .table-bordered {\n border: 0;\n }\n .table-responsive > .table-bordered > thead > tr > th:first-child,\n .table-responsive > .table-bordered > tbody > tr > th:first-child,\n .table-responsive > .table-bordered > tfoot > tr > th:first-child,\n .table-responsive > .table-bordered > thead > tr > td:first-child,\n .table-responsive > .table-bordered > tbody > tr > td:first-child,\n .table-responsive > .table-bordered > tfoot > tr > td:first-child {\n border-left: 0;\n }\n .table-responsive > .table-bordered > thead > tr > th:last-child,\n .table-responsive > .table-bordered > tbody > tr > th:last-child,\n .table-responsive > .table-bordered > tfoot > tr > th:last-child,\n .table-responsive > .table-bordered > thead > tr > td:last-child,\n .table-responsive > .table-bordered > tbody > tr > td:last-child,\n .table-responsive > .table-bordered > tfoot > tr > td:last-child {\n border-right: 0;\n }\n .table-responsive > 
.table-bordered > tbody > tr:last-child > th,\n .table-responsive > .table-bordered > tfoot > tr:last-child > th,\n .table-responsive > .table-bordered > tbody > tr:last-child > td,\n .table-responsive > .table-bordered > tfoot > tr:last-child > td {\n border-bottom: 0;\n }\n}\nfieldset {\n padding: 0;\n margin: 0;\n border: 0;\n min-width: 0;\n}\nlegend {\n display: block;\n width: 100%;\n padding: 0;\n margin-bottom: 20px;\n font-size: 21px;\n line-height: inherit;\n color: #333333;\n border: 0;\n border-bottom: 1px solid #e5e5e5;\n}\nlabel {\n display: inline-block;\n max-width: 100%;\n margin-bottom: 5px;\n font-weight: bold;\n}\ninput[type=\"search\"] {\n -webkit-box-sizing: border-box;\n -moz-box-sizing: border-box;\n box-sizing: border-box;\n}\ninput[type=\"radio\"],\ninput[type=\"checkbox\"] {\n margin: 4px 0 0;\n margin-top: 1px \\9;\n line-height: normal;\n}\ninput[type=\"file\"] {\n display: block;\n}\ninput[type=\"range\"] {\n display: block;\n width: 100%;\n}\nselect[multiple],\nselect[size] {\n height: auto;\n}\ninput[type=\"file\"]:focus,\ninput[type=\"radio\"]:focus,\ninput[type=\"checkbox\"]:focus {\n outline: 5px auto -webkit-focus-ring-color;\n outline-offset: -2px;\n}\noutput {\n display: block;\n padding-top: 7px;\n font-size: 14px;\n line-height: 1.42857143;\n color: #555555;\n}\n.form-control {\n display: block;\n width: 100%;\n height: 34px;\n padding: 6px 12px;\n font-size: 14px;\n line-height: 1.42857143;\n color: #555555;\n background-color: #fff;\n background-image: none;\n border: 1px solid #ccc;\n border-radius: 4px;\n -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n -webkit-transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;\n -o-transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;\n transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;\n}\n.form-control:focus {\n border-color: #66afe9;\n outline: 0;\n -webkit-box-shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px rgba(102, 175, 233, 0.6);\n box-shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px rgba(102, 175, 233, 0.6);\n}\n.form-control::-moz-placeholder {\n color: #999;\n opacity: 1;\n}\n.form-control:-ms-input-placeholder {\n color: #999;\n}\n.form-control::-webkit-input-placeholder {\n color: #999;\n}\n.form-control::-ms-expand {\n border: 0;\n background-color: transparent;\n}\n.form-control[disabled],\n.form-control[readonly],\nfieldset[disabled] .form-control {\n background-color: #eeeeee;\n opacity: 1;\n}\n.form-control[disabled],\nfieldset[disabled] .form-control {\n cursor: not-allowed;\n}\ntextarea.form-control {\n height: auto;\n}\ninput[type=\"search\"] {\n -webkit-appearance: none;\n}\n@media screen and (-webkit-min-device-pixel-ratio: 0) {\n input[type=\"date\"].form-control,\n input[type=\"time\"].form-control,\n input[type=\"datetime-local\"].form-control,\n input[type=\"month\"].form-control {\n line-height: 34px;\n }\n input[type=\"date\"].input-sm,\n input[type=\"time\"].input-sm,\n input[type=\"datetime-local\"].input-sm,\n input[type=\"month\"].input-sm,\n .input-group-sm input[type=\"date\"],\n .input-group-sm input[type=\"time\"],\n .input-group-sm input[type=\"datetime-local\"],\n .input-group-sm input[type=\"month\"] {\n line-height: 30px;\n }\n input[type=\"date\"].input-lg,\n input[type=\"time\"].input-lg,\n input[type=\"datetime-local\"].input-lg,\n input[type=\"month\"].input-lg,\n .input-group-lg input[type=\"date\"],\n .input-group-lg input[type=\"time\"],\n 
.input-group-lg input[type=\"datetime-local\"],\n .input-group-lg input[type=\"month\"] {\n line-height: 46px;\n }\n}\n.form-group {\n margin-bottom: 15px;\n}\n.radio,\n.checkbox {\n position: relative;\n display: block;\n margin-top: 10px;\n margin-bottom: 10px;\n}\n.radio label,\n.checkbox label {\n min-height: 20px;\n padding-left: 20px;\n margin-bottom: 0;\n font-weight: normal;\n cursor: pointer;\n}\n.radio input[type=\"radio\"],\n.radio-inline input[type=\"radio\"],\n.checkbox input[type=\"checkbox\"],\n.checkbox-inline input[type=\"checkbox\"] {\n position: absolute;\n margin-left: -20px;\n margin-top: 4px \\9;\n}\n.radio + .radio,\n.checkbox + .checkbox {\n margin-top: -5px;\n}\n.radio-inline,\n.checkbox-inline {\n position: relative;\n display: inline-block;\n padding-left: 20px;\n margin-bottom: 0;\n vertical-align: middle;\n font-weight: normal;\n cursor: pointer;\n}\n.radio-inline + .radio-inline,\n.checkbox-inline + .checkbox-inline {\n margin-top: 0;\n margin-left: 10px;\n}\ninput[type=\"radio\"][disabled],\ninput[type=\"checkbox\"][disabled],\ninput[type=\"radio\"].disabled,\ninput[type=\"checkbox\"].disabled,\nfieldset[disabled] input[type=\"radio\"],\nfieldset[disabled] input[type=\"checkbox\"] {\n cursor: not-allowed;\n}\n.radio-inline.disabled,\n.checkbox-inline.disabled,\nfieldset[disabled] .radio-inline,\nfieldset[disabled] .checkbox-inline {\n cursor: not-allowed;\n}\n.radio.disabled label,\n.checkbox.disabled label,\nfieldset[disabled] .radio label,\nfieldset[disabled] .checkbox label {\n cursor: not-allowed;\n}\n.form-control-static {\n padding-top: 7px;\n padding-bottom: 7px;\n margin-bottom: 0;\n min-height: 34px;\n}\n.form-control-static.input-lg,\n.form-control-static.input-sm {\n padding-left: 0;\n padding-right: 0;\n}\n.input-sm {\n height: 30px;\n padding: 5px 10px;\n font-size: 12px;\n line-height: 1.5;\n border-radius: 3px;\n}\nselect.input-sm {\n height: 30px;\n line-height: 30px;\n}\ntextarea.input-sm,\nselect[multiple].input-sm {\n height: auto;\n}\n.form-group-sm .form-control {\n height: 30px;\n padding: 5px 10px;\n font-size: 12px;\n line-height: 1.5;\n border-radius: 3px;\n}\n.form-group-sm select.form-control {\n height: 30px;\n line-height: 30px;\n}\n.form-group-sm textarea.form-control,\n.form-group-sm select[multiple].form-control {\n height: auto;\n}\n.form-group-sm .form-control-static {\n height: 30px;\n min-height: 32px;\n padding: 6px 10px;\n font-size: 12px;\n line-height: 1.5;\n}\n.input-lg {\n height: 46px;\n padding: 10px 16px;\n font-size: 18px;\n line-height: 1.3333333;\n border-radius: 6px;\n}\nselect.input-lg {\n height: 46px;\n line-height: 46px;\n}\ntextarea.input-lg,\nselect[multiple].input-lg {\n height: auto;\n}\n.form-group-lg .form-control {\n height: 46px;\n padding: 10px 16px;\n font-size: 18px;\n line-height: 1.3333333;\n border-radius: 6px;\n}\n.form-group-lg select.form-control {\n height: 46px;\n line-height: 46px;\n}\n.form-group-lg textarea.form-control,\n.form-group-lg select[multiple].form-control {\n height: auto;\n}\n.form-group-lg .form-control-static {\n height: 46px;\n min-height: 38px;\n padding: 11px 16px;\n font-size: 18px;\n line-height: 1.3333333;\n}\n.has-feedback {\n position: relative;\n}\n.has-feedback .form-control {\n padding-right: 42.5px;\n}\n.form-control-feedback {\n position: absolute;\n top: 0;\n right: 0;\n z-index: 2;\n display: block;\n width: 34px;\n height: 34px;\n line-height: 34px;\n text-align: center;\n pointer-events: none;\n}\n.input-lg + .form-control-feedback,\n.input-group-lg + 
.form-control-feedback,\n.form-group-lg .form-control + .form-control-feedback {\n width: 46px;\n height: 46px;\n line-height: 46px;\n}\n.input-sm + .form-control-feedback,\n.input-group-sm + .form-control-feedback,\n.form-group-sm .form-control + .form-control-feedback {\n width: 30px;\n height: 30px;\n line-height: 30px;\n}\n.has-success .help-block,\n.has-success .control-label,\n.has-success .radio,\n.has-success .checkbox,\n.has-success .radio-inline,\n.has-success .checkbox-inline,\n.has-success.radio label,\n.has-success.checkbox label,\n.has-success.radio-inline label,\n.has-success.checkbox-inline label {\n color: #3c763d;\n}\n.has-success .form-control {\n border-color: #3c763d;\n -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n}\n.has-success .form-control:focus {\n border-color: #2b542c;\n -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #67b168;\n box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #67b168;\n}\n.has-success .input-group-addon {\n color: #3c763d;\n border-color: #3c763d;\n background-color: #dff0d8;\n}\n.has-success .form-control-feedback {\n color: #3c763d;\n}\n.has-warning .help-block,\n.has-warning .control-label,\n.has-warning .radio,\n.has-warning .checkbox,\n.has-warning .radio-inline,\n.has-warning .checkbox-inline,\n.has-warning.radio label,\n.has-warning.checkbox label,\n.has-warning.radio-inline label,\n.has-warning.checkbox-inline label {\n color: #8a6d3b;\n}\n.has-warning .form-control {\n border-color: #8a6d3b;\n -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n}\n.has-warning .form-control:focus {\n border-color: #66512c;\n -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #c0a16b;\n box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #c0a16b;\n}\n.has-warning .input-group-addon {\n color: #8a6d3b;\n border-color: #8a6d3b;\n background-color: #fcf8e3;\n}\n.has-warning .form-control-feedback {\n color: #8a6d3b;\n}\n.has-error .help-block,\n.has-error .control-label,\n.has-error .radio,\n.has-error .checkbox,\n.has-error .radio-inline,\n.has-error .checkbox-inline,\n.has-error.radio label,\n.has-error.checkbox label,\n.has-error.radio-inline label,\n.has-error.checkbox-inline label {\n color: #a94442;\n}\n.has-error .form-control {\n border-color: #a94442;\n -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n}\n.has-error .form-control:focus {\n border-color: #843534;\n -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #ce8483;\n box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #ce8483;\n}\n.has-error .input-group-addon {\n color: #a94442;\n border-color: #a94442;\n background-color: #f2dede;\n}\n.has-error .form-control-feedback {\n color: #a94442;\n}\n.has-feedback label ~ .form-control-feedback {\n top: 25px;\n}\n.has-feedback label.sr-only ~ .form-control-feedback {\n top: 0;\n}\n.help-block {\n display: block;\n margin-top: 5px;\n margin-bottom: 10px;\n color: #737373;\n}\n@media (min-width: 768px) {\n .form-inline .form-group {\n display: inline-block;\n margin-bottom: 0;\n vertical-align: middle;\n }\n .form-inline .form-control {\n display: inline-block;\n width: auto;\n vertical-align: middle;\n }\n .form-inline .form-control-static {\n display: inline-block;\n }\n .form-inline .input-group {\n display: inline-table;\n vertical-align: middle;\n }\n .form-inline .input-group 
.input-group-addon,\n .form-inline .input-group .input-group-btn,\n .form-inline .input-group .form-control {\n width: auto;\n }\n .form-inline .input-group > .form-control {\n width: 100%;\n }\n .form-inline .control-label {\n margin-bottom: 0;\n vertical-align: middle;\n }\n .form-inline .radio,\n .form-inline .checkbox {\n display: inline-block;\n margin-top: 0;\n margin-bottom: 0;\n vertical-align: middle;\n }\n .form-inline .radio label,\n .form-inline .checkbox label {\n padding-left: 0;\n }\n .form-inline .radio input[type=\"radio\"],\n .form-inline .checkbox input[type=\"checkbox\"] {\n position: relative;\n margin-left: 0;\n }\n .form-inline .has-feedback .form-control-feedback {\n top: 0;\n }\n}\n.form-horizontal .radio,\n.form-horizontal .checkbox,\n.form-horizontal .radio-inline,\n.form-horizontal .checkbox-inline {\n margin-top: 0;\n margin-bottom: 0;\n padding-top: 7px;\n}\n.form-horizontal .radio,\n.form-horizontal .checkbox {\n min-height: 27px;\n}\n.form-horizontal .form-group {\n margin-left: -15px;\n margin-right: -15px;\n}\n@media (min-width: 768px) {\n .form-horizontal .control-label {\n text-align: right;\n margin-bottom: 0;\n padding-top: 7px;\n }\n}\n.form-horizontal .has-feedback .form-control-feedback {\n right: 15px;\n}\n@media (min-width: 768px) {\n .form-horizontal .form-group-lg .control-label {\n padding-top: 11px;\n font-size: 18px;\n }\n}\n@media (min-width: 768px) {\n .form-horizontal .form-group-sm .control-label {\n padding-top: 6px;\n font-size: 12px;\n }\n}\n.btn {\n display: inline-block;\n margin-bottom: 0;\n font-weight: normal;\n text-align: center;\n vertical-align: middle;\n touch-action: manipulation;\n cursor: pointer;\n background-image: none;\n border: 1px solid transparent;\n white-space: nowrap;\n padding: 6px 12px;\n font-size: 14px;\n line-height: 1.42857143;\n border-radius: 4px;\n -webkit-user-select: none;\n -moz-user-select: none;\n -ms-user-select: none;\n user-select: none;\n}\n.btn:focus,\n.btn:active:focus,\n.btn.active:focus,\n.btn.focus,\n.btn:active.focus,\n.btn.active.focus {\n outline: 5px auto -webkit-focus-ring-color;\n outline-offset: -2px;\n}\n.btn:hover,\n.btn:focus,\n.btn.focus {\n color: #333;\n text-decoration: none;\n}\n.btn:active,\n.btn.active {\n outline: 0;\n background-image: none;\n -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n}\n.btn.disabled,\n.btn[disabled],\nfieldset[disabled] .btn {\n cursor: not-allowed;\n opacity: 0.65;\n filter: alpha(opacity=65);\n -webkit-box-shadow: none;\n box-shadow: none;\n}\na.btn.disabled,\nfieldset[disabled] a.btn {\n pointer-events: none;\n}\n.btn-default {\n color: #333;\n background-color: #fff;\n border-color: #ccc;\n}\n.btn-default:focus,\n.btn-default.focus {\n color: #333;\n background-color: #e6e6e6;\n border-color: #8c8c8c;\n}\n.btn-default:hover {\n color: #333;\n background-color: #e6e6e6;\n border-color: #adadad;\n}\n.btn-default:active,\n.btn-default.active,\n.open > .dropdown-toggle.btn-default {\n color: #333;\n background-color: #e6e6e6;\n border-color: #adadad;\n}\n.btn-default:active:hover,\n.btn-default.active:hover,\n.open > .dropdown-toggle.btn-default:hover,\n.btn-default:active:focus,\n.btn-default.active:focus,\n.open > .dropdown-toggle.btn-default:focus,\n.btn-default:active.focus,\n.btn-default.active.focus,\n.open > .dropdown-toggle.btn-default.focus {\n color: #333;\n background-color: #d4d4d4;\n border-color: #8c8c8c;\n}\n.btn-default:active,\n.btn-default.active,\n.open > 
.dropdown-toggle.btn-default {\n background-image: none;\n}\n.btn-default.disabled:hover,\n.btn-default[disabled]:hover,\nfieldset[disabled] .btn-default:hover,\n.btn-default.disabled:focus,\n.btn-default[disabled]:focus,\nfieldset[disabled] .btn-default:focus,\n.btn-default.disabled.focus,\n.btn-default[disabled].focus,\nfieldset[disabled] .btn-default.focus {\n background-color: #fff;\n border-color: #ccc;\n}\n.btn-default .badge {\n color: #fff;\n background-color: #333;\n}\n.btn-primary {\n color: #fff;\n background-color: #337ab7;\n border-color: #2e6da4;\n}\n.btn-primary:focus,\n.btn-primary.focus {\n color: #fff;\n background-color: #286090;\n border-color: #122b40;\n}\n.btn-primary:hover {\n color: #fff;\n background-color: #286090;\n border-color: #204d74;\n}\n.btn-primary:active,\n.btn-primary.active,\n.open > .dropdown-toggle.btn-primary {\n color: #fff;\n background-color: #286090;\n border-color: #204d74;\n}\n.btn-primary:active:hover,\n.btn-primary.active:hover,\n.open > .dropdown-toggle.btn-primary:hover,\n.btn-primary:active:focus,\n.btn-primary.active:focus,\n.open > .dropdown-toggle.btn-primary:focus,\n.btn-primary:active.focus,\n.btn-primary.active.focus,\n.open > .dropdown-toggle.btn-primary.focus {\n color: #fff;\n background-color: #204d74;\n border-color: #122b40;\n}\n.btn-primary:active,\n.btn-primary.active,\n.open > .dropdown-toggle.btn-primary {\n background-image: none;\n}\n.btn-primary.disabled:hover,\n.btn-primary[disabled]:hover,\nfieldset[disabled] .btn-primary:hover,\n.btn-primary.disabled:focus,\n.btn-primary[disabled]:focus,\nfieldset[disabled] .btn-primary:focus,\n.btn-primary.disabled.focus,\n.btn-primary[disabled].focus,\nfieldset[disabled] .btn-primary.focus {\n background-color: #337ab7;\n border-color: #2e6da4;\n}\n.btn-primary .badge {\n color: #337ab7;\n background-color: #fff;\n}\n.btn-success {\n color: #fff;\n background-color: #5cb85c;\n border-color: #4cae4c;\n}\n.btn-success:focus,\n.btn-success.focus {\n color: #fff;\n background-color: #449d44;\n border-color: #255625;\n}\n.btn-success:hover {\n color: #fff;\n background-color: #449d44;\n border-color: #398439;\n}\n.btn-success:active,\n.btn-success.active,\n.open > .dropdown-toggle.btn-success {\n color: #fff;\n background-color: #449d44;\n border-color: #398439;\n}\n.btn-success:active:hover,\n.btn-success.active:hover,\n.open > .dropdown-toggle.btn-success:hover,\n.btn-success:active:focus,\n.btn-success.active:focus,\n.open > .dropdown-toggle.btn-success:focus,\n.btn-success:active.focus,\n.btn-success.active.focus,\n.open > .dropdown-toggle.btn-success.focus {\n color: #fff;\n background-color: #398439;\n border-color: #255625;\n}\n.btn-success:active,\n.btn-success.active,\n.open > .dropdown-toggle.btn-success {\n background-image: none;\n}\n.btn-success.disabled:hover,\n.btn-success[disabled]:hover,\nfieldset[disabled] .btn-success:hover,\n.btn-success.disabled:focus,\n.btn-success[disabled]:focus,\nfieldset[disabled] .btn-success:focus,\n.btn-success.disabled.focus,\n.btn-success[disabled].focus,\nfieldset[disabled] .btn-success.focus {\n background-color: #5cb85c;\n border-color: #4cae4c;\n}\n.btn-success .badge {\n color: #5cb85c;\n background-color: #fff;\n}\n.btn-info {\n color: #fff;\n background-color: #5bc0de;\n border-color: #46b8da;\n}\n.btn-info:focus,\n.btn-info.focus {\n color: #fff;\n background-color: #31b0d5;\n border-color: #1b6d85;\n}\n.btn-info:hover {\n color: #fff;\n background-color: #31b0d5;\n border-color: 
#269abc;\n}\n.btn-info:active,\n.btn-info.active,\n.open > .dropdown-toggle.btn-info {\n color: #fff;\n background-color: #31b0d5;\n border-color: #269abc;\n}\n.btn-info:active:hover,\n.btn-info.active:hover,\n.open > .dropdown-toggle.btn-info:hover,\n.btn-info:active:focus,\n.btn-info.active:focus,\n.open > .dropdown-toggle.btn-info:focus,\n.btn-info:active.focus,\n.btn-info.active.focus,\n.open > .dropdown-toggle.btn-info.focus {\n color: #fff;\n background-color: #269abc;\n border-color: #1b6d85;\n}\n.btn-info:active,\n.btn-info.active,\n.open > .dropdown-toggle.btn-info {\n background-image: none;\n}\n.btn-info.disabled:hover,\n.btn-info[disabled]:hover,\nfieldset[disabled] .btn-info:hover,\n.btn-info.disabled:focus,\n.btn-info[disabled]:focus,\nfieldset[disabled] .btn-info:focus,\n.btn-info.disabled.focus,\n.btn-info[disabled].focus,\nfieldset[disabled] .btn-info.focus {\n background-color: #5bc0de;\n border-color: #46b8da;\n}\n.btn-info .badge {\n color: #5bc0de;\n background-color: #fff;\n}\n.btn-warning {\n color: #fff;\n background-color: #f0ad4e;\n border-color: #eea236;\n}\n.btn-warning:focus,\n.btn-warning.focus {\n color: #fff;\n background-color: #ec971f;\n border-color: #985f0d;\n}\n.btn-warning:hover {\n color: #fff;\n background-color: #ec971f;\n border-color: #d58512;\n}\n.btn-warning:active,\n.btn-warning.active,\n.open > .dropdown-toggle.btn-warning {\n color: #fff;\n background-color: #ec971f;\n border-color: #d58512;\n}\n.btn-warning:active:hover,\n.btn-warning.active:hover,\n.open > .dropdown-toggle.btn-warning:hover,\n.btn-warning:active:focus,\n.btn-warning.active:focus,\n.open > .dropdown-toggle.btn-warning:focus,\n.btn-warning:active.focus,\n.btn-warning.active.focus,\n.open > .dropdown-toggle.btn-warning.focus {\n color: #fff;\n background-color: #d58512;\n border-color: #985f0d;\n}\n.btn-warning:active,\n.btn-warning.active,\n.open > .dropdown-toggle.btn-warning {\n background-image: none;\n}\n.btn-warning.disabled:hover,\n.btn-warning[disabled]:hover,\nfieldset[disabled] .btn-warning:hover,\n.btn-warning.disabled:focus,\n.btn-warning[disabled]:focus,\nfieldset[disabled] .btn-warning:focus,\n.btn-warning.disabled.focus,\n.btn-warning[disabled].focus,\nfieldset[disabled] .btn-warning.focus {\n background-color: #f0ad4e;\n border-color: #eea236;\n}\n.btn-warning .badge {\n color: #f0ad4e;\n background-color: #fff;\n}\n.btn-danger {\n color: #fff;\n background-color: #d9534f;\n border-color: #d43f3a;\n}\n.btn-danger:focus,\n.btn-danger.focus {\n color: #fff;\n background-color: #c9302c;\n border-color: #761c19;\n}\n.btn-danger:hover {\n color: #fff;\n background-color: #c9302c;\n border-color: #ac2925;\n}\n.btn-danger:active,\n.btn-danger.active,\n.open > .dropdown-toggle.btn-danger {\n color: #fff;\n background-color: #c9302c;\n border-color: #ac2925;\n}\n.btn-danger:active:hover,\n.btn-danger.active:hover,\n.open > .dropdown-toggle.btn-danger:hover,\n.btn-danger:active:focus,\n.btn-danger.active:focus,\n.open > .dropdown-toggle.btn-danger:focus,\n.btn-danger:active.focus,\n.btn-danger.active.focus,\n.open > .dropdown-toggle.btn-danger.focus {\n color: #fff;\n background-color: #ac2925;\n border-color: #761c19;\n}\n.btn-danger:active,\n.btn-danger.active,\n.open > .dropdown-toggle.btn-danger {\n background-image: none;\n}\n.btn-danger.disabled:hover,\n.btn-danger[disabled]:hover,\nfieldset[disabled] .btn-danger:hover,\n.btn-danger.disabled:focus,\n.btn-danger[disabled]:focus,\nfieldset[disabled] 
.btn-danger:focus,\n.btn-danger.disabled.focus,\n.btn-danger[disabled].focus,\nfieldset[disabled] .btn-danger.focus {\n background-color: #d9534f;\n border-color: #d43f3a;\n}\n.btn-danger .badge {\n color: #d9534f;\n background-color: #fff;\n}\n.btn-link {\n color: #337ab7;\n font-weight: normal;\n border-radius: 0;\n}\n.btn-link,\n.btn-link:active,\n.btn-link.active,\n.btn-link[disabled],\nfieldset[disabled] .btn-link {\n background-color: transparent;\n -webkit-box-shadow: none;\n box-shadow: none;\n}\n.btn-link,\n.btn-link:hover,\n.btn-link:focus,\n.btn-link:active {\n border-color: transparent;\n}\n.btn-link:hover,\n.btn-link:focus {\n color: #23527c;\n text-decoration: underline;\n background-color: transparent;\n}\n.btn-link[disabled]:hover,\nfieldset[disabled] .btn-link:hover,\n.btn-link[disabled]:focus,\nfieldset[disabled] .btn-link:focus {\n color: #777777;\n text-decoration: none;\n}\n.btn-lg,\n.btn-group-lg > .btn {\n padding: 10px 16px;\n font-size: 18px;\n line-height: 1.3333333;\n border-radius: 6px;\n}\n.btn-sm,\n.btn-group-sm > .btn {\n padding: 5px 10px;\n font-size: 12px;\n line-height: 1.5;\n border-radius: 3px;\n}\n.btn-xs,\n.btn-group-xs > .btn {\n padding: 1px 5px;\n font-size: 12px;\n line-height: 1.5;\n border-radius: 3px;\n}\n.btn-block {\n display: block;\n width: 100%;\n}\n.btn-block + .btn-block {\n margin-top: 5px;\n}\ninput[type=\"submit\"].btn-block,\ninput[type=\"reset\"].btn-block,\ninput[type=\"button\"].btn-block {\n width: 100%;\n}\n.fade {\n opacity: 0;\n -webkit-transition: opacity 0.15s linear;\n -o-transition: opacity 0.15s linear;\n transition: opacity 0.15s linear;\n}\n.fade.in {\n opacity: 1;\n}\n.collapse {\n display: none;\n}\n.collapse.in {\n display: block;\n}\ntr.collapse.in {\n display: table-row;\n}\ntbody.collapse.in {\n display: table-row-group;\n}\n.collapsing {\n position: relative;\n height: 0;\n overflow: hidden;\n -webkit-transition-property: height, visibility;\n transition-property: height, visibility;\n -webkit-transition-duration: 0.35s;\n transition-duration: 0.35s;\n -webkit-transition-timing-function: ease;\n transition-timing-function: ease;\n}\n.caret {\n display: inline-block;\n width: 0;\n height: 0;\n margin-left: 2px;\n vertical-align: middle;\n border-top: 4px dashed;\n border-top: 4px solid \\9;\n border-right: 4px solid transparent;\n border-left: 4px solid transparent;\n}\n.dropup,\n.dropdown {\n position: relative;\n}\n.dropdown-toggle:focus {\n outline: 0;\n}\n.dropdown-menu {\n position: absolute;\n top: 100%;\n left: 0;\n z-index: 1000;\n display: none;\n float: left;\n min-width: 160px;\n padding: 5px 0;\n margin: 2px 0 0;\n list-style: none;\n font-size: 14px;\n text-align: left;\n background-color: #fff;\n border: 1px solid #ccc;\n border: 1px solid rgba(0, 0, 0, 0.15);\n border-radius: 4px;\n -webkit-box-shadow: 0 6px 12px rgba(0, 0, 0, 0.175);\n box-shadow: 0 6px 12px rgba(0, 0, 0, 0.175);\n background-clip: padding-box;\n}\n.dropdown-menu.pull-right {\n right: 0;\n left: auto;\n}\n.dropdown-menu .divider {\n height: 1px;\n margin: 9px 0;\n overflow: hidden;\n background-color: #e5e5e5;\n}\n.dropdown-menu > li > a {\n display: block;\n padding: 3px 20px;\n clear: both;\n font-weight: normal;\n line-height: 1.42857143;\n color: #333333;\n white-space: nowrap;\n}\n.dropdown-menu > li > a:hover,\n.dropdown-menu > li > a:focus {\n text-decoration: none;\n color: #262626;\n background-color: #f5f5f5;\n}\n.dropdown-menu > .active > a,\n.dropdown-menu > .active > a:hover,\n.dropdown-menu > .active > a:focus {\n 
color: #fff;\n text-decoration: none;\n outline: 0;\n background-color: #337ab7;\n}\n.dropdown-menu > .disabled > a,\n.dropdown-menu > .disabled > a:hover,\n.dropdown-menu > .disabled > a:focus {\n color: #777777;\n}\n.dropdown-menu > .disabled > a:hover,\n.dropdown-menu > .disabled > a:focus {\n text-decoration: none;\n background-color: transparent;\n background-image: none;\n filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n cursor: not-allowed;\n}\n.open > .dropdown-menu {\n display: block;\n}\n.open > a {\n outline: 0;\n}\n.dropdown-menu-right {\n left: auto;\n right: 0;\n}\n.dropdown-menu-left {\n left: 0;\n right: auto;\n}\n.dropdown-header {\n display: block;\n padding: 3px 20px;\n font-size: 12px;\n line-height: 1.42857143;\n color: #777777;\n white-space: nowrap;\n}\n.dropdown-backdrop {\n position: fixed;\n left: 0;\n right: 0;\n bottom: 0;\n top: 0;\n z-index: 990;\n}\n.pull-right > .dropdown-menu {\n right: 0;\n left: auto;\n}\n.dropup .caret,\n.navbar-fixed-bottom .dropdown .caret {\n border-top: 0;\n border-bottom: 4px dashed;\n border-bottom: 4px solid \\9;\n content: \"\";\n}\n.dropup .dropdown-menu,\n.navbar-fixed-bottom .dropdown .dropdown-menu {\n top: auto;\n bottom: 100%;\n margin-bottom: 2px;\n}\n@media (min-width: 768px) {\n .navbar-right .dropdown-menu {\n left: auto;\n right: 0;\n }\n .navbar-right .dropdown-menu-left {\n left: 0;\n right: auto;\n }\n}\n.btn-group,\n.btn-group-vertical {\n position: relative;\n display: inline-block;\n vertical-align: middle;\n}\n.btn-group > .btn,\n.btn-group-vertical > .btn {\n position: relative;\n float: left;\n}\n.btn-group > .btn:hover,\n.btn-group-vertical > .btn:hover,\n.btn-group > .btn:focus,\n.btn-group-vertical > .btn:focus,\n.btn-group > .btn:active,\n.btn-group-vertical > .btn:active,\n.btn-group > .btn.active,\n.btn-group-vertical > .btn.active {\n z-index: 2;\n}\n.btn-group .btn + .btn,\n.btn-group .btn + .btn-group,\n.btn-group .btn-group + .btn,\n.btn-group .btn-group + .btn-group {\n margin-left: -1px;\n}\n.btn-toolbar {\n margin-left: -5px;\n}\n.btn-toolbar .btn,\n.btn-toolbar .btn-group,\n.btn-toolbar .input-group {\n float: left;\n}\n.btn-toolbar > .btn,\n.btn-toolbar > .btn-group,\n.btn-toolbar > .input-group {\n margin-left: 5px;\n}\n.btn-group > .btn:not(:first-child):not(:last-child):not(.dropdown-toggle) {\n border-radius: 0;\n}\n.btn-group > .btn:first-child {\n margin-left: 0;\n}\n.btn-group > .btn:first-child:not(:last-child):not(.dropdown-toggle) {\n border-bottom-right-radius: 0;\n border-top-right-radius: 0;\n}\n.btn-group > .btn:last-child:not(:first-child),\n.btn-group > .dropdown-toggle:not(:first-child) {\n border-bottom-left-radius: 0;\n border-top-left-radius: 0;\n}\n.btn-group > .btn-group {\n float: left;\n}\n.btn-group > .btn-group:not(:first-child):not(:last-child) > .btn {\n border-radius: 0;\n}\n.btn-group > .btn-group:first-child:not(:last-child) > .btn:last-child,\n.btn-group > .btn-group:first-child:not(:last-child) > .dropdown-toggle {\n border-bottom-right-radius: 0;\n border-top-right-radius: 0;\n}\n.btn-group > .btn-group:last-child:not(:first-child) > .btn:first-child {\n border-bottom-left-radius: 0;\n border-top-left-radius: 0;\n}\n.btn-group .dropdown-toggle:active,\n.btn-group.open .dropdown-toggle {\n outline: 0;\n}\n.btn-group > .btn + .dropdown-toggle {\n padding-left: 8px;\n padding-right: 8px;\n}\n.btn-group > .btn-lg + .dropdown-toggle {\n padding-left: 12px;\n padding-right: 12px;\n}\n.btn-group.open .dropdown-toggle {\n -webkit-box-shadow: inset 
0 3px 5px rgba(0, 0, 0, 0.125);\n box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n}\n.btn-group.open .dropdown-toggle.btn-link {\n -webkit-box-shadow: none;\n box-shadow: none;\n}\n.btn .caret {\n margin-left: 0;\n}\n.btn-lg .caret {\n border-width: 5px 5px 0;\n border-bottom-width: 0;\n}\n.dropup .btn-lg .caret {\n border-width: 0 5px 5px;\n}\n.btn-group-vertical > .btn,\n.btn-group-vertical > .btn-group,\n.btn-group-vertical > .btn-group > .btn {\n display: block;\n float: none;\n width: 100%;\n max-width: 100%;\n}\n.btn-group-vertical > .btn-group > .btn {\n float: none;\n}\n.btn-group-vertical > .btn + .btn,\n.btn-group-vertical > .btn + .btn-group,\n.btn-group-vertical > .btn-group + .btn,\n.btn-group-vertical > .btn-group + .btn-group {\n margin-top: -1px;\n margin-left: 0;\n}\n.btn-group-vertical > .btn:not(:first-child):not(:last-child) {\n border-radius: 0;\n}\n.btn-group-vertical > .btn:first-child:not(:last-child) {\n border-top-right-radius: 4px;\n border-top-left-radius: 4px;\n border-bottom-right-radius: 0;\n border-bottom-left-radius: 0;\n}\n.btn-group-vertical > .btn:last-child:not(:first-child) {\n border-top-right-radius: 0;\n border-top-left-radius: 0;\n border-bottom-right-radius: 4px;\n border-bottom-left-radius: 4px;\n}\n.btn-group-vertical > .btn-group:not(:first-child):not(:last-child) > .btn {\n border-radius: 0;\n}\n.btn-group-vertical > .btn-group:first-child:not(:last-child) > .btn:last-child,\n.btn-group-vertical > .btn-group:first-child:not(:last-child) > .dropdown-toggle {\n border-bottom-right-radius: 0;\n border-bottom-left-radius: 0;\n}\n.btn-group-vertical > .btn-group:last-child:not(:first-child) > .btn:first-child {\n border-top-right-radius: 0;\n border-top-left-radius: 0;\n}\n.btn-group-justified {\n display: table;\n width: 100%;\n table-layout: fixed;\n border-collapse: separate;\n}\n.btn-group-justified > .btn,\n.btn-group-justified > .btn-group {\n float: none;\n display: table-cell;\n width: 1%;\n}\n.btn-group-justified > .btn-group .btn {\n width: 100%;\n}\n.btn-group-justified > .btn-group .dropdown-menu {\n left: auto;\n}\n[data-toggle=\"buttons\"] > .btn input[type=\"radio\"],\n[data-toggle=\"buttons\"] > .btn-group > .btn input[type=\"radio\"],\n[data-toggle=\"buttons\"] > .btn input[type=\"checkbox\"],\n[data-toggle=\"buttons\"] > .btn-group > .btn input[type=\"checkbox\"] {\n position: absolute;\n clip: rect(0, 0, 0, 0);\n pointer-events: none;\n}\n.input-group {\n position: relative;\n display: table;\n border-collapse: separate;\n}\n.input-group[class*=\"col-\"] {\n float: none;\n padding-left: 0;\n padding-right: 0;\n}\n.input-group .form-control {\n position: relative;\n z-index: 2;\n float: left;\n width: 100%;\n margin-bottom: 0;\n}\n.input-group .form-control:focus {\n z-index: 3;\n}\n.input-group-lg > .form-control,\n.input-group-lg > .input-group-addon,\n.input-group-lg > .input-group-btn > .btn {\n height: 46px;\n padding: 10px 16px;\n font-size: 18px;\n line-height: 1.3333333;\n border-radius: 6px;\n}\nselect.input-group-lg > .form-control,\nselect.input-group-lg > .input-group-addon,\nselect.input-group-lg > .input-group-btn > .btn {\n height: 46px;\n line-height: 46px;\n}\ntextarea.input-group-lg > .form-control,\ntextarea.input-group-lg > .input-group-addon,\ntextarea.input-group-lg > .input-group-btn > .btn,\nselect[multiple].input-group-lg > .form-control,\nselect[multiple].input-group-lg > .input-group-addon,\nselect[multiple].input-group-lg > .input-group-btn > .btn {\n height: auto;\n}\n.input-group-sm > 
.form-control,\n.input-group-sm > .input-group-addon,\n.input-group-sm > .input-group-btn > .btn {\n height: 30px;\n padding: 5px 10px;\n font-size: 12px;\n line-height: 1.5;\n border-radius: 3px;\n}\nselect.input-group-sm > .form-control,\nselect.input-group-sm > .input-group-addon,\nselect.input-group-sm > .input-group-btn > .btn {\n height: 30px;\n line-height: 30px;\n}\ntextarea.input-group-sm > .form-control,\ntextarea.input-group-sm > .input-group-addon,\ntextarea.input-group-sm > .input-group-btn > .btn,\nselect[multiple].input-group-sm > .form-control,\nselect[multiple].input-group-sm > .input-group-addon,\nselect[multiple].input-group-sm > .input-group-btn > .btn {\n height: auto;\n}\n.input-group-addon,\n.input-group-btn,\n.input-group .form-control {\n display: table-cell;\n}\n.input-group-addon:not(:first-child):not(:last-child),\n.input-group-btn:not(:first-child):not(:last-child),\n.input-group .form-control:not(:first-child):not(:last-child) {\n border-radius: 0;\n}\n.input-group-addon,\n.input-group-btn {\n width: 1%;\n white-space: nowrap;\n vertical-align: middle;\n}\n.input-group-addon {\n padding: 6px 12px;\n font-size: 14px;\n font-weight: normal;\n line-height: 1;\n color: #555555;\n text-align: center;\n background-color: #eeeeee;\n border: 1px solid #ccc;\n border-radius: 4px;\n}\n.input-group-addon.input-sm {\n padding: 5px 10px;\n font-size: 12px;\n border-radius: 3px;\n}\n.input-group-addon.input-lg {\n padding: 10px 16px;\n font-size: 18px;\n border-radius: 6px;\n}\n.input-group-addon input[type=\"radio\"],\n.input-group-addon input[type=\"checkbox\"] {\n margin-top: 0;\n}\n.input-group .form-control:first-child,\n.input-group-addon:first-child,\n.input-group-btn:first-child > .btn,\n.input-group-btn:first-child > .btn-group > .btn,\n.input-group-btn:first-child > .dropdown-toggle,\n.input-group-btn:last-child > .btn:not(:last-child):not(.dropdown-toggle),\n.input-group-btn:last-child > .btn-group:not(:last-child) > .btn {\n border-bottom-right-radius: 0;\n border-top-right-radius: 0;\n}\n.input-group-addon:first-child {\n border-right: 0;\n}\n.input-group .form-control:last-child,\n.input-group-addon:last-child,\n.input-group-btn:last-child > .btn,\n.input-group-btn:last-child > .btn-group > .btn,\n.input-group-btn:last-child > .dropdown-toggle,\n.input-group-btn:first-child > .btn:not(:first-child),\n.input-group-btn:first-child > .btn-group:not(:first-child) > .btn {\n border-bottom-left-radius: 0;\n border-top-left-radius: 0;\n}\n.input-group-addon:last-child {\n border-left: 0;\n}\n.input-group-btn {\n position: relative;\n font-size: 0;\n white-space: nowrap;\n}\n.input-group-btn > .btn {\n position: relative;\n}\n.input-group-btn > .btn + .btn {\n margin-left: -1px;\n}\n.input-group-btn > .btn:hover,\n.input-group-btn > .btn:focus,\n.input-group-btn > .btn:active {\n z-index: 2;\n}\n.input-group-btn:first-child > .btn,\n.input-group-btn:first-child > .btn-group {\n margin-right: -1px;\n}\n.input-group-btn:last-child > .btn,\n.input-group-btn:last-child > .btn-group {\n z-index: 2;\n margin-left: -1px;\n}\n.nav {\n margin-bottom: 0;\n padding-left: 0;\n list-style: none;\n}\n.nav > li {\n position: relative;\n display: block;\n}\n.nav > li > a {\n position: relative;\n display: block;\n padding: 10px 15px;\n}\n.nav > li > a:hover,\n.nav > li > a:focus {\n text-decoration: none;\n background-color: #eeeeee;\n}\n.nav > li.disabled > a {\n color: #777777;\n}\n.nav > li.disabled > a:hover,\n.nav > li.disabled > a:focus {\n color: #777777;\n text-decoration: 
none;\n background-color: transparent;\n cursor: not-allowed;\n}\n.nav .open > a,\n.nav .open > a:hover,\n.nav .open > a:focus {\n background-color: #eeeeee;\n border-color: #337ab7;\n}\n.nav .nav-divider {\n height: 1px;\n margin: 9px 0;\n overflow: hidden;\n background-color: #e5e5e5;\n}\n.nav > li > a > img {\n max-width: none;\n}\n.nav-tabs {\n border-bottom: 1px solid #ddd;\n}\n.nav-tabs > li {\n float: left;\n margin-bottom: -1px;\n}\n.nav-tabs > li > a {\n margin-right: 2px;\n line-height: 1.42857143;\n border: 1px solid transparent;\n border-radius: 4px 4px 0 0;\n}\n.nav-tabs > li > a:hover {\n border-color: #eeeeee #eeeeee #ddd;\n}\n.nav-tabs > li.active > a,\n.nav-tabs > li.active > a:hover,\n.nav-tabs > li.active > a:focus {\n color: #555555;\n background-color: #fff;\n border: 1px solid #ddd;\n border-bottom-color: transparent;\n cursor: default;\n}\n.nav-tabs.nav-justified {\n width: 100%;\n border-bottom: 0;\n}\n.nav-tabs.nav-justified > li {\n float: none;\n}\n.nav-tabs.nav-justified > li > a {\n text-align: center;\n margin-bottom: 5px;\n}\n.nav-tabs.nav-justified > .dropdown .dropdown-menu {\n top: auto;\n left: auto;\n}\n@media (min-width: 768px) {\n .nav-tabs.nav-justified > li {\n display: table-cell;\n width: 1%;\n }\n .nav-tabs.nav-justified > li > a {\n margin-bottom: 0;\n }\n}\n.nav-tabs.nav-justified > li > a {\n margin-right: 0;\n border-radius: 4px;\n}\n.nav-tabs.nav-justified > .active > a,\n.nav-tabs.nav-justified > .active > a:hover,\n.nav-tabs.nav-justified > .active > a:focus {\n border: 1px solid #ddd;\n}\n@media (min-width: 768px) {\n .nav-tabs.nav-justified > li > a {\n border-bottom: 1px solid #ddd;\n border-radius: 4px 4px 0 0;\n }\n .nav-tabs.nav-justified > .active > a,\n .nav-tabs.nav-justified > .active > a:hover,\n .nav-tabs.nav-justified > .active > a:focus {\n border-bottom-color: #fff;\n }\n}\n.nav-pills > li {\n float: left;\n}\n.nav-pills > li > a {\n border-radius: 4px;\n}\n.nav-pills > li + li {\n margin-left: 2px;\n}\n.nav-pills > li.active > a,\n.nav-pills > li.active > a:hover,\n.nav-pills > li.active > a:focus {\n color: #fff;\n background-color: #337ab7;\n}\n.nav-stacked > li {\n float: none;\n}\n.nav-stacked > li + li {\n margin-top: 2px;\n margin-left: 0;\n}\n.nav-justified {\n width: 100%;\n}\n.nav-justified > li {\n float: none;\n}\n.nav-justified > li > a {\n text-align: center;\n margin-bottom: 5px;\n}\n.nav-justified > .dropdown .dropdown-menu {\n top: auto;\n left: auto;\n}\n@media (min-width: 768px) {\n .nav-justified > li {\n display: table-cell;\n width: 1%;\n }\n .nav-justified > li > a {\n margin-bottom: 0;\n }\n}\n.nav-tabs-justified {\n border-bottom: 0;\n}\n.nav-tabs-justified > li > a {\n margin-right: 0;\n border-radius: 4px;\n}\n.nav-tabs-justified > .active > a,\n.nav-tabs-justified > .active > a:hover,\n.nav-tabs-justified > .active > a:focus {\n border: 1px solid #ddd;\n}\n@media (min-width: 768px) {\n .nav-tabs-justified > li > a {\n border-bottom: 1px solid #ddd;\n border-radius: 4px 4px 0 0;\n }\n .nav-tabs-justified > .active > a,\n .nav-tabs-justified > .active > a:hover,\n .nav-tabs-justified > .active > a:focus {\n border-bottom-color: #fff;\n }\n}\n.tab-content > .tab-pane {\n display: none;\n}\n.tab-content > .active {\n display: block;\n}\n.nav-tabs .dropdown-menu {\n margin-top: -1px;\n border-top-right-radius: 0;\n border-top-left-radius: 0;\n}\n.navbar {\n position: relative;\n min-height: 50px;\n margin-bottom: 20px;\n border: 1px solid transparent;\n}\n@media (min-width: 768px) {\n .navbar {\n 
border-radius: 4px;\n }\n}\n@media (min-width: 768px) {\n .navbar-header {\n float: left;\n }\n}\n.navbar-collapse {\n overflow-x: visible;\n padding-right: 15px;\n padding-left: 15px;\n border-top: 1px solid transparent;\n box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1);\n -webkit-overflow-scrolling: touch;\n}\n.navbar-collapse.in {\n overflow-y: auto;\n}\n@media (min-width: 768px) {\n .navbar-collapse {\n width: auto;\n border-top: 0;\n box-shadow: none;\n }\n .navbar-collapse.collapse {\n display: block !important;\n height: auto !important;\n padding-bottom: 0;\n overflow: visible !important;\n }\n .navbar-collapse.in {\n overflow-y: visible;\n }\n .navbar-fixed-top .navbar-collapse,\n .navbar-static-top .navbar-collapse,\n .navbar-fixed-bottom .navbar-collapse {\n padding-left: 0;\n padding-right: 0;\n }\n}\n.navbar-fixed-top .navbar-collapse,\n.navbar-fixed-bottom .navbar-collapse {\n max-height: 340px;\n}\n@media (max-device-width: 480px) and (orientation: landscape) {\n .navbar-fixed-top .navbar-collapse,\n .navbar-fixed-bottom .navbar-collapse {\n max-height: 200px;\n }\n}\n.container > .navbar-header,\n.container-fluid > .navbar-header,\n.container > .navbar-collapse,\n.container-fluid > .navbar-collapse {\n margin-right: -15px;\n margin-left: -15px;\n}\n@media (min-width: 768px) {\n .container > .navbar-header,\n .container-fluid > .navbar-header,\n .container > .navbar-collapse,\n .container-fluid > .navbar-collapse {\n margin-right: 0;\n margin-left: 0;\n }\n}\n.navbar-static-top {\n z-index: 1000;\n border-width: 0 0 1px;\n}\n@media (min-width: 768px) {\n .navbar-static-top {\n border-radius: 0;\n }\n}\n.navbar-fixed-top,\n.navbar-fixed-bottom {\n position: fixed;\n right: 0;\n left: 0;\n z-index: 1030;\n}\n@media (min-width: 768px) {\n .navbar-fixed-top,\n .navbar-fixed-bottom {\n border-radius: 0;\n }\n}\n.navbar-fixed-top {\n top: 0;\n border-width: 0 0 1px;\n}\n.navbar-fixed-bottom {\n bottom: 0;\n margin-bottom: 0;\n border-width: 1px 0 0;\n}\n.navbar-brand {\n float: left;\n padding: 15px 15px;\n font-size: 18px;\n line-height: 20px;\n height: 50px;\n}\n.navbar-brand:hover,\n.navbar-brand:focus {\n text-decoration: none;\n}\n.navbar-brand > img {\n display: block;\n}\n@media (min-width: 768px) {\n .navbar > .container .navbar-brand,\n .navbar > .container-fluid .navbar-brand {\n margin-left: -15px;\n }\n}\n.navbar-toggle {\n position: relative;\n float: right;\n margin-right: 15px;\n padding: 9px 10px;\n margin-top: 8px;\n margin-bottom: 8px;\n background-color: transparent;\n background-image: none;\n border: 1px solid transparent;\n border-radius: 4px;\n}\n.navbar-toggle:focus {\n outline: 0;\n}\n.navbar-toggle .icon-bar {\n display: block;\n width: 22px;\n height: 2px;\n border-radius: 1px;\n}\n.navbar-toggle .icon-bar + .icon-bar {\n margin-top: 4px;\n}\n@media (min-width: 768px) {\n .navbar-toggle {\n display: none;\n }\n}\n.navbar-nav {\n margin: 7.5px -15px;\n}\n.navbar-nav > li > a {\n padding-top: 10px;\n padding-bottom: 10px;\n line-height: 20px;\n}\n@media (max-width: 767px) {\n .navbar-nav .open .dropdown-menu {\n position: static;\n float: none;\n width: auto;\n margin-top: 0;\n background-color: transparent;\n border: 0;\n box-shadow: none;\n }\n .navbar-nav .open .dropdown-menu > li > a,\n .navbar-nav .open .dropdown-menu .dropdown-header {\n padding: 5px 15px 5px 25px;\n }\n .navbar-nav .open .dropdown-menu > li > a {\n line-height: 20px;\n }\n .navbar-nav .open .dropdown-menu > li > a:hover,\n .navbar-nav .open .dropdown-menu > li > a:focus {\n 
background-image: none;\n }\n}\n@media (min-width: 768px) {\n .navbar-nav {\n float: left;\n margin: 0;\n }\n .navbar-nav > li {\n float: left;\n }\n .navbar-nav > li > a {\n padding-top: 15px;\n padding-bottom: 15px;\n }\n}\n.navbar-form {\n margin-left: -15px;\n margin-right: -15px;\n padding: 10px 15px;\n border-top: 1px solid transparent;\n border-bottom: 1px solid transparent;\n -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), 0 1px 0 rgba(255, 255, 255, 0.1);\n box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), 0 1px 0 rgba(255, 255, 255, 0.1);\n margin-top: 8px;\n margin-bottom: 8px;\n}\n@media (min-width: 768px) {\n .navbar-form .form-group {\n display: inline-block;\n margin-bottom: 0;\n vertical-align: middle;\n }\n .navbar-form .form-control {\n display: inline-block;\n width: auto;\n vertical-align: middle;\n }\n .navbar-form .form-control-static {\n display: inline-block;\n }\n .navbar-form .input-group {\n display: inline-table;\n vertical-align: middle;\n }\n .navbar-form .input-group .input-group-addon,\n .navbar-form .input-group .input-group-btn,\n .navbar-form .input-group .form-control {\n width: auto;\n }\n .navbar-form .input-group > .form-control {\n width: 100%;\n }\n .navbar-form .control-label {\n margin-bottom: 0;\n vertical-align: middle;\n }\n .navbar-form .radio,\n .navbar-form .checkbox {\n display: inline-block;\n margin-top: 0;\n margin-bottom: 0;\n vertical-align: middle;\n }\n .navbar-form .radio label,\n .navbar-form .checkbox label {\n padding-left: 0;\n }\n .navbar-form .radio input[type=\"radio\"],\n .navbar-form .checkbox input[type=\"checkbox\"] {\n position: relative;\n margin-left: 0;\n }\n .navbar-form .has-feedback .form-control-feedback {\n top: 0;\n }\n}\n@media (max-width: 767px) {\n .navbar-form .form-group {\n margin-bottom: 5px;\n }\n .navbar-form .form-group:last-child {\n margin-bottom: 0;\n }\n}\n@media (min-width: 768px) {\n .navbar-form {\n width: auto;\n border: 0;\n margin-left: 0;\n margin-right: 0;\n padding-top: 0;\n padding-bottom: 0;\n -webkit-box-shadow: none;\n box-shadow: none;\n }\n}\n.navbar-nav > li > .dropdown-menu {\n margin-top: 0;\n border-top-right-radius: 0;\n border-top-left-radius: 0;\n}\n.navbar-fixed-bottom .navbar-nav > li > .dropdown-menu {\n margin-bottom: 0;\n border-top-right-radius: 4px;\n border-top-left-radius: 4px;\n border-bottom-right-radius: 0;\n border-bottom-left-radius: 0;\n}\n.navbar-btn {\n margin-top: 8px;\n margin-bottom: 8px;\n}\n.navbar-btn.btn-sm {\n margin-top: 10px;\n margin-bottom: 10px;\n}\n.navbar-btn.btn-xs {\n margin-top: 14px;\n margin-bottom: 14px;\n}\n.navbar-text {\n margin-top: 15px;\n margin-bottom: 15px;\n}\n@media (min-width: 768px) {\n .navbar-text {\n float: left;\n margin-left: 15px;\n margin-right: 15px;\n }\n}\n@media (min-width: 768px) {\n .navbar-left {\n float: left !important;\n }\n .navbar-right {\n float: right !important;\n margin-right: -15px;\n }\n .navbar-right ~ .navbar-right {\n margin-right: 0;\n }\n}\n.navbar-default {\n background-color: #f8f8f8;\n border-color: #e7e7e7;\n}\n.navbar-default .navbar-brand {\n color: #777;\n}\n.navbar-default .navbar-brand:hover,\n.navbar-default .navbar-brand:focus {\n color: #5e5e5e;\n background-color: transparent;\n}\n.navbar-default .navbar-text {\n color: #777;\n}\n.navbar-default .navbar-nav > li > a {\n color: #777;\n}\n.navbar-default .navbar-nav > li > a:hover,\n.navbar-default .navbar-nav > li > a:focus {\n color: #333;\n background-color: transparent;\n}\n.navbar-default .navbar-nav > .active > 
a,\n.navbar-default .navbar-nav > .active > a:hover,\n.navbar-default .navbar-nav > .active > a:focus {\n color: #555;\n background-color: #e7e7e7;\n}\n.navbar-default .navbar-nav > .disabled > a,\n.navbar-default .navbar-nav > .disabled > a:hover,\n.navbar-default .navbar-nav > .disabled > a:focus {\n color: #ccc;\n background-color: transparent;\n}\n.navbar-default .navbar-toggle {\n border-color: #ddd;\n}\n.navbar-default .navbar-toggle:hover,\n.navbar-default .navbar-toggle:focus {\n background-color: #ddd;\n}\n.navbar-default .navbar-toggle .icon-bar {\n background-color: #888;\n}\n.navbar-default .navbar-collapse,\n.navbar-default .navbar-form {\n border-color: #e7e7e7;\n}\n.navbar-default .navbar-nav > .open > a,\n.navbar-default .navbar-nav > .open > a:hover,\n.navbar-default .navbar-nav > .open > a:focus {\n background-color: #e7e7e7;\n color: #555;\n}\n@media (max-width: 767px) {\n .navbar-default .navbar-nav .open .dropdown-menu > li > a {\n color: #777;\n }\n .navbar-default .navbar-nav .open .dropdown-menu > li > a:hover,\n .navbar-default .navbar-nav .open .dropdown-menu > li > a:focus {\n color: #333;\n background-color: transparent;\n }\n .navbar-default .navbar-nav .open .dropdown-menu > .active > a,\n .navbar-default .navbar-nav .open .dropdown-menu > .active > a:hover,\n .navbar-default .navbar-nav .open .dropdown-menu > .active > a:focus {\n color: #555;\n background-color: #e7e7e7;\n }\n .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a,\n .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:hover,\n .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:focus {\n color: #ccc;\n background-color: transparent;\n }\n}\n.navbar-default .navbar-link {\n color: #777;\n}\n.navbar-default .navbar-link:hover {\n color: #333;\n}\n.navbar-default .btn-link {\n color: #777;\n}\n.navbar-default .btn-link:hover,\n.navbar-default .btn-link:focus {\n color: #333;\n}\n.navbar-default .btn-link[disabled]:hover,\nfieldset[disabled] .navbar-default .btn-link:hover,\n.navbar-default .btn-link[disabled]:focus,\nfieldset[disabled] .navbar-default .btn-link:focus {\n color: #ccc;\n}\n.navbar-inverse {\n background-color: #222;\n border-color: #080808;\n}\n.navbar-inverse .navbar-brand {\n color: #9d9d9d;\n}\n.navbar-inverse .navbar-brand:hover,\n.navbar-inverse .navbar-brand:focus {\n color: #fff;\n background-color: transparent;\n}\n.navbar-inverse .navbar-text {\n color: #9d9d9d;\n}\n.navbar-inverse .navbar-nav > li > a {\n color: #9d9d9d;\n}\n.navbar-inverse .navbar-nav > li > a:hover,\n.navbar-inverse .navbar-nav > li > a:focus {\n color: #fff;\n background-color: transparent;\n}\n.navbar-inverse .navbar-nav > .active > a,\n.navbar-inverse .navbar-nav > .active > a:hover,\n.navbar-inverse .navbar-nav > .active > a:focus {\n color: #fff;\n background-color: #080808;\n}\n.navbar-inverse .navbar-nav > .disabled > a,\n.navbar-inverse .navbar-nav > .disabled > a:hover,\n.navbar-inverse .navbar-nav > .disabled > a:focus {\n color: #444;\n background-color: transparent;\n}\n.navbar-inverse .navbar-toggle {\n border-color: #333;\n}\n.navbar-inverse .navbar-toggle:hover,\n.navbar-inverse .navbar-toggle:focus {\n background-color: #333;\n}\n.navbar-inverse .navbar-toggle .icon-bar {\n background-color: #fff;\n}\n.navbar-inverse .navbar-collapse,\n.navbar-inverse .navbar-form {\n border-color: #101010;\n}\n.navbar-inverse .navbar-nav > .open > a,\n.navbar-inverse .navbar-nav > .open > a:hover,\n.navbar-inverse .navbar-nav > .open > a:focus {\n background-color: 
#080808;\n color: #fff;\n}\n@media (max-width: 767px) {\n .navbar-inverse .navbar-nav .open .dropdown-menu > .dropdown-header {\n border-color: #080808;\n }\n .navbar-inverse .navbar-nav .open .dropdown-menu .divider {\n background-color: #080808;\n }\n .navbar-inverse .navbar-nav .open .dropdown-menu > li > a {\n color: #9d9d9d;\n }\n .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:hover,\n .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:focus {\n color: #fff;\n background-color: transparent;\n }\n .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a,\n .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:hover,\n .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:focus {\n color: #fff;\n background-color: #080808;\n }\n .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a,\n .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:hover,\n .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:focus {\n color: #444;\n background-color: transparent;\n }\n}\n.navbar-inverse .navbar-link {\n color: #9d9d9d;\n}\n.navbar-inverse .navbar-link:hover {\n color: #fff;\n}\n.navbar-inverse .btn-link {\n color: #9d9d9d;\n}\n.navbar-inverse .btn-link:hover,\n.navbar-inverse .btn-link:focus {\n color: #fff;\n}\n.navbar-inverse .btn-link[disabled]:hover,\nfieldset[disabled] .navbar-inverse .btn-link:hover,\n.navbar-inverse .btn-link[disabled]:focus,\nfieldset[disabled] .navbar-inverse .btn-link:focus {\n color: #444;\n}\n.breadcrumb {\n padding: 8px 15px;\n margin-bottom: 20px;\n list-style: none;\n background-color: #f5f5f5;\n border-radius: 4px;\n}\n.breadcrumb > li {\n display: inline-block;\n}\n.breadcrumb > li + li:before {\n content: \"/\\00a0\";\n padding: 0 5px;\n color: #ccc;\n}\n.breadcrumb > .active {\n color: #777777;\n}\n.pagination {\n display: inline-block;\n padding-left: 0;\n margin: 20px 0;\n border-radius: 4px;\n}\n.pagination > li {\n display: inline;\n}\n.pagination > li > a,\n.pagination > li > span {\n position: relative;\n float: left;\n padding: 6px 12px;\n line-height: 1.42857143;\n text-decoration: none;\n color: #337ab7;\n background-color: #fff;\n border: 1px solid #ddd;\n margin-left: -1px;\n}\n.pagination > li:first-child > a,\n.pagination > li:first-child > span {\n margin-left: 0;\n border-bottom-left-radius: 4px;\n border-top-left-radius: 4px;\n}\n.pagination > li:last-child > a,\n.pagination > li:last-child > span {\n border-bottom-right-radius: 4px;\n border-top-right-radius: 4px;\n}\n.pagination > li > a:hover,\n.pagination > li > span:hover,\n.pagination > li > a:focus,\n.pagination > li > span:focus {\n z-index: 2;\n color: #23527c;\n background-color: #eeeeee;\n border-color: #ddd;\n}\n.pagination > .active > a,\n.pagination > .active > span,\n.pagination > .active > a:hover,\n.pagination > .active > span:hover,\n.pagination > .active > a:focus,\n.pagination > .active > span:focus {\n z-index: 3;\n color: #fff;\n background-color: #337ab7;\n border-color: #337ab7;\n cursor: default;\n}\n.pagination > .disabled > span,\n.pagination > .disabled > span:hover,\n.pagination > .disabled > span:focus,\n.pagination > .disabled > a,\n.pagination > .disabled > a:hover,\n.pagination > .disabled > a:focus {\n color: #777777;\n background-color: #fff;\n border-color: #ddd;\n cursor: not-allowed;\n}\n.pagination-lg > li > a,\n.pagination-lg > li > span {\n padding: 10px 16px;\n font-size: 18px;\n line-height: 1.3333333;\n}\n.pagination-lg > li:first-child > a,\n.pagination-lg > li:first-child > 
span {\n border-bottom-left-radius: 6px;\n border-top-left-radius: 6px;\n}\n.pagination-lg > li:last-child > a,\n.pagination-lg > li:last-child > span {\n border-bottom-right-radius: 6px;\n border-top-right-radius: 6px;\n}\n.pagination-sm > li > a,\n.pagination-sm > li > span {\n padding: 5px 10px;\n font-size: 12px;\n line-height: 1.5;\n}\n.pagination-sm > li:first-child > a,\n.pagination-sm > li:first-child > span {\n border-bottom-left-radius: 3px;\n border-top-left-radius: 3px;\n}\n.pagination-sm > li:last-child > a,\n.pagination-sm > li:last-child > span {\n border-bottom-right-radius: 3px;\n border-top-right-radius: 3px;\n}\n.pager {\n padding-left: 0;\n margin: 20px 0;\n list-style: none;\n text-align: center;\n}\n.pager li {\n display: inline;\n}\n.pager li > a,\n.pager li > span {\n display: inline-block;\n padding: 5px 14px;\n background-color: #fff;\n border: 1px solid #ddd;\n border-radius: 15px;\n}\n.pager li > a:hover,\n.pager li > a:focus {\n text-decoration: none;\n background-color: #eeeeee;\n}\n.pager .next > a,\n.pager .next > span {\n float: right;\n}\n.pager .previous > a,\n.pager .previous > span {\n float: left;\n}\n.pager .disabled > a,\n.pager .disabled > a:hover,\n.pager .disabled > a:focus,\n.pager .disabled > span {\n color: #777777;\n background-color: #fff;\n cursor: not-allowed;\n}\n.label {\n display: inline;\n padding: .2em .6em .3em;\n font-size: 75%;\n font-weight: bold;\n line-height: 1;\n color: #fff;\n text-align: center;\n white-space: nowrap;\n vertical-align: baseline;\n border-radius: .25em;\n}\na.label:hover,\na.label:focus {\n color: #fff;\n text-decoration: none;\n cursor: pointer;\n}\n.label:empty {\n display: none;\n}\n.btn .label {\n position: relative;\n top: -1px;\n}\n.label-default {\n background-color: #777777;\n}\n.label-default[href]:hover,\n.label-default[href]:focus {\n background-color: #5e5e5e;\n}\n.label-primary {\n background-color: #337ab7;\n}\n.label-primary[href]:hover,\n.label-primary[href]:focus {\n background-color: #286090;\n}\n.label-success {\n background-color: #5cb85c;\n}\n.label-success[href]:hover,\n.label-success[href]:focus {\n background-color: #449d44;\n}\n.label-info {\n background-color: #5bc0de;\n}\n.label-info[href]:hover,\n.label-info[href]:focus {\n background-color: #31b0d5;\n}\n.label-warning {\n background-color: #f0ad4e;\n}\n.label-warning[href]:hover,\n.label-warning[href]:focus {\n background-color: #ec971f;\n}\n.label-danger {\n background-color: #d9534f;\n}\n.label-danger[href]:hover,\n.label-danger[href]:focus {\n background-color: #c9302c;\n}\n.badge {\n display: inline-block;\n min-width: 10px;\n padding: 3px 7px;\n font-size: 12px;\n font-weight: bold;\n color: #fff;\n line-height: 1;\n vertical-align: middle;\n white-space: nowrap;\n text-align: center;\n background-color: #777777;\n border-radius: 10px;\n}\n.badge:empty {\n display: none;\n}\n.btn .badge {\n position: relative;\n top: -1px;\n}\n.btn-xs .badge,\n.btn-group-xs > .btn .badge {\n top: 0;\n padding: 1px 5px;\n}\na.badge:hover,\na.badge:focus {\n color: #fff;\n text-decoration: none;\n cursor: pointer;\n}\n.list-group-item.active > .badge,\n.nav-pills > .active > a > .badge {\n color: #337ab7;\n background-color: #fff;\n}\n.list-group-item > .badge {\n float: right;\n}\n.list-group-item > .badge + .badge {\n margin-right: 5px;\n}\n.nav-pills > li > a > .badge {\n margin-left: 3px;\n}\n.jumbotron {\n padding-top: 30px;\n padding-bottom: 30px;\n margin-bottom: 30px;\n color: inherit;\n background-color: #eeeeee;\n}\n.jumbotron 
h1,\n.jumbotron .h1 {\n color: inherit;\n}\n.jumbotron p {\n margin-bottom: 15px;\n font-size: 21px;\n font-weight: 200;\n}\n.jumbotron > hr {\n border-top-color: #d5d5d5;\n}\n.container .jumbotron,\n.container-fluid .jumbotron {\n border-radius: 6px;\n padding-left: 15px;\n padding-right: 15px;\n}\n.jumbotron .container {\n max-width: 100%;\n}\n@media screen and (min-width: 768px) {\n .jumbotron {\n padding-top: 48px;\n padding-bottom: 48px;\n }\n .container .jumbotron,\n .container-fluid .jumbotron {\n padding-left: 60px;\n padding-right: 60px;\n }\n .jumbotron h1,\n .jumbotron .h1 {\n font-size: 63px;\n }\n}\n.thumbnail {\n display: block;\n padding: 4px;\n margin-bottom: 20px;\n line-height: 1.42857143;\n background-color: #fff;\n border: 1px solid #ddd;\n border-radius: 4px;\n -webkit-transition: border 0.2s ease-in-out;\n -o-transition: border 0.2s ease-in-out;\n transition: border 0.2s ease-in-out;\n}\n.thumbnail > img,\n.thumbnail a > img {\n margin-left: auto;\n margin-right: auto;\n}\na.thumbnail:hover,\na.thumbnail:focus,\na.thumbnail.active {\n border-color: #337ab7;\n}\n.thumbnail .caption {\n padding: 9px;\n color: #333333;\n}\n.alert {\n padding: 15px;\n margin-bottom: 20px;\n border: 1px solid transparent;\n border-radius: 4px;\n}\n.alert h4 {\n margin-top: 0;\n color: inherit;\n}\n.alert .alert-link {\n font-weight: bold;\n}\n.alert > p,\n.alert > ul {\n margin-bottom: 0;\n}\n.alert > p + p {\n margin-top: 5px;\n}\n.alert-dismissable,\n.alert-dismissible {\n padding-right: 35px;\n}\n.alert-dismissable .close,\n.alert-dismissible .close {\n position: relative;\n top: -2px;\n right: -21px;\n color: inherit;\n}\n.alert-success {\n background-color: #dff0d8;\n border-color: #d6e9c6;\n color: #3c763d;\n}\n.alert-success hr {\n border-top-color: #c9e2b3;\n}\n.alert-success .alert-link {\n color: #2b542c;\n}\n.alert-info {\n background-color: #d9edf7;\n border-color: #bce8f1;\n color: #31708f;\n}\n.alert-info hr {\n border-top-color: #a6e1ec;\n}\n.alert-info .alert-link {\n color: #245269;\n}\n.alert-warning {\n background-color: #fcf8e3;\n border-color: #faebcc;\n color: #8a6d3b;\n}\n.alert-warning hr {\n border-top-color: #f7e1b5;\n}\n.alert-warning .alert-link {\n color: #66512c;\n}\n.alert-danger {\n background-color: #f2dede;\n border-color: #ebccd1;\n color: #a94442;\n}\n.alert-danger hr {\n border-top-color: #e4b9c0;\n}\n.alert-danger .alert-link {\n color: #843534;\n}\n@-webkit-keyframes progress-bar-stripes {\n from {\n background-position: 40px 0;\n }\n to {\n background-position: 0 0;\n }\n}\n@keyframes progress-bar-stripes {\n from {\n background-position: 40px 0;\n }\n to {\n background-position: 0 0;\n }\n}\n.progress {\n overflow: hidden;\n height: 20px;\n margin-bottom: 20px;\n background-color: #f5f5f5;\n border-radius: 4px;\n -webkit-box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1);\n box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1);\n}\n.progress-bar {\n float: left;\n width: 0%;\n height: 100%;\n font-size: 12px;\n line-height: 20px;\n color: #fff;\n text-align: center;\n background-color: #337ab7;\n -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15);\n box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15);\n -webkit-transition: width 0.6s ease;\n -o-transition: width 0.6s ease;\n transition: width 0.6s ease;\n}\n.progress-striped .progress-bar,\n.progress-bar-striped {\n background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, 
transparent);\n background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n background-size: 40px 40px;\n}\n.progress.active .progress-bar,\n.progress-bar.active {\n -webkit-animation: progress-bar-stripes 2s linear infinite;\n -o-animation: progress-bar-stripes 2s linear infinite;\n animation: progress-bar-stripes 2s linear infinite;\n}\n.progress-bar-success {\n background-color: #5cb85c;\n}\n.progress-striped .progress-bar-success {\n background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n}\n.progress-bar-info {\n background-color: #5bc0de;\n}\n.progress-striped .progress-bar-info {\n background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n}\n.progress-bar-warning {\n background-color: #f0ad4e;\n}\n.progress-striped .progress-bar-warning {\n background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n}\n.progress-bar-danger {\n background-color: #d9534f;\n}\n.progress-striped .progress-bar-danger {\n background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n}\n.media 
{\n margin-top: 15px;\n}\n.media:first-child {\n margin-top: 0;\n}\n.media,\n.media-body {\n zoom: 1;\n overflow: hidden;\n}\n.media-body {\n width: 10000px;\n}\n.media-object {\n display: block;\n}\n.media-object.img-thumbnail {\n max-width: none;\n}\n.media-right,\n.media > .pull-right {\n padding-left: 10px;\n}\n.media-left,\n.media > .pull-left {\n padding-right: 10px;\n}\n.media-left,\n.media-right,\n.media-body {\n display: table-cell;\n vertical-align: top;\n}\n.media-middle {\n vertical-align: middle;\n}\n.media-bottom {\n vertical-align: bottom;\n}\n.media-heading {\n margin-top: 0;\n margin-bottom: 5px;\n}\n.media-list {\n padding-left: 0;\n list-style: none;\n}\n.list-group {\n margin-bottom: 20px;\n padding-left: 0;\n}\n.list-group-item {\n position: relative;\n display: block;\n padding: 10px 15px;\n margin-bottom: -1px;\n background-color: #fff;\n border: 1px solid #ddd;\n}\n.list-group-item:first-child {\n border-top-right-radius: 4px;\n border-top-left-radius: 4px;\n}\n.list-group-item:last-child {\n margin-bottom: 0;\n border-bottom-right-radius: 4px;\n border-bottom-left-radius: 4px;\n}\na.list-group-item,\nbutton.list-group-item {\n color: #555;\n}\na.list-group-item .list-group-item-heading,\nbutton.list-group-item .list-group-item-heading {\n color: #333;\n}\na.list-group-item:hover,\nbutton.list-group-item:hover,\na.list-group-item:focus,\nbutton.list-group-item:focus {\n text-decoration: none;\n color: #555;\n background-color: #f5f5f5;\n}\nbutton.list-group-item {\n width: 100%;\n text-align: left;\n}\n.list-group-item.disabled,\n.list-group-item.disabled:hover,\n.list-group-item.disabled:focus {\n background-color: #eeeeee;\n color: #777777;\n cursor: not-allowed;\n}\n.list-group-item.disabled .list-group-item-heading,\n.list-group-item.disabled:hover .list-group-item-heading,\n.list-group-item.disabled:focus .list-group-item-heading {\n color: inherit;\n}\n.list-group-item.disabled .list-group-item-text,\n.list-group-item.disabled:hover .list-group-item-text,\n.list-group-item.disabled:focus .list-group-item-text {\n color: #777777;\n}\n.list-group-item.active,\n.list-group-item.active:hover,\n.list-group-item.active:focus {\n z-index: 2;\n color: #fff;\n background-color: #337ab7;\n border-color: #337ab7;\n}\n.list-group-item.active .list-group-item-heading,\n.list-group-item.active:hover .list-group-item-heading,\n.list-group-item.active:focus .list-group-item-heading,\n.list-group-item.active .list-group-item-heading > small,\n.list-group-item.active:hover .list-group-item-heading > small,\n.list-group-item.active:focus .list-group-item-heading > small,\n.list-group-item.active .list-group-item-heading > .small,\n.list-group-item.active:hover .list-group-item-heading > .small,\n.list-group-item.active:focus .list-group-item-heading > .small {\n color: inherit;\n}\n.list-group-item.active .list-group-item-text,\n.list-group-item.active:hover .list-group-item-text,\n.list-group-item.active:focus .list-group-item-text {\n color: #c7ddef;\n}\n.list-group-item-success {\n color: #3c763d;\n background-color: #dff0d8;\n}\na.list-group-item-success,\nbutton.list-group-item-success {\n color: #3c763d;\n}\na.list-group-item-success .list-group-item-heading,\nbutton.list-group-item-success .list-group-item-heading {\n color: inherit;\n}\na.list-group-item-success:hover,\nbutton.list-group-item-success:hover,\na.list-group-item-success:focus,\nbutton.list-group-item-success:focus {\n color: #3c763d;\n background-color: 
#d0e9c6;\n}\na.list-group-item-success.active,\nbutton.list-group-item-success.active,\na.list-group-item-success.active:hover,\nbutton.list-group-item-success.active:hover,\na.list-group-item-success.active:focus,\nbutton.list-group-item-success.active:focus {\n color: #fff;\n background-color: #3c763d;\n border-color: #3c763d;\n}\n.list-group-item-info {\n color: #31708f;\n background-color: #d9edf7;\n}\na.list-group-item-info,\nbutton.list-group-item-info {\n color: #31708f;\n}\na.list-group-item-info .list-group-item-heading,\nbutton.list-group-item-info .list-group-item-heading {\n color: inherit;\n}\na.list-group-item-info:hover,\nbutton.list-group-item-info:hover,\na.list-group-item-info:focus,\nbutton.list-group-item-info:focus {\n color: #31708f;\n background-color: #c4e3f3;\n}\na.list-group-item-info.active,\nbutton.list-group-item-info.active,\na.list-group-item-info.active:hover,\nbutton.list-group-item-info.active:hover,\na.list-group-item-info.active:focus,\nbutton.list-group-item-info.active:focus {\n color: #fff;\n background-color: #31708f;\n border-color: #31708f;\n}\n.list-group-item-warning {\n color: #8a6d3b;\n background-color: #fcf8e3;\n}\na.list-group-item-warning,\nbutton.list-group-item-warning {\n color: #8a6d3b;\n}\na.list-group-item-warning .list-group-item-heading,\nbutton.list-group-item-warning .list-group-item-heading {\n color: inherit;\n}\na.list-group-item-warning:hover,\nbutton.list-group-item-warning:hover,\na.list-group-item-warning:focus,\nbutton.list-group-item-warning:focus {\n color: #8a6d3b;\n background-color: #faf2cc;\n}\na.list-group-item-warning.active,\nbutton.list-group-item-warning.active,\na.list-group-item-warning.active:hover,\nbutton.list-group-item-warning.active:hover,\na.list-group-item-warning.active:focus,\nbutton.list-group-item-warning.active:focus {\n color: #fff;\n background-color: #8a6d3b;\n border-color: #8a6d3b;\n}\n.list-group-item-danger {\n color: #a94442;\n background-color: #f2dede;\n}\na.list-group-item-danger,\nbutton.list-group-item-danger {\n color: #a94442;\n}\na.list-group-item-danger .list-group-item-heading,\nbutton.list-group-item-danger .list-group-item-heading {\n color: inherit;\n}\na.list-group-item-danger:hover,\nbutton.list-group-item-danger:hover,\na.list-group-item-danger:focus,\nbutton.list-group-item-danger:focus {\n color: #a94442;\n background-color: #ebcccc;\n}\na.list-group-item-danger.active,\nbutton.list-group-item-danger.active,\na.list-group-item-danger.active:hover,\nbutton.list-group-item-danger.active:hover,\na.list-group-item-danger.active:focus,\nbutton.list-group-item-danger.active:focus {\n color: #fff;\n background-color: #a94442;\n border-color: #a94442;\n}\n.list-group-item-heading {\n margin-top: 0;\n margin-bottom: 5px;\n}\n.list-group-item-text {\n margin-bottom: 0;\n line-height: 1.3;\n}\n.panel {\n margin-bottom: 20px;\n background-color: #fff;\n border: 1px solid transparent;\n border-radius: 4px;\n -webkit-box-shadow: 0 1px 1px rgba(0, 0, 0, 0.05);\n box-shadow: 0 1px 1px rgba(0, 0, 0, 0.05);\n}\n.panel-body {\n padding: 15px;\n}\n.panel-heading {\n padding: 10px 15px;\n border-bottom: 1px solid transparent;\n border-top-right-radius: 3px;\n border-top-left-radius: 3px;\n}\n.panel-heading > .dropdown .dropdown-toggle {\n color: inherit;\n}\n.panel-title {\n margin-top: 0;\n margin-bottom: 0;\n font-size: 16px;\n color: inherit;\n}\n.panel-title > a,\n.panel-title > small,\n.panel-title > .small,\n.panel-title > small > a,\n.panel-title > .small > a {\n color: 
inherit;\n}\n.panel-footer {\n padding: 10px 15px;\n background-color: #f5f5f5;\n border-top: 1px solid #ddd;\n border-bottom-right-radius: 3px;\n border-bottom-left-radius: 3px;\n}\n.panel > .list-group,\n.panel > .panel-collapse > .list-group {\n margin-bottom: 0;\n}\n.panel > .list-group .list-group-item,\n.panel > .panel-collapse > .list-group .list-group-item {\n border-width: 1px 0;\n border-radius: 0;\n}\n.panel > .list-group:first-child .list-group-item:first-child,\n.panel > .panel-collapse > .list-group:first-child .list-group-item:first-child {\n border-top: 0;\n border-top-right-radius: 3px;\n border-top-left-radius: 3px;\n}\n.panel > .list-group:last-child .list-group-item:last-child,\n.panel > .panel-collapse > .list-group:last-child .list-group-item:last-child {\n border-bottom: 0;\n border-bottom-right-radius: 3px;\n border-bottom-left-radius: 3px;\n}\n.panel > .panel-heading + .panel-collapse > .list-group .list-group-item:first-child {\n border-top-right-radius: 0;\n border-top-left-radius: 0;\n}\n.panel-heading + .list-group .list-group-item:first-child {\n border-top-width: 0;\n}\n.list-group + .panel-footer {\n border-top-width: 0;\n}\n.panel > .table,\n.panel > .table-responsive > .table,\n.panel > .panel-collapse > .table {\n margin-bottom: 0;\n}\n.panel > .table caption,\n.panel > .table-responsive > .table caption,\n.panel > .panel-collapse > .table caption {\n padding-left: 15px;\n padding-right: 15px;\n}\n.panel > .table:first-child,\n.panel > .table-responsive:first-child > .table:first-child {\n border-top-right-radius: 3px;\n border-top-left-radius: 3px;\n}\n.panel > .table:first-child > thead:first-child > tr:first-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child {\n border-top-left-radius: 3px;\n border-top-right-radius: 3px;\n}\n.panel > .table:first-child > thead:first-child > tr:first-child td:first-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:first-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child td:first-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:first-child,\n.panel > .table:first-child > thead:first-child > tr:first-child th:first-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:first-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child th:first-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:first-child {\n border-top-left-radius: 3px;\n}\n.panel > .table:first-child > thead:first-child > tr:first-child td:last-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:last-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child td:last-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:last-child,\n.panel > .table:first-child > thead:first-child > tr:first-child th:last-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:last-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child th:last-child,\n.panel > .table-responsive:first-child > 
.table:first-child > tbody:first-child > tr:first-child th:last-child {\n border-top-right-radius: 3px;\n}\n.panel > .table:last-child,\n.panel > .table-responsive:last-child > .table:last-child {\n border-bottom-right-radius: 3px;\n border-bottom-left-radius: 3px;\n}\n.panel > .table:last-child > tbody:last-child > tr:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child {\n border-bottom-left-radius: 3px;\n border-bottom-right-radius: 3px;\n}\n.panel > .table:last-child > tbody:last-child > tr:last-child td:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:first-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child td:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:first-child,\n.panel > .table:last-child > tbody:last-child > tr:last-child th:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:first-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child th:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:first-child {\n border-bottom-left-radius: 3px;\n}\n.panel > .table:last-child > tbody:last-child > tr:last-child td:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:last-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child td:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:last-child,\n.panel > .table:last-child > tbody:last-child > tr:last-child th:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:last-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child th:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:last-child {\n border-bottom-right-radius: 3px;\n}\n.panel > .panel-body + .table,\n.panel > .panel-body + .table-responsive,\n.panel > .table + .panel-body,\n.panel > .table-responsive + .panel-body {\n border-top: 1px solid #ddd;\n}\n.panel > .table > tbody:first-child > tr:first-child th,\n.panel > .table > tbody:first-child > tr:first-child td {\n border-top: 0;\n}\n.panel > .table-bordered,\n.panel > .table-responsive > .table-bordered {\n border: 0;\n}\n.panel > .table-bordered > thead > tr > th:first-child,\n.panel > .table-responsive > .table-bordered > thead > tr > th:first-child,\n.panel > .table-bordered > tbody > tr > th:first-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > th:first-child,\n.panel > .table-bordered > tfoot > tr > th:first-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > th:first-child,\n.panel > .table-bordered > thead > tr > td:first-child,\n.panel > .table-responsive > .table-bordered > thead > tr > td:first-child,\n.panel > .table-bordered > tbody > tr > td:first-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > td:first-child,\n.panel > .table-bordered > tfoot > tr > td:first-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > td:first-child {\n border-left: 0;\n}\n.panel > .table-bordered > thead > tr > th:last-child,\n.panel > 
.table-responsive > .table-bordered > thead > tr > th:last-child,\n.panel > .table-bordered > tbody > tr > th:last-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > th:last-child,\n.panel > .table-bordered > tfoot > tr > th:last-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > th:last-child,\n.panel > .table-bordered > thead > tr > td:last-child,\n.panel > .table-responsive > .table-bordered > thead > tr > td:last-child,\n.panel > .table-bordered > tbody > tr > td:last-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > td:last-child,\n.panel > .table-bordered > tfoot > tr > td:last-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > td:last-child {\n border-right: 0;\n}\n.panel > .table-bordered > thead > tr:first-child > td,\n.panel > .table-responsive > .table-bordered > thead > tr:first-child > td,\n.panel > .table-bordered > tbody > tr:first-child > td,\n.panel > .table-responsive > .table-bordered > tbody > tr:first-child > td,\n.panel > .table-bordered > thead > tr:first-child > th,\n.panel > .table-responsive > .table-bordered > thead > tr:first-child > th,\n.panel > .table-bordered > tbody > tr:first-child > th,\n.panel > .table-responsive > .table-bordered > tbody > tr:first-child > th {\n border-bottom: 0;\n}\n.panel > .table-bordered > tbody > tr:last-child > td,\n.panel > .table-responsive > .table-bordered > tbody > tr:last-child > td,\n.panel > .table-bordered > tfoot > tr:last-child > td,\n.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > td,\n.panel > .table-bordered > tbody > tr:last-child > th,\n.panel > .table-responsive > .table-bordered > tbody > tr:last-child > th,\n.panel > .table-bordered > tfoot > tr:last-child > th,\n.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > th {\n border-bottom: 0;\n}\n.panel > .table-responsive {\n border: 0;\n margin-bottom: 0;\n}\n.panel-group {\n margin-bottom: 20px;\n}\n.panel-group .panel {\n margin-bottom: 0;\n border-radius: 4px;\n}\n.panel-group .panel + .panel {\n margin-top: 5px;\n}\n.panel-group .panel-heading {\n border-bottom: 0;\n}\n.panel-group .panel-heading + .panel-collapse > .panel-body,\n.panel-group .panel-heading + .panel-collapse > .list-group {\n border-top: 1px solid #ddd;\n}\n.panel-group .panel-footer {\n border-top: 0;\n}\n.panel-group .panel-footer + .panel-collapse .panel-body {\n border-bottom: 1px solid #ddd;\n}\n.panel-default {\n border-color: #ddd;\n}\n.panel-default > .panel-heading {\n color: #333333;\n background-color: #f5f5f5;\n border-color: #ddd;\n}\n.panel-default > .panel-heading + .panel-collapse > .panel-body {\n border-top-color: #ddd;\n}\n.panel-default > .panel-heading .badge {\n color: #f5f5f5;\n background-color: #333333;\n}\n.panel-default > .panel-footer + .panel-collapse > .panel-body {\n border-bottom-color: #ddd;\n}\n.panel-primary {\n border-color: #337ab7;\n}\n.panel-primary > .panel-heading {\n color: #fff;\n background-color: #337ab7;\n border-color: #337ab7;\n}\n.panel-primary > .panel-heading + .panel-collapse > .panel-body {\n border-top-color: #337ab7;\n}\n.panel-primary > .panel-heading .badge {\n color: #337ab7;\n background-color: #fff;\n}\n.panel-primary > .panel-footer + .panel-collapse > .panel-body {\n border-bottom-color: #337ab7;\n}\n.panel-success {\n border-color: #d6e9c6;\n}\n.panel-success > .panel-heading {\n color: #3c763d;\n background-color: #dff0d8;\n border-color: #d6e9c6;\n}\n.panel-success > .panel-heading + .panel-collapse > .panel-body {\n 
border-top-color: #d6e9c6;\n}\n.panel-success > .panel-heading .badge {\n color: #dff0d8;\n background-color: #3c763d;\n}\n.panel-success > .panel-footer + .panel-collapse > .panel-body {\n border-bottom-color: #d6e9c6;\n}\n.panel-info {\n border-color: #bce8f1;\n}\n.panel-info > .panel-heading {\n color: #31708f;\n background-color: #d9edf7;\n border-color: #bce8f1;\n}\n.panel-info > .panel-heading + .panel-collapse > .panel-body {\n border-top-color: #bce8f1;\n}\n.panel-info > .panel-heading .badge {\n color: #d9edf7;\n background-color: #31708f;\n}\n.panel-info > .panel-footer + .panel-collapse > .panel-body {\n border-bottom-color: #bce8f1;\n}\n.panel-warning {\n border-color: #faebcc;\n}\n.panel-warning > .panel-heading {\n color: #8a6d3b;\n background-color: #fcf8e3;\n border-color: #faebcc;\n}\n.panel-warning > .panel-heading + .panel-collapse > .panel-body {\n border-top-color: #faebcc;\n}\n.panel-warning > .panel-heading .badge {\n color: #fcf8e3;\n background-color: #8a6d3b;\n}\n.panel-warning > .panel-footer + .panel-collapse > .panel-body {\n border-bottom-color: #faebcc;\n}\n.panel-danger {\n border-color: #ebccd1;\n}\n.panel-danger > .panel-heading {\n color: #a94442;\n background-color: #f2dede;\n border-color: #ebccd1;\n}\n.panel-danger > .panel-heading + .panel-collapse > .panel-body {\n border-top-color: #ebccd1;\n}\n.panel-danger > .panel-heading .badge {\n color: #f2dede;\n background-color: #a94442;\n}\n.panel-danger > .panel-footer + .panel-collapse > .panel-body {\n border-bottom-color: #ebccd1;\n}\n.embed-responsive {\n position: relative;\n display: block;\n height: 0;\n padding: 0;\n overflow: hidden;\n}\n.embed-responsive .embed-responsive-item,\n.embed-responsive iframe,\n.embed-responsive embed,\n.embed-responsive object,\n.embed-responsive video {\n position: absolute;\n top: 0;\n left: 0;\n bottom: 0;\n height: 100%;\n width: 100%;\n border: 0;\n}\n.embed-responsive-16by9 {\n padding-bottom: 56.25%;\n}\n.embed-responsive-4by3 {\n padding-bottom: 75%;\n}\n.well {\n min-height: 20px;\n padding: 19px;\n margin-bottom: 20px;\n background-color: #f5f5f5;\n border: 1px solid #e3e3e3;\n border-radius: 4px;\n -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.05);\n box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.05);\n}\n.well blockquote {\n border-color: #ddd;\n border-color: rgba(0, 0, 0, 0.15);\n}\n.well-lg {\n padding: 24px;\n border-radius: 6px;\n}\n.well-sm {\n padding: 9px;\n border-radius: 3px;\n}\n.close {\n float: right;\n font-size: 21px;\n font-weight: bold;\n line-height: 1;\n color: #000;\n text-shadow: 0 1px 0 #fff;\n opacity: 0.2;\n filter: alpha(opacity=20);\n}\n.close:hover,\n.close:focus {\n color: #000;\n text-decoration: none;\n cursor: pointer;\n opacity: 0.5;\n filter: alpha(opacity=50);\n}\nbutton.close {\n padding: 0;\n cursor: pointer;\n background: transparent;\n border: 0;\n -webkit-appearance: none;\n}\n.modal-open {\n overflow: hidden;\n}\n.modal {\n display: none;\n overflow: hidden;\n position: fixed;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n z-index: 1050;\n -webkit-overflow-scrolling: touch;\n outline: 0;\n}\n.modal.fade .modal-dialog {\n -webkit-transform: translate(0, -25%);\n -ms-transform: translate(0, -25%);\n -o-transform: translate(0, -25%);\n transform: translate(0, -25%);\n -webkit-transition: -webkit-transform 0.3s ease-out;\n -moz-transition: -moz-transform 0.3s ease-out;\n -o-transition: -o-transform 0.3s ease-out;\n transition: transform 0.3s ease-out;\n}\n.modal.in .modal-dialog {\n -webkit-transform: translate(0, 
0);\n -ms-transform: translate(0, 0);\n -o-transform: translate(0, 0);\n transform: translate(0, 0);\n}\n.modal-open .modal {\n overflow-x: hidden;\n overflow-y: auto;\n}\n.modal-dialog {\n position: relative;\n width: auto;\n margin: 10px;\n}\n.modal-content {\n position: relative;\n background-color: #fff;\n border: 1px solid #999;\n border: 1px solid rgba(0, 0, 0, 0.2);\n border-radius: 6px;\n -webkit-box-shadow: 0 3px 9px rgba(0, 0, 0, 0.5);\n box-shadow: 0 3px 9px rgba(0, 0, 0, 0.5);\n background-clip: padding-box;\n outline: 0;\n}\n.modal-backdrop {\n position: fixed;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n z-index: 1040;\n background-color: #000;\n}\n.modal-backdrop.fade {\n opacity: 0;\n filter: alpha(opacity=0);\n}\n.modal-backdrop.in {\n opacity: 0.5;\n filter: alpha(opacity=50);\n}\n.modal-header {\n padding: 15px;\n border-bottom: 1px solid #e5e5e5;\n}\n.modal-header .close {\n margin-top: -2px;\n}\n.modal-title {\n margin: 0;\n line-height: 1.42857143;\n}\n.modal-body {\n position: relative;\n padding: 15px;\n}\n.modal-footer {\n padding: 15px;\n text-align: right;\n border-top: 1px solid #e5e5e5;\n}\n.modal-footer .btn + .btn {\n margin-left: 5px;\n margin-bottom: 0;\n}\n.modal-footer .btn-group .btn + .btn {\n margin-left: -1px;\n}\n.modal-footer .btn-block + .btn-block {\n margin-left: 0;\n}\n.modal-scrollbar-measure {\n position: absolute;\n top: -9999px;\n width: 50px;\n height: 50px;\n overflow: scroll;\n}\n@media (min-width: 768px) {\n .modal-dialog {\n width: 600px;\n margin: 30px auto;\n }\n .modal-content {\n -webkit-box-shadow: 0 5px 15px rgba(0, 0, 0, 0.5);\n box-shadow: 0 5px 15px rgba(0, 0, 0, 0.5);\n }\n .modal-sm {\n width: 300px;\n }\n}\n@media (min-width: 992px) {\n .modal-lg {\n width: 900px;\n }\n}\n.tooltip {\n position: absolute;\n z-index: 1070;\n display: block;\n font-family: \"Helvetica Neue\", Helvetica, Arial, sans-serif;\n font-style: normal;\n font-weight: normal;\n letter-spacing: normal;\n line-break: auto;\n line-height: 1.42857143;\n text-align: left;\n text-align: start;\n text-decoration: none;\n text-shadow: none;\n text-transform: none;\n white-space: normal;\n word-break: normal;\n word-spacing: normal;\n word-wrap: normal;\n font-size: 12px;\n opacity: 0;\n filter: alpha(opacity=0);\n}\n.tooltip.in {\n opacity: 0.9;\n filter: alpha(opacity=90);\n}\n.tooltip.top {\n margin-top: -3px;\n padding: 5px 0;\n}\n.tooltip.right {\n margin-left: 3px;\n padding: 0 5px;\n}\n.tooltip.bottom {\n margin-top: 3px;\n padding: 5px 0;\n}\n.tooltip.left {\n margin-left: -3px;\n padding: 0 5px;\n}\n.tooltip-inner {\n max-width: 200px;\n padding: 3px 8px;\n color: #fff;\n text-align: center;\n background-color: #000;\n border-radius: 4px;\n}\n.tooltip-arrow {\n position: absolute;\n width: 0;\n height: 0;\n border-color: transparent;\n border-style: solid;\n}\n.tooltip.top .tooltip-arrow {\n bottom: 0;\n left: 50%;\n margin-left: -5px;\n border-width: 5px 5px 0;\n border-top-color: #000;\n}\n.tooltip.top-left .tooltip-arrow {\n bottom: 0;\n right: 5px;\n margin-bottom: -5px;\n border-width: 5px 5px 0;\n border-top-color: #000;\n}\n.tooltip.top-right .tooltip-arrow {\n bottom: 0;\n left: 5px;\n margin-bottom: -5px;\n border-width: 5px 5px 0;\n border-top-color: #000;\n}\n.tooltip.right .tooltip-arrow {\n top: 50%;\n left: 0;\n margin-top: -5px;\n border-width: 5px 5px 5px 0;\n border-right-color: #000;\n}\n.tooltip.left .tooltip-arrow {\n top: 50%;\n right: 0;\n margin-top: -5px;\n border-width: 5px 0 5px 5px;\n border-left-color: 
#000;\n}\n.tooltip.bottom .tooltip-arrow {\n top: 0;\n left: 50%;\n margin-left: -5px;\n border-width: 0 5px 5px;\n border-bottom-color: #000;\n}\n.tooltip.bottom-left .tooltip-arrow {\n top: 0;\n right: 5px;\n margin-top: -5px;\n border-width: 0 5px 5px;\n border-bottom-color: #000;\n}\n.tooltip.bottom-right .tooltip-arrow {\n top: 0;\n left: 5px;\n margin-top: -5px;\n border-width: 0 5px 5px;\n border-bottom-color: #000;\n}\n.popover {\n position: absolute;\n top: 0;\n left: 0;\n z-index: 1060;\n display: none;\n max-width: 276px;\n padding: 1px;\n font-family: \"Helvetica Neue\", Helvetica, Arial, sans-serif;\n font-style: normal;\n font-weight: normal;\n letter-spacing: normal;\n line-break: auto;\n line-height: 1.42857143;\n text-align: left;\n text-align: start;\n text-decoration: none;\n text-shadow: none;\n text-transform: none;\n white-space: normal;\n word-break: normal;\n word-spacing: normal;\n word-wrap: normal;\n font-size: 14px;\n background-color: #fff;\n background-clip: padding-box;\n border: 1px solid #ccc;\n border: 1px solid rgba(0, 0, 0, 0.2);\n border-radius: 6px;\n -webkit-box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2);\n box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2);\n}\n.popover.top {\n margin-top: -10px;\n}\n.popover.right {\n margin-left: 10px;\n}\n.popover.bottom {\n margin-top: 10px;\n}\n.popover.left {\n margin-left: -10px;\n}\n.popover-title {\n margin: 0;\n padding: 8px 14px;\n font-size: 14px;\n background-color: #f7f7f7;\n border-bottom: 1px solid #ebebeb;\n border-radius: 5px 5px 0 0;\n}\n.popover-content {\n padding: 9px 14px;\n}\n.popover > .arrow,\n.popover > .arrow:after {\n position: absolute;\n display: block;\n width: 0;\n height: 0;\n border-color: transparent;\n border-style: solid;\n}\n.popover > .arrow {\n border-width: 11px;\n}\n.popover > .arrow:after {\n border-width: 10px;\n content: \"\";\n}\n.popover.top > .arrow {\n left: 50%;\n margin-left: -11px;\n border-bottom-width: 0;\n border-top-color: #999999;\n border-top-color: rgba(0, 0, 0, 0.25);\n bottom: -11px;\n}\n.popover.top > .arrow:after {\n content: \" \";\n bottom: 1px;\n margin-left: -10px;\n border-bottom-width: 0;\n border-top-color: #fff;\n}\n.popover.right > .arrow {\n top: 50%;\n left: -11px;\n margin-top: -11px;\n border-left-width: 0;\n border-right-color: #999999;\n border-right-color: rgba(0, 0, 0, 0.25);\n}\n.popover.right > .arrow:after {\n content: \" \";\n left: 1px;\n bottom: -10px;\n border-left-width: 0;\n border-right-color: #fff;\n}\n.popover.bottom > .arrow {\n left: 50%;\n margin-left: -11px;\n border-top-width: 0;\n border-bottom-color: #999999;\n border-bottom-color: rgba(0, 0, 0, 0.25);\n top: -11px;\n}\n.popover.bottom > .arrow:after {\n content: \" \";\n top: 1px;\n margin-left: -10px;\n border-top-width: 0;\n border-bottom-color: #fff;\n}\n.popover.left > .arrow {\n top: 50%;\n right: -11px;\n margin-top: -11px;\n border-right-width: 0;\n border-left-color: #999999;\n border-left-color: rgba(0, 0, 0, 0.25);\n}\n.popover.left > .arrow:after {\n content: \" \";\n right: 1px;\n border-right-width: 0;\n border-left-color: #fff;\n bottom: -10px;\n}\n.carousel {\n position: relative;\n}\n.carousel-inner {\n position: relative;\n overflow: hidden;\n width: 100%;\n}\n.carousel-inner > .item {\n display: none;\n position: relative;\n -webkit-transition: 0.6s ease-in-out left;\n -o-transition: 0.6s ease-in-out left;\n transition: 0.6s ease-in-out left;\n}\n.carousel-inner > .item > img,\n.carousel-inner > .item > a > img {\n line-height: 1;\n}\n@media all and 
(transform-3d), (-webkit-transform-3d) {\n .carousel-inner > .item {\n -webkit-transition: -webkit-transform 0.6s ease-in-out;\n -moz-transition: -moz-transform 0.6s ease-in-out;\n -o-transition: -o-transform 0.6s ease-in-out;\n transition: transform 0.6s ease-in-out;\n -webkit-backface-visibility: hidden;\n -moz-backface-visibility: hidden;\n backface-visibility: hidden;\n -webkit-perspective: 1000px;\n -moz-perspective: 1000px;\n perspective: 1000px;\n }\n .carousel-inner > .item.next,\n .carousel-inner > .item.active.right {\n -webkit-transform: translate3d(100%, 0, 0);\n transform: translate3d(100%, 0, 0);\n left: 0;\n }\n .carousel-inner > .item.prev,\n .carousel-inner > .item.active.left {\n -webkit-transform: translate3d(-100%, 0, 0);\n transform: translate3d(-100%, 0, 0);\n left: 0;\n }\n .carousel-inner > .item.next.left,\n .carousel-inner > .item.prev.right,\n .carousel-inner > .item.active {\n -webkit-transform: translate3d(0, 0, 0);\n transform: translate3d(0, 0, 0);\n left: 0;\n }\n}\n.carousel-inner > .active,\n.carousel-inner > .next,\n.carousel-inner > .prev {\n display: block;\n}\n.carousel-inner > .active {\n left: 0;\n}\n.carousel-inner > .next,\n.carousel-inner > .prev {\n position: absolute;\n top: 0;\n width: 100%;\n}\n.carousel-inner > .next {\n left: 100%;\n}\n.carousel-inner > .prev {\n left: -100%;\n}\n.carousel-inner > .next.left,\n.carousel-inner > .prev.right {\n left: 0;\n}\n.carousel-inner > .active.left {\n left: -100%;\n}\n.carousel-inner > .active.right {\n left: 100%;\n}\n.carousel-control {\n position: absolute;\n top: 0;\n left: 0;\n bottom: 0;\n width: 15%;\n opacity: 0.5;\n filter: alpha(opacity=50);\n font-size: 20px;\n color: #fff;\n text-align: center;\n text-shadow: 0 1px 2px rgba(0, 0, 0, 0.6);\n background-color: rgba(0, 0, 0, 0);\n}\n.carousel-control.left {\n background-image: -webkit-linear-gradient(left, rgba(0, 0, 0, 0.5) 0%, rgba(0, 0, 0, 0.0001) 100%);\n background-image: -o-linear-gradient(left, rgba(0, 0, 0, 0.5) 0%, rgba(0, 0, 0, 0.0001) 100%);\n background-image: linear-gradient(to right, rgba(0, 0, 0, 0.5) 0%, rgba(0, 0, 0, 0.0001) 100%);\n background-repeat: repeat-x;\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', endColorstr='#00000000', GradientType=1);\n}\n.carousel-control.right {\n left: auto;\n right: 0;\n background-image: -webkit-linear-gradient(left, rgba(0, 0, 0, 0.0001) 0%, rgba(0, 0, 0, 0.5) 100%);\n background-image: -o-linear-gradient(left, rgba(0, 0, 0, 0.0001) 0%, rgba(0, 0, 0, 0.5) 100%);\n background-image: linear-gradient(to right, rgba(0, 0, 0, 0.0001) 0%, rgba(0, 0, 0, 0.5) 100%);\n background-repeat: repeat-x;\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1);\n}\n.carousel-control:hover,\n.carousel-control:focus {\n outline: 0;\n color: #fff;\n text-decoration: none;\n opacity: 0.9;\n filter: alpha(opacity=90);\n}\n.carousel-control .icon-prev,\n.carousel-control .icon-next,\n.carousel-control .glyphicon-chevron-left,\n.carousel-control .glyphicon-chevron-right {\n position: absolute;\n top: 50%;\n margin-top: -10px;\n z-index: 5;\n display: inline-block;\n}\n.carousel-control .icon-prev,\n.carousel-control .glyphicon-chevron-left {\n left: 50%;\n margin-left: -10px;\n}\n.carousel-control .icon-next,\n.carousel-control .glyphicon-chevron-right {\n right: 50%;\n margin-right: -10px;\n}\n.carousel-control .icon-prev,\n.carousel-control .icon-next {\n width: 20px;\n height: 20px;\n line-height: 1;\n 
font-family: serif;\n}\n.carousel-control .icon-prev:before {\n content: '\\2039';\n}\n.carousel-control .icon-next:before {\n content: '\\203a';\n}\n.carousel-indicators {\n position: absolute;\n bottom: 10px;\n left: 50%;\n z-index: 15;\n width: 60%;\n margin-left: -30%;\n padding-left: 0;\n list-style: none;\n text-align: center;\n}\n.carousel-indicators li {\n display: inline-block;\n width: 10px;\n height: 10px;\n margin: 1px;\n text-indent: -999px;\n border: 1px solid #fff;\n border-radius: 10px;\n cursor: pointer;\n background-color: #000 \\9;\n background-color: rgba(0, 0, 0, 0);\n}\n.carousel-indicators .active {\n margin: 0;\n width: 12px;\n height: 12px;\n background-color: #fff;\n}\n.carousel-caption {\n position: absolute;\n left: 15%;\n right: 15%;\n bottom: 20px;\n z-index: 10;\n padding-top: 20px;\n padding-bottom: 20px;\n color: #fff;\n text-align: center;\n text-shadow: 0 1px 2px rgba(0, 0, 0, 0.6);\n}\n.carousel-caption .btn {\n text-shadow: none;\n}\n@media screen and (min-width: 768px) {\n .carousel-control .glyphicon-chevron-left,\n .carousel-control .glyphicon-chevron-right,\n .carousel-control .icon-prev,\n .carousel-control .icon-next {\n width: 30px;\n height: 30px;\n margin-top: -10px;\n font-size: 30px;\n }\n .carousel-control .glyphicon-chevron-left,\n .carousel-control .icon-prev {\n margin-left: -10px;\n }\n .carousel-control .glyphicon-chevron-right,\n .carousel-control .icon-next {\n margin-right: -10px;\n }\n .carousel-caption {\n left: 20%;\n right: 20%;\n padding-bottom: 30px;\n }\n .carousel-indicators {\n bottom: 20px;\n }\n}\n.clearfix:before,\n.clearfix:after,\n.dl-horizontal dd:before,\n.dl-horizontal dd:after,\n.container:before,\n.container:after,\n.container-fluid:before,\n.container-fluid:after,\n.row:before,\n.row:after,\n.form-horizontal .form-group:before,\n.form-horizontal .form-group:after,\n.btn-toolbar:before,\n.btn-toolbar:after,\n.btn-group-vertical > .btn-group:before,\n.btn-group-vertical > .btn-group:after,\n.nav:before,\n.nav:after,\n.navbar:before,\n.navbar:after,\n.navbar-header:before,\n.navbar-header:after,\n.navbar-collapse:before,\n.navbar-collapse:after,\n.pager:before,\n.pager:after,\n.panel-body:before,\n.panel-body:after,\n.modal-header:before,\n.modal-header:after,\n.modal-footer:before,\n.modal-footer:after {\n content: \" \";\n display: table;\n}\n.clearfix:after,\n.dl-horizontal dd:after,\n.container:after,\n.container-fluid:after,\n.row:after,\n.form-horizontal .form-group:after,\n.btn-toolbar:after,\n.btn-group-vertical > .btn-group:after,\n.nav:after,\n.navbar:after,\n.navbar-header:after,\n.navbar-collapse:after,\n.pager:after,\n.panel-body:after,\n.modal-header:after,\n.modal-footer:after {\n clear: both;\n}\n.center-block {\n display: block;\n margin-left: auto;\n margin-right: auto;\n}\n.pull-right {\n float: right !important;\n}\n.pull-left {\n float: left !important;\n}\n.hide {\n display: none !important;\n}\n.show {\n display: block !important;\n}\n.invisible {\n visibility: hidden;\n}\n.text-hide {\n font: 0/0 a;\n color: transparent;\n text-shadow: none;\n background-color: transparent;\n border: 0;\n}\n.hidden {\n display: none !important;\n}\n.affix {\n position: fixed;\n}\n@-ms-viewport {\n width: device-width;\n}\n.visible-xs,\n.visible-sm,\n.visible-md,\n.visible-lg {\n display: none 
!important;\n}\n.visible-xs-block,\n.visible-xs-inline,\n.visible-xs-inline-block,\n.visible-sm-block,\n.visible-sm-inline,\n.visible-sm-inline-block,\n.visible-md-block,\n.visible-md-inline,\n.visible-md-inline-block,\n.visible-lg-block,\n.visible-lg-inline,\n.visible-lg-inline-block {\n display: none !important;\n}\n@media (max-width: 767px) {\n .visible-xs {\n display: block !important;\n }\n table.visible-xs {\n display: table !important;\n }\n tr.visible-xs {\n display: table-row !important;\n }\n th.visible-xs,\n td.visible-xs {\n display: table-cell !important;\n }\n}\n@media (max-width: 767px) {\n .visible-xs-block {\n display: block !important;\n }\n}\n@media (max-width: 767px) {\n .visible-xs-inline {\n display: inline !important;\n }\n}\n@media (max-width: 767px) {\n .visible-xs-inline-block {\n display: inline-block !important;\n }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n .visible-sm {\n display: block !important;\n }\n table.visible-sm {\n display: table !important;\n }\n tr.visible-sm {\n display: table-row !important;\n }\n th.visible-sm,\n td.visible-sm {\n display: table-cell !important;\n }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n .visible-sm-block {\n display: block !important;\n }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n .visible-sm-inline {\n display: inline !important;\n }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n .visible-sm-inline-block {\n display: inline-block !important;\n }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n .visible-md {\n display: block !important;\n }\n table.visible-md {\n display: table !important;\n }\n tr.visible-md {\n display: table-row !important;\n }\n th.visible-md,\n td.visible-md {\n display: table-cell !important;\n }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n .visible-md-block {\n display: block !important;\n }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n .visible-md-inline {\n display: inline !important;\n }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n .visible-md-inline-block {\n display: inline-block !important;\n }\n}\n@media (min-width: 1200px) {\n .visible-lg {\n display: block !important;\n }\n table.visible-lg {\n display: table !important;\n }\n tr.visible-lg {\n display: table-row !important;\n }\n th.visible-lg,\n td.visible-lg {\n display: table-cell !important;\n }\n}\n@media (min-width: 1200px) {\n .visible-lg-block {\n display: block !important;\n }\n}\n@media (min-width: 1200px) {\n .visible-lg-inline {\n display: inline !important;\n }\n}\n@media (min-width: 1200px) {\n .visible-lg-inline-block {\n display: inline-block !important;\n }\n}\n@media (max-width: 767px) {\n .hidden-xs {\n display: none !important;\n }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n .hidden-sm {\n display: none !important;\n }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n .hidden-md {\n display: none !important;\n }\n}\n@media (min-width: 1200px) {\n .hidden-lg {\n display: none !important;\n }\n}\n.visible-print {\n display: none !important;\n}\n@media print {\n .visible-print {\n display: block !important;\n }\n table.visible-print {\n display: table !important;\n }\n tr.visible-print {\n display: table-row !important;\n }\n th.visible-print,\n td.visible-print {\n display: table-cell !important;\n }\n}\n.visible-print-block {\n display: none !important;\n}\n@media print {\n .visible-print-block {\n display: block !important;\n }\n}\n.visible-print-inline {\n display: none !important;\n}\n@media print {\n 
.visible-print-inline {\n display: inline !important;\n }\n}\n.visible-print-inline-block {\n display: none !important;\n}\n@media print {\n .visible-print-inline-block {\n display: inline-block !important;\n }\n}\n@media print {\n .hidden-print {\n display: none !important;\n }\n}\n/*# sourceMappingURL=bootstrap.css.map */","/*!\n * Bootstrap v3.3.7 (http://getbootstrap.com)\n * Copyright 2011-2016 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n */\n/*! normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */\nhtml {\n font-family: sans-serif;\n -webkit-text-size-adjust: 100%;\n -ms-text-size-adjust: 100%;\n}\nbody {\n margin: 0;\n}\narticle,\naside,\ndetails,\nfigcaption,\nfigure,\nfooter,\nheader,\nhgroup,\nmain,\nmenu,\nnav,\nsection,\nsummary {\n display: block;\n}\naudio,\ncanvas,\nprogress,\nvideo {\n display: inline-block;\n vertical-align: baseline;\n}\naudio:not([controls]) {\n display: none;\n height: 0;\n}\n[hidden],\ntemplate {\n display: none;\n}\na {\n background-color: transparent;\n}\na:active,\na:hover {\n outline: 0;\n}\nabbr[title] {\n border-bottom: 1px dotted;\n}\nb,\nstrong {\n font-weight: bold;\n}\ndfn {\n font-style: italic;\n}\nh1 {\n margin: .67em 0;\n font-size: 2em;\n}\nmark {\n color: #000;\n background: #ff0;\n}\nsmall {\n font-size: 80%;\n}\nsub,\nsup {\n position: relative;\n font-size: 75%;\n line-height: 0;\n vertical-align: baseline;\n}\nsup {\n top: -.5em;\n}\nsub {\n bottom: -.25em;\n}\nimg {\n border: 0;\n}\nsvg:not(:root) {\n overflow: hidden;\n}\nfigure {\n margin: 1em 40px;\n}\nhr {\n height: 0;\n -webkit-box-sizing: content-box;\n -moz-box-sizing: content-box;\n box-sizing: content-box;\n}\npre {\n overflow: auto;\n}\ncode,\nkbd,\npre,\nsamp {\n font-family: monospace, monospace;\n font-size: 1em;\n}\nbutton,\ninput,\noptgroup,\nselect,\ntextarea {\n margin: 0;\n font: inherit;\n color: inherit;\n}\nbutton {\n overflow: visible;\n}\nbutton,\nselect {\n text-transform: none;\n}\nbutton,\nhtml input[type=\"button\"],\ninput[type=\"reset\"],\ninput[type=\"submit\"] {\n -webkit-appearance: button;\n cursor: pointer;\n}\nbutton[disabled],\nhtml input[disabled] {\n cursor: default;\n}\nbutton::-moz-focus-inner,\ninput::-moz-focus-inner {\n padding: 0;\n border: 0;\n}\ninput {\n line-height: normal;\n}\ninput[type=\"checkbox\"],\ninput[type=\"radio\"] {\n -webkit-box-sizing: border-box;\n -moz-box-sizing: border-box;\n box-sizing: border-box;\n padding: 0;\n}\ninput[type=\"number\"]::-webkit-inner-spin-button,\ninput[type=\"number\"]::-webkit-outer-spin-button {\n height: auto;\n}\ninput[type=\"search\"] {\n -webkit-box-sizing: content-box;\n -moz-box-sizing: content-box;\n box-sizing: content-box;\n -webkit-appearance: textfield;\n}\ninput[type=\"search\"]::-webkit-search-cancel-button,\ninput[type=\"search\"]::-webkit-search-decoration {\n -webkit-appearance: none;\n}\nfieldset {\n padding: .35em .625em .75em;\n margin: 0 2px;\n border: 1px solid #c0c0c0;\n}\nlegend {\n padding: 0;\n border: 0;\n}\ntextarea {\n overflow: auto;\n}\noptgroup {\n font-weight: bold;\n}\ntable {\n border-spacing: 0;\n border-collapse: collapse;\n}\ntd,\nth {\n padding: 0;\n}\n/*! 
Source: https://github.com/h5bp/html5-boilerplate/blob/master/src/css/main.css */\n@media print {\n *,\n *:before,\n *:after {\n color: #000 !important;\n text-shadow: none !important;\n background: transparent !important;\n -webkit-box-shadow: none !important;\n box-shadow: none !important;\n }\n a,\n a:visited {\n text-decoration: underline;\n }\n a[href]:after {\n content: \" (\" attr(href) \")\";\n }\n abbr[title]:after {\n content: \" (\" attr(title) \")\";\n }\n a[href^=\"#\"]:after,\n a[href^=\"javascript:\"]:after {\n content: \"\";\n }\n pre,\n blockquote {\n border: 1px solid #999;\n\n page-break-inside: avoid;\n }\n thead {\n display: table-header-group;\n }\n tr,\n img {\n page-break-inside: avoid;\n }\n img {\n max-width: 100% !important;\n }\n p,\n h2,\n h3 {\n orphans: 3;\n widows: 3;\n }\n h2,\n h3 {\n page-break-after: avoid;\n }\n .navbar {\n display: none;\n }\n .btn > .caret,\n .dropup > .btn > .caret {\n border-top-color: #000 !important;\n }\n .label {\n border: 1px solid #000;\n }\n .table {\n border-collapse: collapse !important;\n }\n .table td,\n .table th {\n background-color: #fff !important;\n }\n .table-bordered th,\n .table-bordered td {\n border: 1px solid #ddd !important;\n }\n}\n@font-face {\n font-family: 'Glyphicons Halflings';\n\n src: url('../fonts/glyphicons-halflings-regular.eot');\n src: url('../fonts/glyphicons-halflings-regular.eot?#iefix') format('embedded-opentype'), url('../fonts/glyphicons-halflings-regular.woff2') format('woff2'), url('../fonts/glyphicons-halflings-regular.woff') format('woff'), url('../fonts/glyphicons-halflings-regular.ttf') format('truetype'), url('../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular') format('svg');\n}\n.glyphicon {\n position: relative;\n top: 1px;\n display: inline-block;\n font-family: 'Glyphicons Halflings';\n font-style: normal;\n font-weight: normal;\n line-height: 1;\n\n -webkit-font-smoothing: antialiased;\n -moz-osx-font-smoothing: grayscale;\n}\n.glyphicon-asterisk:before {\n content: \"\\002a\";\n}\n.glyphicon-plus:before {\n content: \"\\002b\";\n}\n.glyphicon-euro:before,\n.glyphicon-eur:before {\n content: \"\\20ac\";\n}\n.glyphicon-minus:before {\n content: \"\\2212\";\n}\n.glyphicon-cloud:before {\n content: \"\\2601\";\n}\n.glyphicon-envelope:before {\n content: \"\\2709\";\n}\n.glyphicon-pencil:before {\n content: \"\\270f\";\n}\n.glyphicon-glass:before {\n content: \"\\e001\";\n}\n.glyphicon-music:before {\n content: \"\\e002\";\n}\n.glyphicon-search:before {\n content: \"\\e003\";\n}\n.glyphicon-heart:before {\n content: \"\\e005\";\n}\n.glyphicon-star:before {\n content: \"\\e006\";\n}\n.glyphicon-star-empty:before {\n content: \"\\e007\";\n}\n.glyphicon-user:before {\n content: \"\\e008\";\n}\n.glyphicon-film:before {\n content: \"\\e009\";\n}\n.glyphicon-th-large:before {\n content: \"\\e010\";\n}\n.glyphicon-th:before {\n content: \"\\e011\";\n}\n.glyphicon-th-list:before {\n content: \"\\e012\";\n}\n.glyphicon-ok:before {\n content: \"\\e013\";\n}\n.glyphicon-remove:before {\n content: \"\\e014\";\n}\n.glyphicon-zoom-in:before {\n content: \"\\e015\";\n}\n.glyphicon-zoom-out:before {\n content: \"\\e016\";\n}\n.glyphicon-off:before {\n content: \"\\e017\";\n}\n.glyphicon-signal:before {\n content: \"\\e018\";\n}\n.glyphicon-cog:before {\n content: \"\\e019\";\n}\n.glyphicon-trash:before {\n content: \"\\e020\";\n}\n.glyphicon-home:before {\n content: \"\\e021\";\n}\n.glyphicon-file:before {\n content: \"\\e022\";\n}\n.glyphicon-time:before {\n content: 
\"\\e023\";\n}\n.glyphicon-road:before {\n content: \"\\e024\";\n}\n.glyphicon-download-alt:before {\n content: \"\\e025\";\n}\n.glyphicon-download:before {\n content: \"\\e026\";\n}\n.glyphicon-upload:before {\n content: \"\\e027\";\n}\n.glyphicon-inbox:before {\n content: \"\\e028\";\n}\n.glyphicon-play-circle:before {\n content: \"\\e029\";\n}\n.glyphicon-repeat:before {\n content: \"\\e030\";\n}\n.glyphicon-refresh:before {\n content: \"\\e031\";\n}\n.glyphicon-list-alt:before {\n content: \"\\e032\";\n}\n.glyphicon-lock:before {\n content: \"\\e033\";\n}\n.glyphicon-flag:before {\n content: \"\\e034\";\n}\n.glyphicon-headphones:before {\n content: \"\\e035\";\n}\n.glyphicon-volume-off:before {\n content: \"\\e036\";\n}\n.glyphicon-volume-down:before {\n content: \"\\e037\";\n}\n.glyphicon-volume-up:before {\n content: \"\\e038\";\n}\n.glyphicon-qrcode:before {\n content: \"\\e039\";\n}\n.glyphicon-barcode:before {\n content: \"\\e040\";\n}\n.glyphicon-tag:before {\n content: \"\\e041\";\n}\n.glyphicon-tags:before {\n content: \"\\e042\";\n}\n.glyphicon-book:before {\n content: \"\\e043\";\n}\n.glyphicon-bookmark:before {\n content: \"\\e044\";\n}\n.glyphicon-print:before {\n content: \"\\e045\";\n}\n.glyphicon-camera:before {\n content: \"\\e046\";\n}\n.glyphicon-font:before {\n content: \"\\e047\";\n}\n.glyphicon-bold:before {\n content: \"\\e048\";\n}\n.glyphicon-italic:before {\n content: \"\\e049\";\n}\n.glyphicon-text-height:before {\n content: \"\\e050\";\n}\n.glyphicon-text-width:before {\n content: \"\\e051\";\n}\n.glyphicon-align-left:before {\n content: \"\\e052\";\n}\n.glyphicon-align-center:before {\n content: \"\\e053\";\n}\n.glyphicon-align-right:before {\n content: \"\\e054\";\n}\n.glyphicon-align-justify:before {\n content: \"\\e055\";\n}\n.glyphicon-list:before {\n content: \"\\e056\";\n}\n.glyphicon-indent-left:before {\n content: \"\\e057\";\n}\n.glyphicon-indent-right:before {\n content: \"\\e058\";\n}\n.glyphicon-facetime-video:before {\n content: \"\\e059\";\n}\n.glyphicon-picture:before {\n content: \"\\e060\";\n}\n.glyphicon-map-marker:before {\n content: \"\\e062\";\n}\n.glyphicon-adjust:before {\n content: \"\\e063\";\n}\n.glyphicon-tint:before {\n content: \"\\e064\";\n}\n.glyphicon-edit:before {\n content: \"\\e065\";\n}\n.glyphicon-share:before {\n content: \"\\e066\";\n}\n.glyphicon-check:before {\n content: \"\\e067\";\n}\n.glyphicon-move:before {\n content: \"\\e068\";\n}\n.glyphicon-step-backward:before {\n content: \"\\e069\";\n}\n.glyphicon-fast-backward:before {\n content: \"\\e070\";\n}\n.glyphicon-backward:before {\n content: \"\\e071\";\n}\n.glyphicon-play:before {\n content: \"\\e072\";\n}\n.glyphicon-pause:before {\n content: \"\\e073\";\n}\n.glyphicon-stop:before {\n content: \"\\e074\";\n}\n.glyphicon-forward:before {\n content: \"\\e075\";\n}\n.glyphicon-fast-forward:before {\n content: \"\\e076\";\n}\n.glyphicon-step-forward:before {\n content: \"\\e077\";\n}\n.glyphicon-eject:before {\n content: \"\\e078\";\n}\n.glyphicon-chevron-left:before {\n content: \"\\e079\";\n}\n.glyphicon-chevron-right:before {\n content: \"\\e080\";\n}\n.glyphicon-plus-sign:before {\n content: \"\\e081\";\n}\n.glyphicon-minus-sign:before {\n content: \"\\e082\";\n}\n.glyphicon-remove-sign:before {\n content: \"\\e083\";\n}\n.glyphicon-ok-sign:before {\n content: \"\\e084\";\n}\n.glyphicon-question-sign:before {\n content: \"\\e085\";\n}\n.glyphicon-info-sign:before {\n content: \"\\e086\";\n}\n.glyphicon-screenshot:before {\n content: 
\"\\e087\";\n}\n.glyphicon-remove-circle:before {\n content: \"\\e088\";\n}\n.glyphicon-ok-circle:before {\n content: \"\\e089\";\n}\n.glyphicon-ban-circle:before {\n content: \"\\e090\";\n}\n.glyphicon-arrow-left:before {\n content: \"\\e091\";\n}\n.glyphicon-arrow-right:before {\n content: \"\\e092\";\n}\n.glyphicon-arrow-up:before {\n content: \"\\e093\";\n}\n.glyphicon-arrow-down:before {\n content: \"\\e094\";\n}\n.glyphicon-share-alt:before {\n content: \"\\e095\";\n}\n.glyphicon-resize-full:before {\n content: \"\\e096\";\n}\n.glyphicon-resize-small:before {\n content: \"\\e097\";\n}\n.glyphicon-exclamation-sign:before {\n content: \"\\e101\";\n}\n.glyphicon-gift:before {\n content: \"\\e102\";\n}\n.glyphicon-leaf:before {\n content: \"\\e103\";\n}\n.glyphicon-fire:before {\n content: \"\\e104\";\n}\n.glyphicon-eye-open:before {\n content: \"\\e105\";\n}\n.glyphicon-eye-close:before {\n content: \"\\e106\";\n}\n.glyphicon-warning-sign:before {\n content: \"\\e107\";\n}\n.glyphicon-plane:before {\n content: \"\\e108\";\n}\n.glyphicon-calendar:before {\n content: \"\\e109\";\n}\n.glyphicon-random:before {\n content: \"\\e110\";\n}\n.glyphicon-comment:before {\n content: \"\\e111\";\n}\n.glyphicon-magnet:before {\n content: \"\\e112\";\n}\n.glyphicon-chevron-up:before {\n content: \"\\e113\";\n}\n.glyphicon-chevron-down:before {\n content: \"\\e114\";\n}\n.glyphicon-retweet:before {\n content: \"\\e115\";\n}\n.glyphicon-shopping-cart:before {\n content: \"\\e116\";\n}\n.glyphicon-folder-close:before {\n content: \"\\e117\";\n}\n.glyphicon-folder-open:before {\n content: \"\\e118\";\n}\n.glyphicon-resize-vertical:before {\n content: \"\\e119\";\n}\n.glyphicon-resize-horizontal:before {\n content: \"\\e120\";\n}\n.glyphicon-hdd:before {\n content: \"\\e121\";\n}\n.glyphicon-bullhorn:before {\n content: \"\\e122\";\n}\n.glyphicon-bell:before {\n content: \"\\e123\";\n}\n.glyphicon-certificate:before {\n content: \"\\e124\";\n}\n.glyphicon-thumbs-up:before {\n content: \"\\e125\";\n}\n.glyphicon-thumbs-down:before {\n content: \"\\e126\";\n}\n.glyphicon-hand-right:before {\n content: \"\\e127\";\n}\n.glyphicon-hand-left:before {\n content: \"\\e128\";\n}\n.glyphicon-hand-up:before {\n content: \"\\e129\";\n}\n.glyphicon-hand-down:before {\n content: \"\\e130\";\n}\n.glyphicon-circle-arrow-right:before {\n content: \"\\e131\";\n}\n.glyphicon-circle-arrow-left:before {\n content: \"\\e132\";\n}\n.glyphicon-circle-arrow-up:before {\n content: \"\\e133\";\n}\n.glyphicon-circle-arrow-down:before {\n content: \"\\e134\";\n}\n.glyphicon-globe:before {\n content: \"\\e135\";\n}\n.glyphicon-wrench:before {\n content: \"\\e136\";\n}\n.glyphicon-tasks:before {\n content: \"\\e137\";\n}\n.glyphicon-filter:before {\n content: \"\\e138\";\n}\n.glyphicon-briefcase:before {\n content: \"\\e139\";\n}\n.glyphicon-fullscreen:before {\n content: \"\\e140\";\n}\n.glyphicon-dashboard:before {\n content: \"\\e141\";\n}\n.glyphicon-paperclip:before {\n content: \"\\e142\";\n}\n.glyphicon-heart-empty:before {\n content: \"\\e143\";\n}\n.glyphicon-link:before {\n content: \"\\e144\";\n}\n.glyphicon-phone:before {\n content: \"\\e145\";\n}\n.glyphicon-pushpin:before {\n content: \"\\e146\";\n}\n.glyphicon-usd:before {\n content: \"\\e148\";\n}\n.glyphicon-gbp:before {\n content: \"\\e149\";\n}\n.glyphicon-sort:before {\n content: \"\\e150\";\n}\n.glyphicon-sort-by-alphabet:before {\n content: \"\\e151\";\n}\n.glyphicon-sort-by-alphabet-alt:before {\n content: \"\\e152\";\n}\n.glyphicon-sort-by-order:before {\n 
content: \"\\e153\";\n}\n.glyphicon-sort-by-order-alt:before {\n content: \"\\e154\";\n}\n.glyphicon-sort-by-attributes:before {\n content: \"\\e155\";\n}\n.glyphicon-sort-by-attributes-alt:before {\n content: \"\\e156\";\n}\n.glyphicon-unchecked:before {\n content: \"\\e157\";\n}\n.glyphicon-expand:before {\n content: \"\\e158\";\n}\n.glyphicon-collapse-down:before {\n content: \"\\e159\";\n}\n.glyphicon-collapse-up:before {\n content: \"\\e160\";\n}\n.glyphicon-log-in:before {\n content: \"\\e161\";\n}\n.glyphicon-flash:before {\n content: \"\\e162\";\n}\n.glyphicon-log-out:before {\n content: \"\\e163\";\n}\n.glyphicon-new-window:before {\n content: \"\\e164\";\n}\n.glyphicon-record:before {\n content: \"\\e165\";\n}\n.glyphicon-save:before {\n content: \"\\e166\";\n}\n.glyphicon-open:before {\n content: \"\\e167\";\n}\n.glyphicon-saved:before {\n content: \"\\e168\";\n}\n.glyphicon-import:before {\n content: \"\\e169\";\n}\n.glyphicon-export:before {\n content: \"\\e170\";\n}\n.glyphicon-send:before {\n content: \"\\e171\";\n}\n.glyphicon-floppy-disk:before {\n content: \"\\e172\";\n}\n.glyphicon-floppy-saved:before {\n content: \"\\e173\";\n}\n.glyphicon-floppy-remove:before {\n content: \"\\e174\";\n}\n.glyphicon-floppy-save:before {\n content: \"\\e175\";\n}\n.glyphicon-floppy-open:before {\n content: \"\\e176\";\n}\n.glyphicon-credit-card:before {\n content: \"\\e177\";\n}\n.glyphicon-transfer:before {\n content: \"\\e178\";\n}\n.glyphicon-cutlery:before {\n content: \"\\e179\";\n}\n.glyphicon-header:before {\n content: \"\\e180\";\n}\n.glyphicon-compressed:before {\n content: \"\\e181\";\n}\n.glyphicon-earphone:before {\n content: \"\\e182\";\n}\n.glyphicon-phone-alt:before {\n content: \"\\e183\";\n}\n.glyphicon-tower:before {\n content: \"\\e184\";\n}\n.glyphicon-stats:before {\n content: \"\\e185\";\n}\n.glyphicon-sd-video:before {\n content: \"\\e186\";\n}\n.glyphicon-hd-video:before {\n content: \"\\e187\";\n}\n.glyphicon-subtitles:before {\n content: \"\\e188\";\n}\n.glyphicon-sound-stereo:before {\n content: \"\\e189\";\n}\n.glyphicon-sound-dolby:before {\n content: \"\\e190\";\n}\n.glyphicon-sound-5-1:before {\n content: \"\\e191\";\n}\n.glyphicon-sound-6-1:before {\n content: \"\\e192\";\n}\n.glyphicon-sound-7-1:before {\n content: \"\\e193\";\n}\n.glyphicon-copyright-mark:before {\n content: \"\\e194\";\n}\n.glyphicon-registration-mark:before {\n content: \"\\e195\";\n}\n.glyphicon-cloud-download:before {\n content: \"\\e197\";\n}\n.glyphicon-cloud-upload:before {\n content: \"\\e198\";\n}\n.glyphicon-tree-conifer:before {\n content: \"\\e199\";\n}\n.glyphicon-tree-deciduous:before {\n content: \"\\e200\";\n}\n.glyphicon-cd:before {\n content: \"\\e201\";\n}\n.glyphicon-save-file:before {\n content: \"\\e202\";\n}\n.glyphicon-open-file:before {\n content: \"\\e203\";\n}\n.glyphicon-level-up:before {\n content: \"\\e204\";\n}\n.glyphicon-copy:before {\n content: \"\\e205\";\n}\n.glyphicon-paste:before {\n content: \"\\e206\";\n}\n.glyphicon-alert:before {\n content: \"\\e209\";\n}\n.glyphicon-equalizer:before {\n content: \"\\e210\";\n}\n.glyphicon-king:before {\n content: \"\\e211\";\n}\n.glyphicon-queen:before {\n content: \"\\e212\";\n}\n.glyphicon-pawn:before {\n content: \"\\e213\";\n}\n.glyphicon-bishop:before {\n content: \"\\e214\";\n}\n.glyphicon-knight:before {\n content: \"\\e215\";\n}\n.glyphicon-baby-formula:before {\n content: \"\\e216\";\n}\n.glyphicon-tent:before {\n content: \"\\26fa\";\n}\n.glyphicon-blackboard:before {\n content: 
\"\\e218\";\n}\n.glyphicon-bed:before {\n content: \"\\e219\";\n}\n.glyphicon-apple:before {\n content: \"\\f8ff\";\n}\n.glyphicon-erase:before {\n content: \"\\e221\";\n}\n.glyphicon-hourglass:before {\n content: \"\\231b\";\n}\n.glyphicon-lamp:before {\n content: \"\\e223\";\n}\n.glyphicon-duplicate:before {\n content: \"\\e224\";\n}\n.glyphicon-piggy-bank:before {\n content: \"\\e225\";\n}\n.glyphicon-scissors:before {\n content: \"\\e226\";\n}\n.glyphicon-bitcoin:before {\n content: \"\\e227\";\n}\n.glyphicon-btc:before {\n content: \"\\e227\";\n}\n.glyphicon-xbt:before {\n content: \"\\e227\";\n}\n.glyphicon-yen:before {\n content: \"\\00a5\";\n}\n.glyphicon-jpy:before {\n content: \"\\00a5\";\n}\n.glyphicon-ruble:before {\n content: \"\\20bd\";\n}\n.glyphicon-rub:before {\n content: \"\\20bd\";\n}\n.glyphicon-scale:before {\n content: \"\\e230\";\n}\n.glyphicon-ice-lolly:before {\n content: \"\\e231\";\n}\n.glyphicon-ice-lolly-tasted:before {\n content: \"\\e232\";\n}\n.glyphicon-education:before {\n content: \"\\e233\";\n}\n.glyphicon-option-horizontal:before {\n content: \"\\e234\";\n}\n.glyphicon-option-vertical:before {\n content: \"\\e235\";\n}\n.glyphicon-menu-hamburger:before {\n content: \"\\e236\";\n}\n.glyphicon-modal-window:before {\n content: \"\\e237\";\n}\n.glyphicon-oil:before {\n content: \"\\e238\";\n}\n.glyphicon-grain:before {\n content: \"\\e239\";\n}\n.glyphicon-sunglasses:before {\n content: \"\\e240\";\n}\n.glyphicon-text-size:before {\n content: \"\\e241\";\n}\n.glyphicon-text-color:before {\n content: \"\\e242\";\n}\n.glyphicon-text-background:before {\n content: \"\\e243\";\n}\n.glyphicon-object-align-top:before {\n content: \"\\e244\";\n}\n.glyphicon-object-align-bottom:before {\n content: \"\\e245\";\n}\n.glyphicon-object-align-horizontal:before {\n content: \"\\e246\";\n}\n.glyphicon-object-align-left:before {\n content: \"\\e247\";\n}\n.glyphicon-object-align-vertical:before {\n content: \"\\e248\";\n}\n.glyphicon-object-align-right:before {\n content: \"\\e249\";\n}\n.glyphicon-triangle-right:before {\n content: \"\\e250\";\n}\n.glyphicon-triangle-left:before {\n content: \"\\e251\";\n}\n.glyphicon-triangle-bottom:before {\n content: \"\\e252\";\n}\n.glyphicon-triangle-top:before {\n content: \"\\e253\";\n}\n.glyphicon-console:before {\n content: \"\\e254\";\n}\n.glyphicon-superscript:before {\n content: \"\\e255\";\n}\n.glyphicon-subscript:before {\n content: \"\\e256\";\n}\n.glyphicon-menu-left:before {\n content: \"\\e257\";\n}\n.glyphicon-menu-right:before {\n content: \"\\e258\";\n}\n.glyphicon-menu-down:before {\n content: \"\\e259\";\n}\n.glyphicon-menu-up:before {\n content: \"\\e260\";\n}\n* {\n -webkit-box-sizing: border-box;\n -moz-box-sizing: border-box;\n box-sizing: border-box;\n}\n*:before,\n*:after {\n -webkit-box-sizing: border-box;\n -moz-box-sizing: border-box;\n box-sizing: border-box;\n}\nhtml {\n font-size: 10px;\n\n -webkit-tap-highlight-color: rgba(0, 0, 0, 0);\n}\nbody {\n font-family: \"Helvetica Neue\", Helvetica, Arial, sans-serif;\n font-size: 14px;\n line-height: 1.42857143;\n color: #333;\n background-color: #fff;\n}\ninput,\nbutton,\nselect,\ntextarea {\n font-family: inherit;\n font-size: inherit;\n line-height: inherit;\n}\na {\n color: #337ab7;\n text-decoration: none;\n}\na:hover,\na:focus {\n color: #23527c;\n text-decoration: underline;\n}\na:focus {\n outline: 5px auto -webkit-focus-ring-color;\n outline-offset: -2px;\n}\nfigure {\n margin: 0;\n}\nimg {\n vertical-align: middle;\n}\n.img-responsive,\n.thumbnail > 
img,\n.thumbnail a > img,\n.carousel-inner > .item > img,\n.carousel-inner > .item > a > img {\n display: block;\n max-width: 100%;\n height: auto;\n}\n.img-rounded {\n border-radius: 6px;\n}\n.img-thumbnail {\n display: inline-block;\n max-width: 100%;\n height: auto;\n padding: 4px;\n line-height: 1.42857143;\n background-color: #fff;\n border: 1px solid #ddd;\n border-radius: 4px;\n -webkit-transition: all .2s ease-in-out;\n -o-transition: all .2s ease-in-out;\n transition: all .2s ease-in-out;\n}\n.img-circle {\n border-radius: 50%;\n}\nhr {\n margin-top: 20px;\n margin-bottom: 20px;\n border: 0;\n border-top: 1px solid #eee;\n}\n.sr-only {\n position: absolute;\n width: 1px;\n height: 1px;\n padding: 0;\n margin: -1px;\n overflow: hidden;\n clip: rect(0, 0, 0, 0);\n border: 0;\n}\n.sr-only-focusable:active,\n.sr-only-focusable:focus {\n position: static;\n width: auto;\n height: auto;\n margin: 0;\n overflow: visible;\n clip: auto;\n}\n[role=\"button\"] {\n cursor: pointer;\n}\nh1,\nh2,\nh3,\nh4,\nh5,\nh6,\n.h1,\n.h2,\n.h3,\n.h4,\n.h5,\n.h6 {\n font-family: inherit;\n font-weight: 500;\n line-height: 1.1;\n color: inherit;\n}\nh1 small,\nh2 small,\nh3 small,\nh4 small,\nh5 small,\nh6 small,\n.h1 small,\n.h2 small,\n.h3 small,\n.h4 small,\n.h5 small,\n.h6 small,\nh1 .small,\nh2 .small,\nh3 .small,\nh4 .small,\nh5 .small,\nh6 .small,\n.h1 .small,\n.h2 .small,\n.h3 .small,\n.h4 .small,\n.h5 .small,\n.h6 .small {\n font-weight: normal;\n line-height: 1;\n color: #777;\n}\nh1,\n.h1,\nh2,\n.h2,\nh3,\n.h3 {\n margin-top: 20px;\n margin-bottom: 10px;\n}\nh1 small,\n.h1 small,\nh2 small,\n.h2 small,\nh3 small,\n.h3 small,\nh1 .small,\n.h1 .small,\nh2 .small,\n.h2 .small,\nh3 .small,\n.h3 .small {\n font-size: 65%;\n}\nh4,\n.h4,\nh5,\n.h5,\nh6,\n.h6 {\n margin-top: 10px;\n margin-bottom: 10px;\n}\nh4 small,\n.h4 small,\nh5 small,\n.h5 small,\nh6 small,\n.h6 small,\nh4 .small,\n.h4 .small,\nh5 .small,\n.h5 .small,\nh6 .small,\n.h6 .small {\n font-size: 75%;\n}\nh1,\n.h1 {\n font-size: 36px;\n}\nh2,\n.h2 {\n font-size: 30px;\n}\nh3,\n.h3 {\n font-size: 24px;\n}\nh4,\n.h4 {\n font-size: 18px;\n}\nh5,\n.h5 {\n font-size: 14px;\n}\nh6,\n.h6 {\n font-size: 12px;\n}\np {\n margin: 0 0 10px;\n}\n.lead {\n margin-bottom: 20px;\n font-size: 16px;\n font-weight: 300;\n line-height: 1.4;\n}\n@media (min-width: 768px) {\n .lead {\n font-size: 21px;\n }\n}\nsmall,\n.small {\n font-size: 85%;\n}\nmark,\n.mark {\n padding: .2em;\n background-color: #fcf8e3;\n}\n.text-left {\n text-align: left;\n}\n.text-right {\n text-align: right;\n}\n.text-center {\n text-align: center;\n}\n.text-justify {\n text-align: justify;\n}\n.text-nowrap {\n white-space: nowrap;\n}\n.text-lowercase {\n text-transform: lowercase;\n}\n.text-uppercase {\n text-transform: uppercase;\n}\n.text-capitalize {\n text-transform: capitalize;\n}\n.text-muted {\n color: #777;\n}\n.text-primary {\n color: #337ab7;\n}\na.text-primary:hover,\na.text-primary:focus {\n color: #286090;\n}\n.text-success {\n color: #3c763d;\n}\na.text-success:hover,\na.text-success:focus {\n color: #2b542c;\n}\n.text-info {\n color: #31708f;\n}\na.text-info:hover,\na.text-info:focus {\n color: #245269;\n}\n.text-warning {\n color: #8a6d3b;\n}\na.text-warning:hover,\na.text-warning:focus {\n color: #66512c;\n}\n.text-danger {\n color: #a94442;\n}\na.text-danger:hover,\na.text-danger:focus {\n color: #843534;\n}\n.bg-primary {\n color: #fff;\n background-color: #337ab7;\n}\na.bg-primary:hover,\na.bg-primary:focus {\n background-color: #286090;\n}\n.bg-success {\n 
background-color: #dff0d8;\n}\na.bg-success:hover,\na.bg-success:focus {\n background-color: #c1e2b3;\n}\n.bg-info {\n background-color: #d9edf7;\n}\na.bg-info:hover,\na.bg-info:focus {\n background-color: #afd9ee;\n}\n.bg-warning {\n background-color: #fcf8e3;\n}\na.bg-warning:hover,\na.bg-warning:focus {\n background-color: #f7ecb5;\n}\n.bg-danger {\n background-color: #f2dede;\n}\na.bg-danger:hover,\na.bg-danger:focus {\n background-color: #e4b9b9;\n}\n.page-header {\n padding-bottom: 9px;\n margin: 40px 0 20px;\n border-bottom: 1px solid #eee;\n}\nul,\nol {\n margin-top: 0;\n margin-bottom: 10px;\n}\nul ul,\nol ul,\nul ol,\nol ol {\n margin-bottom: 0;\n}\n.list-unstyled {\n padding-left: 0;\n list-style: none;\n}\n.list-inline {\n padding-left: 0;\n margin-left: -5px;\n list-style: none;\n}\n.list-inline > li {\n display: inline-block;\n padding-right: 5px;\n padding-left: 5px;\n}\ndl {\n margin-top: 0;\n margin-bottom: 20px;\n}\ndt,\ndd {\n line-height: 1.42857143;\n}\ndt {\n font-weight: bold;\n}\ndd {\n margin-left: 0;\n}\n@media (min-width: 768px) {\n .dl-horizontal dt {\n float: left;\n width: 160px;\n overflow: hidden;\n clear: left;\n text-align: right;\n text-overflow: ellipsis;\n white-space: nowrap;\n }\n .dl-horizontal dd {\n margin-left: 180px;\n }\n}\nabbr[title],\nabbr[data-original-title] {\n cursor: help;\n border-bottom: 1px dotted #777;\n}\n.initialism {\n font-size: 90%;\n text-transform: uppercase;\n}\nblockquote {\n padding: 10px 20px;\n margin: 0 0 20px;\n font-size: 17.5px;\n border-left: 5px solid #eee;\n}\nblockquote p:last-child,\nblockquote ul:last-child,\nblockquote ol:last-child {\n margin-bottom: 0;\n}\nblockquote footer,\nblockquote small,\nblockquote .small {\n display: block;\n font-size: 80%;\n line-height: 1.42857143;\n color: #777;\n}\nblockquote footer:before,\nblockquote small:before,\nblockquote .small:before {\n content: '\\2014 \\00A0';\n}\n.blockquote-reverse,\nblockquote.pull-right {\n padding-right: 15px;\n padding-left: 0;\n text-align: right;\n border-right: 5px solid #eee;\n border-left: 0;\n}\n.blockquote-reverse footer:before,\nblockquote.pull-right footer:before,\n.blockquote-reverse small:before,\nblockquote.pull-right small:before,\n.blockquote-reverse .small:before,\nblockquote.pull-right .small:before {\n content: '';\n}\n.blockquote-reverse footer:after,\nblockquote.pull-right footer:after,\n.blockquote-reverse small:after,\nblockquote.pull-right small:after,\n.blockquote-reverse .small:after,\nblockquote.pull-right .small:after {\n content: '\\00A0 \\2014';\n}\naddress {\n margin-bottom: 20px;\n font-style: normal;\n line-height: 1.42857143;\n}\ncode,\nkbd,\npre,\nsamp {\n font-family: Menlo, Monaco, Consolas, \"Courier New\", monospace;\n}\ncode {\n padding: 2px 4px;\n font-size: 90%;\n color: #c7254e;\n background-color: #f9f2f4;\n border-radius: 4px;\n}\nkbd {\n padding: 2px 4px;\n font-size: 90%;\n color: #fff;\n background-color: #333;\n border-radius: 3px;\n -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .25);\n box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .25);\n}\nkbd kbd {\n padding: 0;\n font-size: 100%;\n font-weight: bold;\n -webkit-box-shadow: none;\n box-shadow: none;\n}\npre {\n display: block;\n padding: 9.5px;\n margin: 0 0 10px;\n font-size: 13px;\n line-height: 1.42857143;\n color: #333;\n word-break: break-all;\n word-wrap: break-word;\n background-color: #f5f5f5;\n border: 1px solid #ccc;\n border-radius: 4px;\n}\npre code {\n padding: 0;\n font-size: inherit;\n color: inherit;\n white-space: pre-wrap;\n 
background-color: transparent;\n border-radius: 0;\n}\n.pre-scrollable {\n max-height: 340px;\n overflow-y: scroll;\n}\n.container {\n padding-right: 15px;\n padding-left: 15px;\n margin-right: auto;\n margin-left: auto;\n}\n@media (min-width: 768px) {\n .container {\n width: 750px;\n }\n}\n@media (min-width: 992px) {\n .container {\n width: 970px;\n }\n}\n@media (min-width: 1200px) {\n .container {\n width: 1170px;\n }\n}\n.container-fluid {\n padding-right: 15px;\n padding-left: 15px;\n margin-right: auto;\n margin-left: auto;\n}\n.row {\n margin-right: -15px;\n margin-left: -15px;\n}\n.col-xs-1, .col-sm-1, .col-md-1, .col-lg-1, .col-xs-2, .col-sm-2, .col-md-2, .col-lg-2, .col-xs-3, .col-sm-3, .col-md-3, .col-lg-3, .col-xs-4, .col-sm-4, .col-md-4, .col-lg-4, .col-xs-5, .col-sm-5, .col-md-5, .col-lg-5, .col-xs-6, .col-sm-6, .col-md-6, .col-lg-6, .col-xs-7, .col-sm-7, .col-md-7, .col-lg-7, .col-xs-8, .col-sm-8, .col-md-8, .col-lg-8, .col-xs-9, .col-sm-9, .col-md-9, .col-lg-9, .col-xs-10, .col-sm-10, .col-md-10, .col-lg-10, .col-xs-11, .col-sm-11, .col-md-11, .col-lg-11, .col-xs-12, .col-sm-12, .col-md-12, .col-lg-12 {\n position: relative;\n min-height: 1px;\n padding-right: 15px;\n padding-left: 15px;\n}\n.col-xs-1, .col-xs-2, .col-xs-3, .col-xs-4, .col-xs-5, .col-xs-6, .col-xs-7, .col-xs-8, .col-xs-9, .col-xs-10, .col-xs-11, .col-xs-12 {\n float: left;\n}\n.col-xs-12 {\n width: 100%;\n}\n.col-xs-11 {\n width: 91.66666667%;\n}\n.col-xs-10 {\n width: 83.33333333%;\n}\n.col-xs-9 {\n width: 75%;\n}\n.col-xs-8 {\n width: 66.66666667%;\n}\n.col-xs-7 {\n width: 58.33333333%;\n}\n.col-xs-6 {\n width: 50%;\n}\n.col-xs-5 {\n width: 41.66666667%;\n}\n.col-xs-4 {\n width: 33.33333333%;\n}\n.col-xs-3 {\n width: 25%;\n}\n.col-xs-2 {\n width: 16.66666667%;\n}\n.col-xs-1 {\n width: 8.33333333%;\n}\n.col-xs-pull-12 {\n right: 100%;\n}\n.col-xs-pull-11 {\n right: 91.66666667%;\n}\n.col-xs-pull-10 {\n right: 83.33333333%;\n}\n.col-xs-pull-9 {\n right: 75%;\n}\n.col-xs-pull-8 {\n right: 66.66666667%;\n}\n.col-xs-pull-7 {\n right: 58.33333333%;\n}\n.col-xs-pull-6 {\n right: 50%;\n}\n.col-xs-pull-5 {\n right: 41.66666667%;\n}\n.col-xs-pull-4 {\n right: 33.33333333%;\n}\n.col-xs-pull-3 {\n right: 25%;\n}\n.col-xs-pull-2 {\n right: 16.66666667%;\n}\n.col-xs-pull-1 {\n right: 8.33333333%;\n}\n.col-xs-pull-0 {\n right: auto;\n}\n.col-xs-push-12 {\n left: 100%;\n}\n.col-xs-push-11 {\n left: 91.66666667%;\n}\n.col-xs-push-10 {\n left: 83.33333333%;\n}\n.col-xs-push-9 {\n left: 75%;\n}\n.col-xs-push-8 {\n left: 66.66666667%;\n}\n.col-xs-push-7 {\n left: 58.33333333%;\n}\n.col-xs-push-6 {\n left: 50%;\n}\n.col-xs-push-5 {\n left: 41.66666667%;\n}\n.col-xs-push-4 {\n left: 33.33333333%;\n}\n.col-xs-push-3 {\n left: 25%;\n}\n.col-xs-push-2 {\n left: 16.66666667%;\n}\n.col-xs-push-1 {\n left: 8.33333333%;\n}\n.col-xs-push-0 {\n left: auto;\n}\n.col-xs-offset-12 {\n margin-left: 100%;\n}\n.col-xs-offset-11 {\n margin-left: 91.66666667%;\n}\n.col-xs-offset-10 {\n margin-left: 83.33333333%;\n}\n.col-xs-offset-9 {\n margin-left: 75%;\n}\n.col-xs-offset-8 {\n margin-left: 66.66666667%;\n}\n.col-xs-offset-7 {\n margin-left: 58.33333333%;\n}\n.col-xs-offset-6 {\n margin-left: 50%;\n}\n.col-xs-offset-5 {\n margin-left: 41.66666667%;\n}\n.col-xs-offset-4 {\n margin-left: 33.33333333%;\n}\n.col-xs-offset-3 {\n margin-left: 25%;\n}\n.col-xs-offset-2 {\n margin-left: 16.66666667%;\n}\n.col-xs-offset-1 {\n margin-left: 8.33333333%;\n}\n.col-xs-offset-0 {\n margin-left: 0;\n}\n@media (min-width: 768px) {\n .col-sm-1, .col-sm-2, 
.col-sm-3, .col-sm-4, .col-sm-5, .col-sm-6, .col-sm-7, .col-sm-8, .col-sm-9, .col-sm-10, .col-sm-11, .col-sm-12 {\n float: left;\n }\n .col-sm-12 {\n width: 100%;\n }\n .col-sm-11 {\n width: 91.66666667%;\n }\n .col-sm-10 {\n width: 83.33333333%;\n }\n .col-sm-9 {\n width: 75%;\n }\n .col-sm-8 {\n width: 66.66666667%;\n }\n .col-sm-7 {\n width: 58.33333333%;\n }\n .col-sm-6 {\n width: 50%;\n }\n .col-sm-5 {\n width: 41.66666667%;\n }\n .col-sm-4 {\n width: 33.33333333%;\n }\n .col-sm-3 {\n width: 25%;\n }\n .col-sm-2 {\n width: 16.66666667%;\n }\n .col-sm-1 {\n width: 8.33333333%;\n }\n .col-sm-pull-12 {\n right: 100%;\n }\n .col-sm-pull-11 {\n right: 91.66666667%;\n }\n .col-sm-pull-10 {\n right: 83.33333333%;\n }\n .col-sm-pull-9 {\n right: 75%;\n }\n .col-sm-pull-8 {\n right: 66.66666667%;\n }\n .col-sm-pull-7 {\n right: 58.33333333%;\n }\n .col-sm-pull-6 {\n right: 50%;\n }\n .col-sm-pull-5 {\n right: 41.66666667%;\n }\n .col-sm-pull-4 {\n right: 33.33333333%;\n }\n .col-sm-pull-3 {\n right: 25%;\n }\n .col-sm-pull-2 {\n right: 16.66666667%;\n }\n .col-sm-pull-1 {\n right: 8.33333333%;\n }\n .col-sm-pull-0 {\n right: auto;\n }\n .col-sm-push-12 {\n left: 100%;\n }\n .col-sm-push-11 {\n left: 91.66666667%;\n }\n .col-sm-push-10 {\n left: 83.33333333%;\n }\n .col-sm-push-9 {\n left: 75%;\n }\n .col-sm-push-8 {\n left: 66.66666667%;\n }\n .col-sm-push-7 {\n left: 58.33333333%;\n }\n .col-sm-push-6 {\n left: 50%;\n }\n .col-sm-push-5 {\n left: 41.66666667%;\n }\n .col-sm-push-4 {\n left: 33.33333333%;\n }\n .col-sm-push-3 {\n left: 25%;\n }\n .col-sm-push-2 {\n left: 16.66666667%;\n }\n .col-sm-push-1 {\n left: 8.33333333%;\n }\n .col-sm-push-0 {\n left: auto;\n }\n .col-sm-offset-12 {\n margin-left: 100%;\n }\n .col-sm-offset-11 {\n margin-left: 91.66666667%;\n }\n .col-sm-offset-10 {\n margin-left: 83.33333333%;\n }\n .col-sm-offset-9 {\n margin-left: 75%;\n }\n .col-sm-offset-8 {\n margin-left: 66.66666667%;\n }\n .col-sm-offset-7 {\n margin-left: 58.33333333%;\n }\n .col-sm-offset-6 {\n margin-left: 50%;\n }\n .col-sm-offset-5 {\n margin-left: 41.66666667%;\n }\n .col-sm-offset-4 {\n margin-left: 33.33333333%;\n }\n .col-sm-offset-3 {\n margin-left: 25%;\n }\n .col-sm-offset-2 {\n margin-left: 16.66666667%;\n }\n .col-sm-offset-1 {\n margin-left: 8.33333333%;\n }\n .col-sm-offset-0 {\n margin-left: 0;\n }\n}\n@media (min-width: 992px) {\n .col-md-1, .col-md-2, .col-md-3, .col-md-4, .col-md-5, .col-md-6, .col-md-7, .col-md-8, .col-md-9, .col-md-10, .col-md-11, .col-md-12 {\n float: left;\n }\n .col-md-12 {\n width: 100%;\n }\n .col-md-11 {\n width: 91.66666667%;\n }\n .col-md-10 {\n width: 83.33333333%;\n }\n .col-md-9 {\n width: 75%;\n }\n .col-md-8 {\n width: 66.66666667%;\n }\n .col-md-7 {\n width: 58.33333333%;\n }\n .col-md-6 {\n width: 50%;\n }\n .col-md-5 {\n width: 41.66666667%;\n }\n .col-md-4 {\n width: 33.33333333%;\n }\n .col-md-3 {\n width: 25%;\n }\n .col-md-2 {\n width: 16.66666667%;\n }\n .col-md-1 {\n width: 8.33333333%;\n }\n .col-md-pull-12 {\n right: 100%;\n }\n .col-md-pull-11 {\n right: 91.66666667%;\n }\n .col-md-pull-10 {\n right: 83.33333333%;\n }\n .col-md-pull-9 {\n right: 75%;\n }\n .col-md-pull-8 {\n right: 66.66666667%;\n }\n .col-md-pull-7 {\n right: 58.33333333%;\n }\n .col-md-pull-6 {\n right: 50%;\n }\n .col-md-pull-5 {\n right: 41.66666667%;\n }\n .col-md-pull-4 {\n right: 33.33333333%;\n }\n .col-md-pull-3 {\n right: 25%;\n }\n .col-md-pull-2 {\n right: 16.66666667%;\n }\n .col-md-pull-1 {\n right: 8.33333333%;\n }\n .col-md-pull-0 {\n right: auto;\n 
}\n .col-md-push-12 {\n left: 100%;\n }\n .col-md-push-11 {\n left: 91.66666667%;\n }\n .col-md-push-10 {\n left: 83.33333333%;\n }\n .col-md-push-9 {\n left: 75%;\n }\n .col-md-push-8 {\n left: 66.66666667%;\n }\n .col-md-push-7 {\n left: 58.33333333%;\n }\n .col-md-push-6 {\n left: 50%;\n }\n .col-md-push-5 {\n left: 41.66666667%;\n }\n .col-md-push-4 {\n left: 33.33333333%;\n }\n .col-md-push-3 {\n left: 25%;\n }\n .col-md-push-2 {\n left: 16.66666667%;\n }\n .col-md-push-1 {\n left: 8.33333333%;\n }\n .col-md-push-0 {\n left: auto;\n }\n .col-md-offset-12 {\n margin-left: 100%;\n }\n .col-md-offset-11 {\n margin-left: 91.66666667%;\n }\n .col-md-offset-10 {\n margin-left: 83.33333333%;\n }\n .col-md-offset-9 {\n margin-left: 75%;\n }\n .col-md-offset-8 {\n margin-left: 66.66666667%;\n }\n .col-md-offset-7 {\n margin-left: 58.33333333%;\n }\n .col-md-offset-6 {\n margin-left: 50%;\n }\n .col-md-offset-5 {\n margin-left: 41.66666667%;\n }\n .col-md-offset-4 {\n margin-left: 33.33333333%;\n }\n .col-md-offset-3 {\n margin-left: 25%;\n }\n .col-md-offset-2 {\n margin-left: 16.66666667%;\n }\n .col-md-offset-1 {\n margin-left: 8.33333333%;\n }\n .col-md-offset-0 {\n margin-left: 0;\n }\n}\n@media (min-width: 1200px) {\n .col-lg-1, .col-lg-2, .col-lg-3, .col-lg-4, .col-lg-5, .col-lg-6, .col-lg-7, .col-lg-8, .col-lg-9, .col-lg-10, .col-lg-11, .col-lg-12 {\n float: left;\n }\n .col-lg-12 {\n width: 100%;\n }\n .col-lg-11 {\n width: 91.66666667%;\n }\n .col-lg-10 {\n width: 83.33333333%;\n }\n .col-lg-9 {\n width: 75%;\n }\n .col-lg-8 {\n width: 66.66666667%;\n }\n .col-lg-7 {\n width: 58.33333333%;\n }\n .col-lg-6 {\n width: 50%;\n }\n .col-lg-5 {\n width: 41.66666667%;\n }\n .col-lg-4 {\n width: 33.33333333%;\n }\n .col-lg-3 {\n width: 25%;\n }\n .col-lg-2 {\n width: 16.66666667%;\n }\n .col-lg-1 {\n width: 8.33333333%;\n }\n .col-lg-pull-12 {\n right: 100%;\n }\n .col-lg-pull-11 {\n right: 91.66666667%;\n }\n .col-lg-pull-10 {\n right: 83.33333333%;\n }\n .col-lg-pull-9 {\n right: 75%;\n }\n .col-lg-pull-8 {\n right: 66.66666667%;\n }\n .col-lg-pull-7 {\n right: 58.33333333%;\n }\n .col-lg-pull-6 {\n right: 50%;\n }\n .col-lg-pull-5 {\n right: 41.66666667%;\n }\n .col-lg-pull-4 {\n right: 33.33333333%;\n }\n .col-lg-pull-3 {\n right: 25%;\n }\n .col-lg-pull-2 {\n right: 16.66666667%;\n }\n .col-lg-pull-1 {\n right: 8.33333333%;\n }\n .col-lg-pull-0 {\n right: auto;\n }\n .col-lg-push-12 {\n left: 100%;\n }\n .col-lg-push-11 {\n left: 91.66666667%;\n }\n .col-lg-push-10 {\n left: 83.33333333%;\n }\n .col-lg-push-9 {\n left: 75%;\n }\n .col-lg-push-8 {\n left: 66.66666667%;\n }\n .col-lg-push-7 {\n left: 58.33333333%;\n }\n .col-lg-push-6 {\n left: 50%;\n }\n .col-lg-push-5 {\n left: 41.66666667%;\n }\n .col-lg-push-4 {\n left: 33.33333333%;\n }\n .col-lg-push-3 {\n left: 25%;\n }\n .col-lg-push-2 {\n left: 16.66666667%;\n }\n .col-lg-push-1 {\n left: 8.33333333%;\n }\n .col-lg-push-0 {\n left: auto;\n }\n .col-lg-offset-12 {\n margin-left: 100%;\n }\n .col-lg-offset-11 {\n margin-left: 91.66666667%;\n }\n .col-lg-offset-10 {\n margin-left: 83.33333333%;\n }\n .col-lg-offset-9 {\n margin-left: 75%;\n }\n .col-lg-offset-8 {\n margin-left: 66.66666667%;\n }\n .col-lg-offset-7 {\n margin-left: 58.33333333%;\n }\n .col-lg-offset-6 {\n margin-left: 50%;\n }\n .col-lg-offset-5 {\n margin-left: 41.66666667%;\n }\n .col-lg-offset-4 {\n margin-left: 33.33333333%;\n }\n .col-lg-offset-3 {\n margin-left: 25%;\n }\n .col-lg-offset-2 {\n margin-left: 16.66666667%;\n }\n .col-lg-offset-1 {\n margin-left: 
8.33333333%;\n }\n .col-lg-offset-0 {\n margin-left: 0;\n }\n}\ntable {\n background-color: transparent;\n}\ncaption {\n padding-top: 8px;\n padding-bottom: 8px;\n color: #777;\n text-align: left;\n}\nth {\n text-align: left;\n}\n.table {\n width: 100%;\n max-width: 100%;\n margin-bottom: 20px;\n}\n.table > thead > tr > th,\n.table > tbody > tr > th,\n.table > tfoot > tr > th,\n.table > thead > tr > td,\n.table > tbody > tr > td,\n.table > tfoot > tr > td {\n padding: 8px;\n line-height: 1.42857143;\n vertical-align: top;\n border-top: 1px solid #ddd;\n}\n.table > thead > tr > th {\n vertical-align: bottom;\n border-bottom: 2px solid #ddd;\n}\n.table > caption + thead > tr:first-child > th,\n.table > colgroup + thead > tr:first-child > th,\n.table > thead:first-child > tr:first-child > th,\n.table > caption + thead > tr:first-child > td,\n.table > colgroup + thead > tr:first-child > td,\n.table > thead:first-child > tr:first-child > td {\n border-top: 0;\n}\n.table > tbody + tbody {\n border-top: 2px solid #ddd;\n}\n.table .table {\n background-color: #fff;\n}\n.table-condensed > thead > tr > th,\n.table-condensed > tbody > tr > th,\n.table-condensed > tfoot > tr > th,\n.table-condensed > thead > tr > td,\n.table-condensed > tbody > tr > td,\n.table-condensed > tfoot > tr > td {\n padding: 5px;\n}\n.table-bordered {\n border: 1px solid #ddd;\n}\n.table-bordered > thead > tr > th,\n.table-bordered > tbody > tr > th,\n.table-bordered > tfoot > tr > th,\n.table-bordered > thead > tr > td,\n.table-bordered > tbody > tr > td,\n.table-bordered > tfoot > tr > td {\n border: 1px solid #ddd;\n}\n.table-bordered > thead > tr > th,\n.table-bordered > thead > tr > td {\n border-bottom-width: 2px;\n}\n.table-striped > tbody > tr:nth-of-type(odd) {\n background-color: #f9f9f9;\n}\n.table-hover > tbody > tr:hover {\n background-color: #f5f5f5;\n}\ntable col[class*=\"col-\"] {\n position: static;\n display: table-column;\n float: none;\n}\ntable td[class*=\"col-\"],\ntable th[class*=\"col-\"] {\n position: static;\n display: table-cell;\n float: none;\n}\n.table > thead > tr > td.active,\n.table > tbody > tr > td.active,\n.table > tfoot > tr > td.active,\n.table > thead > tr > th.active,\n.table > tbody > tr > th.active,\n.table > tfoot > tr > th.active,\n.table > thead > tr.active > td,\n.table > tbody > tr.active > td,\n.table > tfoot > tr.active > td,\n.table > thead > tr.active > th,\n.table > tbody > tr.active > th,\n.table > tfoot > tr.active > th {\n background-color: #f5f5f5;\n}\n.table-hover > tbody > tr > td.active:hover,\n.table-hover > tbody > tr > th.active:hover,\n.table-hover > tbody > tr.active:hover > td,\n.table-hover > tbody > tr:hover > .active,\n.table-hover > tbody > tr.active:hover > th {\n background-color: #e8e8e8;\n}\n.table > thead > tr > td.success,\n.table > tbody > tr > td.success,\n.table > tfoot > tr > td.success,\n.table > thead > tr > th.success,\n.table > tbody > tr > th.success,\n.table > tfoot > tr > th.success,\n.table > thead > tr.success > td,\n.table > tbody > tr.success > td,\n.table > tfoot > tr.success > td,\n.table > thead > tr.success > th,\n.table > tbody > tr.success > th,\n.table > tfoot > tr.success > th {\n background-color: #dff0d8;\n}\n.table-hover > tbody > tr > td.success:hover,\n.table-hover > tbody > tr > th.success:hover,\n.table-hover > tbody > tr.success:hover > td,\n.table-hover > tbody > tr:hover > .success,\n.table-hover > tbody > tr.success:hover > th {\n background-color: #d0e9c6;\n}\n.table > thead > tr > td.info,\n.table > tbody > tr > 
td.info,\n.table > tfoot > tr > td.info,\n.table > thead > tr > th.info,\n.table > tbody > tr > th.info,\n.table > tfoot > tr > th.info,\n.table > thead > tr.info > td,\n.table > tbody > tr.info > td,\n.table > tfoot > tr.info > td,\n.table > thead > tr.info > th,\n.table > tbody > tr.info > th,\n.table > tfoot > tr.info > th {\n background-color: #d9edf7;\n}\n.table-hover > tbody > tr > td.info:hover,\n.table-hover > tbody > tr > th.info:hover,\n.table-hover > tbody > tr.info:hover > td,\n.table-hover > tbody > tr:hover > .info,\n.table-hover > tbody > tr.info:hover > th {\n background-color: #c4e3f3;\n}\n.table > thead > tr > td.warning,\n.table > tbody > tr > td.warning,\n.table > tfoot > tr > td.warning,\n.table > thead > tr > th.warning,\n.table > tbody > tr > th.warning,\n.table > tfoot > tr > th.warning,\n.table > thead > tr.warning > td,\n.table > tbody > tr.warning > td,\n.table > tfoot > tr.warning > td,\n.table > thead > tr.warning > th,\n.table > tbody > tr.warning > th,\n.table > tfoot > tr.warning > th {\n background-color: #fcf8e3;\n}\n.table-hover > tbody > tr > td.warning:hover,\n.table-hover > tbody > tr > th.warning:hover,\n.table-hover > tbody > tr.warning:hover > td,\n.table-hover > tbody > tr:hover > .warning,\n.table-hover > tbody > tr.warning:hover > th {\n background-color: #faf2cc;\n}\n.table > thead > tr > td.danger,\n.table > tbody > tr > td.danger,\n.table > tfoot > tr > td.danger,\n.table > thead > tr > th.danger,\n.table > tbody > tr > th.danger,\n.table > tfoot > tr > th.danger,\n.table > thead > tr.danger > td,\n.table > tbody > tr.danger > td,\n.table > tfoot > tr.danger > td,\n.table > thead > tr.danger > th,\n.table > tbody > tr.danger > th,\n.table > tfoot > tr.danger > th {\n background-color: #f2dede;\n}\n.table-hover > tbody > tr > td.danger:hover,\n.table-hover > tbody > tr > th.danger:hover,\n.table-hover > tbody > tr.danger:hover > td,\n.table-hover > tbody > tr:hover > .danger,\n.table-hover > tbody > tr.danger:hover > th {\n background-color: #ebcccc;\n}\n.table-responsive {\n min-height: .01%;\n overflow-x: auto;\n}\n@media screen and (max-width: 767px) {\n .table-responsive {\n width: 100%;\n margin-bottom: 15px;\n overflow-y: hidden;\n -ms-overflow-style: -ms-autohiding-scrollbar;\n border: 1px solid #ddd;\n }\n .table-responsive > .table {\n margin-bottom: 0;\n }\n .table-responsive > .table > thead > tr > th,\n .table-responsive > .table > tbody > tr > th,\n .table-responsive > .table > tfoot > tr > th,\n .table-responsive > .table > thead > tr > td,\n .table-responsive > .table > tbody > tr > td,\n .table-responsive > .table > tfoot > tr > td {\n white-space: nowrap;\n }\n .table-responsive > .table-bordered {\n border: 0;\n }\n .table-responsive > .table-bordered > thead > tr > th:first-child,\n .table-responsive > .table-bordered > tbody > tr > th:first-child,\n .table-responsive > .table-bordered > tfoot > tr > th:first-child,\n .table-responsive > .table-bordered > thead > tr > td:first-child,\n .table-responsive > .table-bordered > tbody > tr > td:first-child,\n .table-responsive > .table-bordered > tfoot > tr > td:first-child {\n border-left: 0;\n }\n .table-responsive > .table-bordered > thead > tr > th:last-child,\n .table-responsive > .table-bordered > tbody > tr > th:last-child,\n .table-responsive > .table-bordered > tfoot > tr > th:last-child,\n .table-responsive > .table-bordered > thead > tr > td:last-child,\n .table-responsive > .table-bordered > tbody > tr > td:last-child,\n .table-responsive > .table-bordered > tfoot > tr 
> td:last-child {\n border-right: 0;\n }\n .table-responsive > .table-bordered > tbody > tr:last-child > th,\n .table-responsive > .table-bordered > tfoot > tr:last-child > th,\n .table-responsive > .table-bordered > tbody > tr:last-child > td,\n .table-responsive > .table-bordered > tfoot > tr:last-child > td {\n border-bottom: 0;\n }\n}\nfieldset {\n min-width: 0;\n padding: 0;\n margin: 0;\n border: 0;\n}\nlegend {\n display: block;\n width: 100%;\n padding: 0;\n margin-bottom: 20px;\n font-size: 21px;\n line-height: inherit;\n color: #333;\n border: 0;\n border-bottom: 1px solid #e5e5e5;\n}\nlabel {\n display: inline-block;\n max-width: 100%;\n margin-bottom: 5px;\n font-weight: bold;\n}\ninput[type=\"search\"] {\n -webkit-box-sizing: border-box;\n -moz-box-sizing: border-box;\n box-sizing: border-box;\n}\ninput[type=\"radio\"],\ninput[type=\"checkbox\"] {\n margin: 4px 0 0;\n margin-top: 1px \\9;\n line-height: normal;\n}\ninput[type=\"file\"] {\n display: block;\n}\ninput[type=\"range\"] {\n display: block;\n width: 100%;\n}\nselect[multiple],\nselect[size] {\n height: auto;\n}\ninput[type=\"file\"]:focus,\ninput[type=\"radio\"]:focus,\ninput[type=\"checkbox\"]:focus {\n outline: 5px auto -webkit-focus-ring-color;\n outline-offset: -2px;\n}\noutput {\n display: block;\n padding-top: 7px;\n font-size: 14px;\n line-height: 1.42857143;\n color: #555;\n}\n.form-control {\n display: block;\n width: 100%;\n height: 34px;\n padding: 6px 12px;\n font-size: 14px;\n line-height: 1.42857143;\n color: #555;\n background-color: #fff;\n background-image: none;\n border: 1px solid #ccc;\n border-radius: 4px;\n -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);\n box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);\n -webkit-transition: border-color ease-in-out .15s, -webkit-box-shadow ease-in-out .15s;\n -o-transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;\n transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;\n}\n.form-control:focus {\n border-color: #66afe9;\n outline: 0;\n -webkit-box-shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px rgba(102, 175, 233, .6);\n box-shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px rgba(102, 175, 233, .6);\n}\n.form-control::-moz-placeholder {\n color: #999;\n opacity: 1;\n}\n.form-control:-ms-input-placeholder {\n color: #999;\n}\n.form-control::-webkit-input-placeholder {\n color: #999;\n}\n.form-control::-ms-expand {\n background-color: transparent;\n border: 0;\n}\n.form-control[disabled],\n.form-control[readonly],\nfieldset[disabled] .form-control {\n background-color: #eee;\n opacity: 1;\n}\n.form-control[disabled],\nfieldset[disabled] .form-control {\n cursor: not-allowed;\n}\ntextarea.form-control {\n height: auto;\n}\ninput[type=\"search\"] {\n -webkit-appearance: none;\n}\n@media screen and (-webkit-min-device-pixel-ratio: 0) {\n input[type=\"date\"].form-control,\n input[type=\"time\"].form-control,\n input[type=\"datetime-local\"].form-control,\n input[type=\"month\"].form-control {\n line-height: 34px;\n }\n input[type=\"date\"].input-sm,\n input[type=\"time\"].input-sm,\n input[type=\"datetime-local\"].input-sm,\n input[type=\"month\"].input-sm,\n .input-group-sm input[type=\"date\"],\n .input-group-sm input[type=\"time\"],\n .input-group-sm input[type=\"datetime-local\"],\n .input-group-sm input[type=\"month\"] {\n line-height: 30px;\n }\n input[type=\"date\"].input-lg,\n input[type=\"time\"].input-lg,\n input[type=\"datetime-local\"].input-lg,\n input[type=\"month\"].input-lg,\n .input-group-lg 
input[type=\"date\"],\n .input-group-lg input[type=\"time\"],\n .input-group-lg input[type=\"datetime-local\"],\n .input-group-lg input[type=\"month\"] {\n line-height: 46px;\n }\n}\n.form-group {\n margin-bottom: 15px;\n}\n.radio,\n.checkbox {\n position: relative;\n display: block;\n margin-top: 10px;\n margin-bottom: 10px;\n}\n.radio label,\n.checkbox label {\n min-height: 20px;\n padding-left: 20px;\n margin-bottom: 0;\n font-weight: normal;\n cursor: pointer;\n}\n.radio input[type=\"radio\"],\n.radio-inline input[type=\"radio\"],\n.checkbox input[type=\"checkbox\"],\n.checkbox-inline input[type=\"checkbox\"] {\n position: absolute;\n margin-top: 4px \\9;\n margin-left: -20px;\n}\n.radio + .radio,\n.checkbox + .checkbox {\n margin-top: -5px;\n}\n.radio-inline,\n.checkbox-inline {\n position: relative;\n display: inline-block;\n padding-left: 20px;\n margin-bottom: 0;\n font-weight: normal;\n vertical-align: middle;\n cursor: pointer;\n}\n.radio-inline + .radio-inline,\n.checkbox-inline + .checkbox-inline {\n margin-top: 0;\n margin-left: 10px;\n}\ninput[type=\"radio\"][disabled],\ninput[type=\"checkbox\"][disabled],\ninput[type=\"radio\"].disabled,\ninput[type=\"checkbox\"].disabled,\nfieldset[disabled] input[type=\"radio\"],\nfieldset[disabled] input[type=\"checkbox\"] {\n cursor: not-allowed;\n}\n.radio-inline.disabled,\n.checkbox-inline.disabled,\nfieldset[disabled] .radio-inline,\nfieldset[disabled] .checkbox-inline {\n cursor: not-allowed;\n}\n.radio.disabled label,\n.checkbox.disabled label,\nfieldset[disabled] .radio label,\nfieldset[disabled] .checkbox label {\n cursor: not-allowed;\n}\n.form-control-static {\n min-height: 34px;\n padding-top: 7px;\n padding-bottom: 7px;\n margin-bottom: 0;\n}\n.form-control-static.input-lg,\n.form-control-static.input-sm {\n padding-right: 0;\n padding-left: 0;\n}\n.input-sm {\n height: 30px;\n padding: 5px 10px;\n font-size: 12px;\n line-height: 1.5;\n border-radius: 3px;\n}\nselect.input-sm {\n height: 30px;\n line-height: 30px;\n}\ntextarea.input-sm,\nselect[multiple].input-sm {\n height: auto;\n}\n.form-group-sm .form-control {\n height: 30px;\n padding: 5px 10px;\n font-size: 12px;\n line-height: 1.5;\n border-radius: 3px;\n}\n.form-group-sm select.form-control {\n height: 30px;\n line-height: 30px;\n}\n.form-group-sm textarea.form-control,\n.form-group-sm select[multiple].form-control {\n height: auto;\n}\n.form-group-sm .form-control-static {\n height: 30px;\n min-height: 32px;\n padding: 6px 10px;\n font-size: 12px;\n line-height: 1.5;\n}\n.input-lg {\n height: 46px;\n padding: 10px 16px;\n font-size: 18px;\n line-height: 1.3333333;\n border-radius: 6px;\n}\nselect.input-lg {\n height: 46px;\n line-height: 46px;\n}\ntextarea.input-lg,\nselect[multiple].input-lg {\n height: auto;\n}\n.form-group-lg .form-control {\n height: 46px;\n padding: 10px 16px;\n font-size: 18px;\n line-height: 1.3333333;\n border-radius: 6px;\n}\n.form-group-lg select.form-control {\n height: 46px;\n line-height: 46px;\n}\n.form-group-lg textarea.form-control,\n.form-group-lg select[multiple].form-control {\n height: auto;\n}\n.form-group-lg .form-control-static {\n height: 46px;\n min-height: 38px;\n padding: 11px 16px;\n font-size: 18px;\n line-height: 1.3333333;\n}\n.has-feedback {\n position: relative;\n}\n.has-feedback .form-control {\n padding-right: 42.5px;\n}\n.form-control-feedback {\n position: absolute;\n top: 0;\n right: 0;\n z-index: 2;\n display: block;\n width: 34px;\n height: 34px;\n line-height: 34px;\n text-align: center;\n pointer-events: 
none;\n}\n.input-lg + .form-control-feedback,\n.input-group-lg + .form-control-feedback,\n.form-group-lg .form-control + .form-control-feedback {\n width: 46px;\n height: 46px;\n line-height: 46px;\n}\n.input-sm + .form-control-feedback,\n.input-group-sm + .form-control-feedback,\n.form-group-sm .form-control + .form-control-feedback {\n width: 30px;\n height: 30px;\n line-height: 30px;\n}\n.has-success .help-block,\n.has-success .control-label,\n.has-success .radio,\n.has-success .checkbox,\n.has-success .radio-inline,\n.has-success .checkbox-inline,\n.has-success.radio label,\n.has-success.checkbox label,\n.has-success.radio-inline label,\n.has-success.checkbox-inline label {\n color: #3c763d;\n}\n.has-success .form-control {\n border-color: #3c763d;\n -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);\n box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);\n}\n.has-success .form-control:focus {\n border-color: #2b542c;\n -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #67b168;\n box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #67b168;\n}\n.has-success .input-group-addon {\n color: #3c763d;\n background-color: #dff0d8;\n border-color: #3c763d;\n}\n.has-success .form-control-feedback {\n color: #3c763d;\n}\n.has-warning .help-block,\n.has-warning .control-label,\n.has-warning .radio,\n.has-warning .checkbox,\n.has-warning .radio-inline,\n.has-warning .checkbox-inline,\n.has-warning.radio label,\n.has-warning.checkbox label,\n.has-warning.radio-inline label,\n.has-warning.checkbox-inline label {\n color: #8a6d3b;\n}\n.has-warning .form-control {\n border-color: #8a6d3b;\n -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);\n box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);\n}\n.has-warning .form-control:focus {\n border-color: #66512c;\n -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #c0a16b;\n box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #c0a16b;\n}\n.has-warning .input-group-addon {\n color: #8a6d3b;\n background-color: #fcf8e3;\n border-color: #8a6d3b;\n}\n.has-warning .form-control-feedback {\n color: #8a6d3b;\n}\n.has-error .help-block,\n.has-error .control-label,\n.has-error .radio,\n.has-error .checkbox,\n.has-error .radio-inline,\n.has-error .checkbox-inline,\n.has-error.radio label,\n.has-error.checkbox label,\n.has-error.radio-inline label,\n.has-error.checkbox-inline label {\n color: #a94442;\n}\n.has-error .form-control {\n border-color: #a94442;\n -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);\n box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);\n}\n.has-error .form-control:focus {\n border-color: #843534;\n -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #ce8483;\n box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #ce8483;\n}\n.has-error .input-group-addon {\n color: #a94442;\n background-color: #f2dede;\n border-color: #a94442;\n}\n.has-error .form-control-feedback {\n color: #a94442;\n}\n.has-feedback label ~ .form-control-feedback {\n top: 25px;\n}\n.has-feedback label.sr-only ~ .form-control-feedback {\n top: 0;\n}\n.help-block {\n display: block;\n margin-top: 5px;\n margin-bottom: 10px;\n color: #737373;\n}\n@media (min-width: 768px) {\n .form-inline .form-group {\n display: inline-block;\n margin-bottom: 0;\n vertical-align: middle;\n }\n .form-inline .form-control {\n display: inline-block;\n width: auto;\n vertical-align: middle;\n }\n .form-inline .form-control-static {\n display: inline-block;\n }\n .form-inline .input-group {\n display: inline-table;\n vertical-align: 
middle;\n }\n .form-inline .input-group .input-group-addon,\n .form-inline .input-group .input-group-btn,\n .form-inline .input-group .form-control {\n width: auto;\n }\n .form-inline .input-group > .form-control {\n width: 100%;\n }\n .form-inline .control-label {\n margin-bottom: 0;\n vertical-align: middle;\n }\n .form-inline .radio,\n .form-inline .checkbox {\n display: inline-block;\n margin-top: 0;\n margin-bottom: 0;\n vertical-align: middle;\n }\n .form-inline .radio label,\n .form-inline .checkbox label {\n padding-left: 0;\n }\n .form-inline .radio input[type=\"radio\"],\n .form-inline .checkbox input[type=\"checkbox\"] {\n position: relative;\n margin-left: 0;\n }\n .form-inline .has-feedback .form-control-feedback {\n top: 0;\n }\n}\n.form-horizontal .radio,\n.form-horizontal .checkbox,\n.form-horizontal .radio-inline,\n.form-horizontal .checkbox-inline {\n padding-top: 7px;\n margin-top: 0;\n margin-bottom: 0;\n}\n.form-horizontal .radio,\n.form-horizontal .checkbox {\n min-height: 27px;\n}\n.form-horizontal .form-group {\n margin-right: -15px;\n margin-left: -15px;\n}\n@media (min-width: 768px) {\n .form-horizontal .control-label {\n padding-top: 7px;\n margin-bottom: 0;\n text-align: right;\n }\n}\n.form-horizontal .has-feedback .form-control-feedback {\n right: 15px;\n}\n@media (min-width: 768px) {\n .form-horizontal .form-group-lg .control-label {\n padding-top: 11px;\n font-size: 18px;\n }\n}\n@media (min-width: 768px) {\n .form-horizontal .form-group-sm .control-label {\n padding-top: 6px;\n font-size: 12px;\n }\n}\n.btn {\n display: inline-block;\n padding: 6px 12px;\n margin-bottom: 0;\n font-size: 14px;\n font-weight: normal;\n line-height: 1.42857143;\n text-align: center;\n white-space: nowrap;\n vertical-align: middle;\n -ms-touch-action: manipulation;\n touch-action: manipulation;\n cursor: pointer;\n -webkit-user-select: none;\n -moz-user-select: none;\n -ms-user-select: none;\n user-select: none;\n background-image: none;\n border: 1px solid transparent;\n border-radius: 4px;\n}\n.btn:focus,\n.btn:active:focus,\n.btn.active:focus,\n.btn.focus,\n.btn:active.focus,\n.btn.active.focus {\n outline: 5px auto -webkit-focus-ring-color;\n outline-offset: -2px;\n}\n.btn:hover,\n.btn:focus,\n.btn.focus {\n color: #333;\n text-decoration: none;\n}\n.btn:active,\n.btn.active {\n background-image: none;\n outline: 0;\n -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125);\n box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125);\n}\n.btn.disabled,\n.btn[disabled],\nfieldset[disabled] .btn {\n cursor: not-allowed;\n filter: alpha(opacity=65);\n -webkit-box-shadow: none;\n box-shadow: none;\n opacity: .65;\n}\na.btn.disabled,\nfieldset[disabled] a.btn {\n pointer-events: none;\n}\n.btn-default {\n color: #333;\n background-color: #fff;\n border-color: #ccc;\n}\n.btn-default:focus,\n.btn-default.focus {\n color: #333;\n background-color: #e6e6e6;\n border-color: #8c8c8c;\n}\n.btn-default:hover {\n color: #333;\n background-color: #e6e6e6;\n border-color: #adadad;\n}\n.btn-default:active,\n.btn-default.active,\n.open > .dropdown-toggle.btn-default {\n color: #333;\n background-color: #e6e6e6;\n border-color: #adadad;\n}\n.btn-default:active:hover,\n.btn-default.active:hover,\n.open > .dropdown-toggle.btn-default:hover,\n.btn-default:active:focus,\n.btn-default.active:focus,\n.open > .dropdown-toggle.btn-default:focus,\n.btn-default:active.focus,\n.btn-default.active.focus,\n.open > .dropdown-toggle.btn-default.focus {\n color: #333;\n background-color: #d4d4d4;\n border-color: 
#8c8c8c;\n}\n.btn-default:active,\n.btn-default.active,\n.open > .dropdown-toggle.btn-default {\n background-image: none;\n}\n.btn-default.disabled:hover,\n.btn-default[disabled]:hover,\nfieldset[disabled] .btn-default:hover,\n.btn-default.disabled:focus,\n.btn-default[disabled]:focus,\nfieldset[disabled] .btn-default:focus,\n.btn-default.disabled.focus,\n.btn-default[disabled].focus,\nfieldset[disabled] .btn-default.focus {\n background-color: #fff;\n border-color: #ccc;\n}\n.btn-default .badge {\n color: #fff;\n background-color: #333;\n}\n.btn-primary {\n color: #fff;\n background-color: #337ab7;\n border-color: #2e6da4;\n}\n.btn-primary:focus,\n.btn-primary.focus {\n color: #fff;\n background-color: #286090;\n border-color: #122b40;\n}\n.btn-primary:hover {\n color: #fff;\n background-color: #286090;\n border-color: #204d74;\n}\n.btn-primary:active,\n.btn-primary.active,\n.open > .dropdown-toggle.btn-primary {\n color: #fff;\n background-color: #286090;\n border-color: #204d74;\n}\n.btn-primary:active:hover,\n.btn-primary.active:hover,\n.open > .dropdown-toggle.btn-primary:hover,\n.btn-primary:active:focus,\n.btn-primary.active:focus,\n.open > .dropdown-toggle.btn-primary:focus,\n.btn-primary:active.focus,\n.btn-primary.active.focus,\n.open > .dropdown-toggle.btn-primary.focus {\n color: #fff;\n background-color: #204d74;\n border-color: #122b40;\n}\n.btn-primary:active,\n.btn-primary.active,\n.open > .dropdown-toggle.btn-primary {\n background-image: none;\n}\n.btn-primary.disabled:hover,\n.btn-primary[disabled]:hover,\nfieldset[disabled] .btn-primary:hover,\n.btn-primary.disabled:focus,\n.btn-primary[disabled]:focus,\nfieldset[disabled] .btn-primary:focus,\n.btn-primary.disabled.focus,\n.btn-primary[disabled].focus,\nfieldset[disabled] .btn-primary.focus {\n background-color: #337ab7;\n border-color: #2e6da4;\n}\n.btn-primary .badge {\n color: #337ab7;\n background-color: #fff;\n}\n.btn-success {\n color: #fff;\n background-color: #5cb85c;\n border-color: #4cae4c;\n}\n.btn-success:focus,\n.btn-success.focus {\n color: #fff;\n background-color: #449d44;\n border-color: #255625;\n}\n.btn-success:hover {\n color: #fff;\n background-color: #449d44;\n border-color: #398439;\n}\n.btn-success:active,\n.btn-success.active,\n.open > .dropdown-toggle.btn-success {\n color: #fff;\n background-color: #449d44;\n border-color: #398439;\n}\n.btn-success:active:hover,\n.btn-success.active:hover,\n.open > .dropdown-toggle.btn-success:hover,\n.btn-success:active:focus,\n.btn-success.active:focus,\n.open > .dropdown-toggle.btn-success:focus,\n.btn-success:active.focus,\n.btn-success.active.focus,\n.open > .dropdown-toggle.btn-success.focus {\n color: #fff;\n background-color: #398439;\n border-color: #255625;\n}\n.btn-success:active,\n.btn-success.active,\n.open > .dropdown-toggle.btn-success {\n background-image: none;\n}\n.btn-success.disabled:hover,\n.btn-success[disabled]:hover,\nfieldset[disabled] .btn-success:hover,\n.btn-success.disabled:focus,\n.btn-success[disabled]:focus,\nfieldset[disabled] .btn-success:focus,\n.btn-success.disabled.focus,\n.btn-success[disabled].focus,\nfieldset[disabled] .btn-success.focus {\n background-color: #5cb85c;\n border-color: #4cae4c;\n}\n.btn-success .badge {\n color: #5cb85c;\n background-color: #fff;\n}\n.btn-info {\n color: #fff;\n background-color: #5bc0de;\n border-color: #46b8da;\n}\n.btn-info:focus,\n.btn-info.focus {\n color: #fff;\n background-color: #31b0d5;\n border-color: #1b6d85;\n}\n.btn-info:hover {\n color: #fff;\n background-color: #31b0d5;\n 
border-color: #269abc;\n}\n.btn-info:active,\n.btn-info.active,\n.open > .dropdown-toggle.btn-info {\n color: #fff;\n background-color: #31b0d5;\n border-color: #269abc;\n}\n.btn-info:active:hover,\n.btn-info.active:hover,\n.open > .dropdown-toggle.btn-info:hover,\n.btn-info:active:focus,\n.btn-info.active:focus,\n.open > .dropdown-toggle.btn-info:focus,\n.btn-info:active.focus,\n.btn-info.active.focus,\n.open > .dropdown-toggle.btn-info.focus {\n color: #fff;\n background-color: #269abc;\n border-color: #1b6d85;\n}\n.btn-info:active,\n.btn-info.active,\n.open > .dropdown-toggle.btn-info {\n background-image: none;\n}\n.btn-info.disabled:hover,\n.btn-info[disabled]:hover,\nfieldset[disabled] .btn-info:hover,\n.btn-info.disabled:focus,\n.btn-info[disabled]:focus,\nfieldset[disabled] .btn-info:focus,\n.btn-info.disabled.focus,\n.btn-info[disabled].focus,\nfieldset[disabled] .btn-info.focus {\n background-color: #5bc0de;\n border-color: #46b8da;\n}\n.btn-info .badge {\n color: #5bc0de;\n background-color: #fff;\n}\n.btn-warning {\n color: #fff;\n background-color: #f0ad4e;\n border-color: #eea236;\n}\n.btn-warning:focus,\n.btn-warning.focus {\n color: #fff;\n background-color: #ec971f;\n border-color: #985f0d;\n}\n.btn-warning:hover {\n color: #fff;\n background-color: #ec971f;\n border-color: #d58512;\n}\n.btn-warning:active,\n.btn-warning.active,\n.open > .dropdown-toggle.btn-warning {\n color: #fff;\n background-color: #ec971f;\n border-color: #d58512;\n}\n.btn-warning:active:hover,\n.btn-warning.active:hover,\n.open > .dropdown-toggle.btn-warning:hover,\n.btn-warning:active:focus,\n.btn-warning.active:focus,\n.open > .dropdown-toggle.btn-warning:focus,\n.btn-warning:active.focus,\n.btn-warning.active.focus,\n.open > .dropdown-toggle.btn-warning.focus {\n color: #fff;\n background-color: #d58512;\n border-color: #985f0d;\n}\n.btn-warning:active,\n.btn-warning.active,\n.open > .dropdown-toggle.btn-warning {\n background-image: none;\n}\n.btn-warning.disabled:hover,\n.btn-warning[disabled]:hover,\nfieldset[disabled] .btn-warning:hover,\n.btn-warning.disabled:focus,\n.btn-warning[disabled]:focus,\nfieldset[disabled] .btn-warning:focus,\n.btn-warning.disabled.focus,\n.btn-warning[disabled].focus,\nfieldset[disabled] .btn-warning.focus {\n background-color: #f0ad4e;\n border-color: #eea236;\n}\n.btn-warning .badge {\n color: #f0ad4e;\n background-color: #fff;\n}\n.btn-danger {\n color: #fff;\n background-color: #d9534f;\n border-color: #d43f3a;\n}\n.btn-danger:focus,\n.btn-danger.focus {\n color: #fff;\n background-color: #c9302c;\n border-color: #761c19;\n}\n.btn-danger:hover {\n color: #fff;\n background-color: #c9302c;\n border-color: #ac2925;\n}\n.btn-danger:active,\n.btn-danger.active,\n.open > .dropdown-toggle.btn-danger {\n color: #fff;\n background-color: #c9302c;\n border-color: #ac2925;\n}\n.btn-danger:active:hover,\n.btn-danger.active:hover,\n.open > .dropdown-toggle.btn-danger:hover,\n.btn-danger:active:focus,\n.btn-danger.active:focus,\n.open > .dropdown-toggle.btn-danger:focus,\n.btn-danger:active.focus,\n.btn-danger.active.focus,\n.open > .dropdown-toggle.btn-danger.focus {\n color: #fff;\n background-color: #ac2925;\n border-color: #761c19;\n}\n.btn-danger:active,\n.btn-danger.active,\n.open > .dropdown-toggle.btn-danger {\n background-image: none;\n}\n.btn-danger.disabled:hover,\n.btn-danger[disabled]:hover,\nfieldset[disabled] .btn-danger:hover,\n.btn-danger.disabled:focus,\n.btn-danger[disabled]:focus,\nfieldset[disabled] 
.btn-danger:focus,\n.btn-danger.disabled.focus,\n.btn-danger[disabled].focus,\nfieldset[disabled] .btn-danger.focus {\n background-color: #d9534f;\n border-color: #d43f3a;\n}\n.btn-danger .badge {\n color: #d9534f;\n background-color: #fff;\n}\n.btn-link {\n font-weight: normal;\n color: #337ab7;\n border-radius: 0;\n}\n.btn-link,\n.btn-link:active,\n.btn-link.active,\n.btn-link[disabled],\nfieldset[disabled] .btn-link {\n background-color: transparent;\n -webkit-box-shadow: none;\n box-shadow: none;\n}\n.btn-link,\n.btn-link:hover,\n.btn-link:focus,\n.btn-link:active {\n border-color: transparent;\n}\n.btn-link:hover,\n.btn-link:focus {\n color: #23527c;\n text-decoration: underline;\n background-color: transparent;\n}\n.btn-link[disabled]:hover,\nfieldset[disabled] .btn-link:hover,\n.btn-link[disabled]:focus,\nfieldset[disabled] .btn-link:focus {\n color: #777;\n text-decoration: none;\n}\n.btn-lg,\n.btn-group-lg > .btn {\n padding: 10px 16px;\n font-size: 18px;\n line-height: 1.3333333;\n border-radius: 6px;\n}\n.btn-sm,\n.btn-group-sm > .btn {\n padding: 5px 10px;\n font-size: 12px;\n line-height: 1.5;\n border-radius: 3px;\n}\n.btn-xs,\n.btn-group-xs > .btn {\n padding: 1px 5px;\n font-size: 12px;\n line-height: 1.5;\n border-radius: 3px;\n}\n.btn-block {\n display: block;\n width: 100%;\n}\n.btn-block + .btn-block {\n margin-top: 5px;\n}\ninput[type=\"submit\"].btn-block,\ninput[type=\"reset\"].btn-block,\ninput[type=\"button\"].btn-block {\n width: 100%;\n}\n.fade {\n opacity: 0;\n -webkit-transition: opacity .15s linear;\n -o-transition: opacity .15s linear;\n transition: opacity .15s linear;\n}\n.fade.in {\n opacity: 1;\n}\n.collapse {\n display: none;\n}\n.collapse.in {\n display: block;\n}\ntr.collapse.in {\n display: table-row;\n}\ntbody.collapse.in {\n display: table-row-group;\n}\n.collapsing {\n position: relative;\n height: 0;\n overflow: hidden;\n -webkit-transition-timing-function: ease;\n -o-transition-timing-function: ease;\n transition-timing-function: ease;\n -webkit-transition-duration: .35s;\n -o-transition-duration: .35s;\n transition-duration: .35s;\n -webkit-transition-property: height, visibility;\n -o-transition-property: height, visibility;\n transition-property: height, visibility;\n}\n.caret {\n display: inline-block;\n width: 0;\n height: 0;\n margin-left: 2px;\n vertical-align: middle;\n border-top: 4px dashed;\n border-top: 4px solid \\9;\n border-right: 4px solid transparent;\n border-left: 4px solid transparent;\n}\n.dropup,\n.dropdown {\n position: relative;\n}\n.dropdown-toggle:focus {\n outline: 0;\n}\n.dropdown-menu {\n position: absolute;\n top: 100%;\n left: 0;\n z-index: 1000;\n display: none;\n float: left;\n min-width: 160px;\n padding: 5px 0;\n margin: 2px 0 0;\n font-size: 14px;\n text-align: left;\n list-style: none;\n background-color: #fff;\n -webkit-background-clip: padding-box;\n background-clip: padding-box;\n border: 1px solid #ccc;\n border: 1px solid rgba(0, 0, 0, .15);\n border-radius: 4px;\n -webkit-box-shadow: 0 6px 12px rgba(0, 0, 0, .175);\n box-shadow: 0 6px 12px rgba(0, 0, 0, .175);\n}\n.dropdown-menu.pull-right {\n right: 0;\n left: auto;\n}\n.dropdown-menu .divider {\n height: 1px;\n margin: 9px 0;\n overflow: hidden;\n background-color: #e5e5e5;\n}\n.dropdown-menu > li > a {\n display: block;\n padding: 3px 20px;\n clear: both;\n font-weight: normal;\n line-height: 1.42857143;\n color: #333;\n white-space: nowrap;\n}\n.dropdown-menu > li > a:hover,\n.dropdown-menu > li > a:focus {\n color: #262626;\n text-decoration: 
none;\n background-color: #f5f5f5;\n}\n.dropdown-menu > .active > a,\n.dropdown-menu > .active > a:hover,\n.dropdown-menu > .active > a:focus {\n color: #fff;\n text-decoration: none;\n background-color: #337ab7;\n outline: 0;\n}\n.dropdown-menu > .disabled > a,\n.dropdown-menu > .disabled > a:hover,\n.dropdown-menu > .disabled > a:focus {\n color: #777;\n}\n.dropdown-menu > .disabled > a:hover,\n.dropdown-menu > .disabled > a:focus {\n text-decoration: none;\n cursor: not-allowed;\n background-color: transparent;\n background-image: none;\n filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n}\n.open > .dropdown-menu {\n display: block;\n}\n.open > a {\n outline: 0;\n}\n.dropdown-menu-right {\n right: 0;\n left: auto;\n}\n.dropdown-menu-left {\n right: auto;\n left: 0;\n}\n.dropdown-header {\n display: block;\n padding: 3px 20px;\n font-size: 12px;\n line-height: 1.42857143;\n color: #777;\n white-space: nowrap;\n}\n.dropdown-backdrop {\n position: fixed;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n z-index: 990;\n}\n.pull-right > .dropdown-menu {\n right: 0;\n left: auto;\n}\n.dropup .caret,\n.navbar-fixed-bottom .dropdown .caret {\n content: \"\";\n border-top: 0;\n border-bottom: 4px dashed;\n border-bottom: 4px solid \\9;\n}\n.dropup .dropdown-menu,\n.navbar-fixed-bottom .dropdown .dropdown-menu {\n top: auto;\n bottom: 100%;\n margin-bottom: 2px;\n}\n@media (min-width: 768px) {\n .navbar-right .dropdown-menu {\n right: 0;\n left: auto;\n }\n .navbar-right .dropdown-menu-left {\n right: auto;\n left: 0;\n }\n}\n.btn-group,\n.btn-group-vertical {\n position: relative;\n display: inline-block;\n vertical-align: middle;\n}\n.btn-group > .btn,\n.btn-group-vertical > .btn {\n position: relative;\n float: left;\n}\n.btn-group > .btn:hover,\n.btn-group-vertical > .btn:hover,\n.btn-group > .btn:focus,\n.btn-group-vertical > .btn:focus,\n.btn-group > .btn:active,\n.btn-group-vertical > .btn:active,\n.btn-group > .btn.active,\n.btn-group-vertical > .btn.active {\n z-index: 2;\n}\n.btn-group .btn + .btn,\n.btn-group .btn + .btn-group,\n.btn-group .btn-group + .btn,\n.btn-group .btn-group + .btn-group {\n margin-left: -1px;\n}\n.btn-toolbar {\n margin-left: -5px;\n}\n.btn-toolbar .btn,\n.btn-toolbar .btn-group,\n.btn-toolbar .input-group {\n float: left;\n}\n.btn-toolbar > .btn,\n.btn-toolbar > .btn-group,\n.btn-toolbar > .input-group {\n margin-left: 5px;\n}\n.btn-group > .btn:not(:first-child):not(:last-child):not(.dropdown-toggle) {\n border-radius: 0;\n}\n.btn-group > .btn:first-child {\n margin-left: 0;\n}\n.btn-group > .btn:first-child:not(:last-child):not(.dropdown-toggle) {\n border-top-right-radius: 0;\n border-bottom-right-radius: 0;\n}\n.btn-group > .btn:last-child:not(:first-child),\n.btn-group > .dropdown-toggle:not(:first-child) {\n border-top-left-radius: 0;\n border-bottom-left-radius: 0;\n}\n.btn-group > .btn-group {\n float: left;\n}\n.btn-group > .btn-group:not(:first-child):not(:last-child) > .btn {\n border-radius: 0;\n}\n.btn-group > .btn-group:first-child:not(:last-child) > .btn:last-child,\n.btn-group > .btn-group:first-child:not(:last-child) > .dropdown-toggle {\n border-top-right-radius: 0;\n border-bottom-right-radius: 0;\n}\n.btn-group > .btn-group:last-child:not(:first-child) > .btn:first-child {\n border-top-left-radius: 0;\n border-bottom-left-radius: 0;\n}\n.btn-group .dropdown-toggle:active,\n.btn-group.open .dropdown-toggle {\n outline: 0;\n}\n.btn-group > .btn + .dropdown-toggle {\n padding-right: 8px;\n padding-left: 8px;\n}\n.btn-group > 
.btn-lg + .dropdown-toggle {\n padding-right: 12px;\n padding-left: 12px;\n}\n.btn-group.open .dropdown-toggle {\n -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125);\n box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125);\n}\n.btn-group.open .dropdown-toggle.btn-link {\n -webkit-box-shadow: none;\n box-shadow: none;\n}\n.btn .caret {\n margin-left: 0;\n}\n.btn-lg .caret {\n border-width: 5px 5px 0;\n border-bottom-width: 0;\n}\n.dropup .btn-lg .caret {\n border-width: 0 5px 5px;\n}\n.btn-group-vertical > .btn,\n.btn-group-vertical > .btn-group,\n.btn-group-vertical > .btn-group > .btn {\n display: block;\n float: none;\n width: 100%;\n max-width: 100%;\n}\n.btn-group-vertical > .btn-group > .btn {\n float: none;\n}\n.btn-group-vertical > .btn + .btn,\n.btn-group-vertical > .btn + .btn-group,\n.btn-group-vertical > .btn-group + .btn,\n.btn-group-vertical > .btn-group + .btn-group {\n margin-top: -1px;\n margin-left: 0;\n}\n.btn-group-vertical > .btn:not(:first-child):not(:last-child) {\n border-radius: 0;\n}\n.btn-group-vertical > .btn:first-child:not(:last-child) {\n border-top-left-radius: 4px;\n border-top-right-radius: 4px;\n border-bottom-right-radius: 0;\n border-bottom-left-radius: 0;\n}\n.btn-group-vertical > .btn:last-child:not(:first-child) {\n border-top-left-radius: 0;\n border-top-right-radius: 0;\n border-bottom-right-radius: 4px;\n border-bottom-left-radius: 4px;\n}\n.btn-group-vertical > .btn-group:not(:first-child):not(:last-child) > .btn {\n border-radius: 0;\n}\n.btn-group-vertical > .btn-group:first-child:not(:last-child) > .btn:last-child,\n.btn-group-vertical > .btn-group:first-child:not(:last-child) > .dropdown-toggle {\n border-bottom-right-radius: 0;\n border-bottom-left-radius: 0;\n}\n.btn-group-vertical > .btn-group:last-child:not(:first-child) > .btn:first-child {\n border-top-left-radius: 0;\n border-top-right-radius: 0;\n}\n.btn-group-justified {\n display: table;\n width: 100%;\n table-layout: fixed;\n border-collapse: separate;\n}\n.btn-group-justified > .btn,\n.btn-group-justified > .btn-group {\n display: table-cell;\n float: none;\n width: 1%;\n}\n.btn-group-justified > .btn-group .btn {\n width: 100%;\n}\n.btn-group-justified > .btn-group .dropdown-menu {\n left: auto;\n}\n[data-toggle=\"buttons\"] > .btn input[type=\"radio\"],\n[data-toggle=\"buttons\"] > .btn-group > .btn input[type=\"radio\"],\n[data-toggle=\"buttons\"] > .btn input[type=\"checkbox\"],\n[data-toggle=\"buttons\"] > .btn-group > .btn input[type=\"checkbox\"] {\n position: absolute;\n clip: rect(0, 0, 0, 0);\n pointer-events: none;\n}\n.input-group {\n position: relative;\n display: table;\n border-collapse: separate;\n}\n.input-group[class*=\"col-\"] {\n float: none;\n padding-right: 0;\n padding-left: 0;\n}\n.input-group .form-control {\n position: relative;\n z-index: 2;\n float: left;\n width: 100%;\n margin-bottom: 0;\n}\n.input-group .form-control:focus {\n z-index: 3;\n}\n.input-group-lg > .form-control,\n.input-group-lg > .input-group-addon,\n.input-group-lg > .input-group-btn > .btn {\n height: 46px;\n padding: 10px 16px;\n font-size: 18px;\n line-height: 1.3333333;\n border-radius: 6px;\n}\nselect.input-group-lg > .form-control,\nselect.input-group-lg > .input-group-addon,\nselect.input-group-lg > .input-group-btn > .btn {\n height: 46px;\n line-height: 46px;\n}\ntextarea.input-group-lg > .form-control,\ntextarea.input-group-lg > .input-group-addon,\ntextarea.input-group-lg > .input-group-btn > .btn,\nselect[multiple].input-group-lg > 
.form-control,\nselect[multiple].input-group-lg > .input-group-addon,\nselect[multiple].input-group-lg > .input-group-btn > .btn {\n height: auto;\n}\n.input-group-sm > .form-control,\n.input-group-sm > .input-group-addon,\n.input-group-sm > .input-group-btn > .btn {\n height: 30px;\n padding: 5px 10px;\n font-size: 12px;\n line-height: 1.5;\n border-radius: 3px;\n}\nselect.input-group-sm > .form-control,\nselect.input-group-sm > .input-group-addon,\nselect.input-group-sm > .input-group-btn > .btn {\n height: 30px;\n line-height: 30px;\n}\ntextarea.input-group-sm > .form-control,\ntextarea.input-group-sm > .input-group-addon,\ntextarea.input-group-sm > .input-group-btn > .btn,\nselect[multiple].input-group-sm > .form-control,\nselect[multiple].input-group-sm > .input-group-addon,\nselect[multiple].input-group-sm > .input-group-btn > .btn {\n height: auto;\n}\n.input-group-addon,\n.input-group-btn,\n.input-group .form-control {\n display: table-cell;\n}\n.input-group-addon:not(:first-child):not(:last-child),\n.input-group-btn:not(:first-child):not(:last-child),\n.input-group .form-control:not(:first-child):not(:last-child) {\n border-radius: 0;\n}\n.input-group-addon,\n.input-group-btn {\n width: 1%;\n white-space: nowrap;\n vertical-align: middle;\n}\n.input-group-addon {\n padding: 6px 12px;\n font-size: 14px;\n font-weight: normal;\n line-height: 1;\n color: #555;\n text-align: center;\n background-color: #eee;\n border: 1px solid #ccc;\n border-radius: 4px;\n}\n.input-group-addon.input-sm {\n padding: 5px 10px;\n font-size: 12px;\n border-radius: 3px;\n}\n.input-group-addon.input-lg {\n padding: 10px 16px;\n font-size: 18px;\n border-radius: 6px;\n}\n.input-group-addon input[type=\"radio\"],\n.input-group-addon input[type=\"checkbox\"] {\n margin-top: 0;\n}\n.input-group .form-control:first-child,\n.input-group-addon:first-child,\n.input-group-btn:first-child > .btn,\n.input-group-btn:first-child > .btn-group > .btn,\n.input-group-btn:first-child > .dropdown-toggle,\n.input-group-btn:last-child > .btn:not(:last-child):not(.dropdown-toggle),\n.input-group-btn:last-child > .btn-group:not(:last-child) > .btn {\n border-top-right-radius: 0;\n border-bottom-right-radius: 0;\n}\n.input-group-addon:first-child {\n border-right: 0;\n}\n.input-group .form-control:last-child,\n.input-group-addon:last-child,\n.input-group-btn:last-child > .btn,\n.input-group-btn:last-child > .btn-group > .btn,\n.input-group-btn:last-child > .dropdown-toggle,\n.input-group-btn:first-child > .btn:not(:first-child),\n.input-group-btn:first-child > .btn-group:not(:first-child) > .btn {\n border-top-left-radius: 0;\n border-bottom-left-radius: 0;\n}\n.input-group-addon:last-child {\n border-left: 0;\n}\n.input-group-btn {\n position: relative;\n font-size: 0;\n white-space: nowrap;\n}\n.input-group-btn > .btn {\n position: relative;\n}\n.input-group-btn > .btn + .btn {\n margin-left: -1px;\n}\n.input-group-btn > .btn:hover,\n.input-group-btn > .btn:focus,\n.input-group-btn > .btn:active {\n z-index: 2;\n}\n.input-group-btn:first-child > .btn,\n.input-group-btn:first-child > .btn-group {\n margin-right: -1px;\n}\n.input-group-btn:last-child > .btn,\n.input-group-btn:last-child > .btn-group {\n z-index: 2;\n margin-left: -1px;\n}\n.nav {\n padding-left: 0;\n margin-bottom: 0;\n list-style: none;\n}\n.nav > li {\n position: relative;\n display: block;\n}\n.nav > li > a {\n position: relative;\n display: block;\n padding: 10px 15px;\n}\n.nav > li > a:hover,\n.nav > li > a:focus {\n text-decoration: none;\n 
background-color: #eee;\n}\n.nav > li.disabled > a {\n color: #777;\n}\n.nav > li.disabled > a:hover,\n.nav > li.disabled > a:focus {\n color: #777;\n text-decoration: none;\n cursor: not-allowed;\n background-color: transparent;\n}\n.nav .open > a,\n.nav .open > a:hover,\n.nav .open > a:focus {\n background-color: #eee;\n border-color: #337ab7;\n}\n.nav .nav-divider {\n height: 1px;\n margin: 9px 0;\n overflow: hidden;\n background-color: #e5e5e5;\n}\n.nav > li > a > img {\n max-width: none;\n}\n.nav-tabs {\n border-bottom: 1px solid #ddd;\n}\n.nav-tabs > li {\n float: left;\n margin-bottom: -1px;\n}\n.nav-tabs > li > a {\n margin-right: 2px;\n line-height: 1.42857143;\n border: 1px solid transparent;\n border-radius: 4px 4px 0 0;\n}\n.nav-tabs > li > a:hover {\n border-color: #eee #eee #ddd;\n}\n.nav-tabs > li.active > a,\n.nav-tabs > li.active > a:hover,\n.nav-tabs > li.active > a:focus {\n color: #555;\n cursor: default;\n background-color: #fff;\n border: 1px solid #ddd;\n border-bottom-color: transparent;\n}\n.nav-tabs.nav-justified {\n width: 100%;\n border-bottom: 0;\n}\n.nav-tabs.nav-justified > li {\n float: none;\n}\n.nav-tabs.nav-justified > li > a {\n margin-bottom: 5px;\n text-align: center;\n}\n.nav-tabs.nav-justified > .dropdown .dropdown-menu {\n top: auto;\n left: auto;\n}\n@media (min-width: 768px) {\n .nav-tabs.nav-justified > li {\n display: table-cell;\n width: 1%;\n }\n .nav-tabs.nav-justified > li > a {\n margin-bottom: 0;\n }\n}\n.nav-tabs.nav-justified > li > a {\n margin-right: 0;\n border-radius: 4px;\n}\n.nav-tabs.nav-justified > .active > a,\n.nav-tabs.nav-justified > .active > a:hover,\n.nav-tabs.nav-justified > .active > a:focus {\n border: 1px solid #ddd;\n}\n@media (min-width: 768px) {\n .nav-tabs.nav-justified > li > a {\n border-bottom: 1px solid #ddd;\n border-radius: 4px 4px 0 0;\n }\n .nav-tabs.nav-justified > .active > a,\n .nav-tabs.nav-justified > .active > a:hover,\n .nav-tabs.nav-justified > .active > a:focus {\n border-bottom-color: #fff;\n }\n}\n.nav-pills > li {\n float: left;\n}\n.nav-pills > li > a {\n border-radius: 4px;\n}\n.nav-pills > li + li {\n margin-left: 2px;\n}\n.nav-pills > li.active > a,\n.nav-pills > li.active > a:hover,\n.nav-pills > li.active > a:focus {\n color: #fff;\n background-color: #337ab7;\n}\n.nav-stacked > li {\n float: none;\n}\n.nav-stacked > li + li {\n margin-top: 2px;\n margin-left: 0;\n}\n.nav-justified {\n width: 100%;\n}\n.nav-justified > li {\n float: none;\n}\n.nav-justified > li > a {\n margin-bottom: 5px;\n text-align: center;\n}\n.nav-justified > .dropdown .dropdown-menu {\n top: auto;\n left: auto;\n}\n@media (min-width: 768px) {\n .nav-justified > li {\n display: table-cell;\n width: 1%;\n }\n .nav-justified > li > a {\n margin-bottom: 0;\n }\n}\n.nav-tabs-justified {\n border-bottom: 0;\n}\n.nav-tabs-justified > li > a {\n margin-right: 0;\n border-radius: 4px;\n}\n.nav-tabs-justified > .active > a,\n.nav-tabs-justified > .active > a:hover,\n.nav-tabs-justified > .active > a:focus {\n border: 1px solid #ddd;\n}\n@media (min-width: 768px) {\n .nav-tabs-justified > li > a {\n border-bottom: 1px solid #ddd;\n border-radius: 4px 4px 0 0;\n }\n .nav-tabs-justified > .active > a,\n .nav-tabs-justified > .active > a:hover,\n .nav-tabs-justified > .active > a:focus {\n border-bottom-color: #fff;\n }\n}\n.tab-content > .tab-pane {\n display: none;\n}\n.tab-content > .active {\n display: block;\n}\n.nav-tabs .dropdown-menu {\n margin-top: -1px;\n border-top-left-radius: 0;\n border-top-right-radius: 
0;\n}\n.navbar {\n position: relative;\n min-height: 50px;\n margin-bottom: 20px;\n border: 1px solid transparent;\n}\n@media (min-width: 768px) {\n .navbar {\n border-radius: 4px;\n }\n}\n@media (min-width: 768px) {\n .navbar-header {\n float: left;\n }\n}\n.navbar-collapse {\n padding-right: 15px;\n padding-left: 15px;\n overflow-x: visible;\n -webkit-overflow-scrolling: touch;\n border-top: 1px solid transparent;\n -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1);\n box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1);\n}\n.navbar-collapse.in {\n overflow-y: auto;\n}\n@media (min-width: 768px) {\n .navbar-collapse {\n width: auto;\n border-top: 0;\n -webkit-box-shadow: none;\n box-shadow: none;\n }\n .navbar-collapse.collapse {\n display: block !important;\n height: auto !important;\n padding-bottom: 0;\n overflow: visible !important;\n }\n .navbar-collapse.in {\n overflow-y: visible;\n }\n .navbar-fixed-top .navbar-collapse,\n .navbar-static-top .navbar-collapse,\n .navbar-fixed-bottom .navbar-collapse {\n padding-right: 0;\n padding-left: 0;\n }\n}\n.navbar-fixed-top .navbar-collapse,\n.navbar-fixed-bottom .navbar-collapse {\n max-height: 340px;\n}\n@media (max-device-width: 480px) and (orientation: landscape) {\n .navbar-fixed-top .navbar-collapse,\n .navbar-fixed-bottom .navbar-collapse {\n max-height: 200px;\n }\n}\n.container > .navbar-header,\n.container-fluid > .navbar-header,\n.container > .navbar-collapse,\n.container-fluid > .navbar-collapse {\n margin-right: -15px;\n margin-left: -15px;\n}\n@media (min-width: 768px) {\n .container > .navbar-header,\n .container-fluid > .navbar-header,\n .container > .navbar-collapse,\n .container-fluid > .navbar-collapse {\n margin-right: 0;\n margin-left: 0;\n }\n}\n.navbar-static-top {\n z-index: 1000;\n border-width: 0 0 1px;\n}\n@media (min-width: 768px) {\n .navbar-static-top {\n border-radius: 0;\n }\n}\n.navbar-fixed-top,\n.navbar-fixed-bottom {\n position: fixed;\n right: 0;\n left: 0;\n z-index: 1030;\n}\n@media (min-width: 768px) {\n .navbar-fixed-top,\n .navbar-fixed-bottom {\n border-radius: 0;\n }\n}\n.navbar-fixed-top {\n top: 0;\n border-width: 0 0 1px;\n}\n.navbar-fixed-bottom {\n bottom: 0;\n margin-bottom: 0;\n border-width: 1px 0 0;\n}\n.navbar-brand {\n float: left;\n height: 50px;\n padding: 15px 15px;\n font-size: 18px;\n line-height: 20px;\n}\n.navbar-brand:hover,\n.navbar-brand:focus {\n text-decoration: none;\n}\n.navbar-brand > img {\n display: block;\n}\n@media (min-width: 768px) {\n .navbar > .container .navbar-brand,\n .navbar > .container-fluid .navbar-brand {\n margin-left: -15px;\n }\n}\n.navbar-toggle {\n position: relative;\n float: right;\n padding: 9px 10px;\n margin-top: 8px;\n margin-right: 15px;\n margin-bottom: 8px;\n background-color: transparent;\n background-image: none;\n border: 1px solid transparent;\n border-radius: 4px;\n}\n.navbar-toggle:focus {\n outline: 0;\n}\n.navbar-toggle .icon-bar {\n display: block;\n width: 22px;\n height: 2px;\n border-radius: 1px;\n}\n.navbar-toggle .icon-bar + .icon-bar {\n margin-top: 4px;\n}\n@media (min-width: 768px) {\n .navbar-toggle {\n display: none;\n }\n}\n.navbar-nav {\n margin: 7.5px -15px;\n}\n.navbar-nav > li > a {\n padding-top: 10px;\n padding-bottom: 10px;\n line-height: 20px;\n}\n@media (max-width: 767px) {\n .navbar-nav .open .dropdown-menu {\n position: static;\n float: none;\n width: auto;\n margin-top: 0;\n background-color: transparent;\n border: 0;\n -webkit-box-shadow: none;\n box-shadow: none;\n }\n .navbar-nav .open .dropdown-menu > 
li > a,\n .navbar-nav .open .dropdown-menu .dropdown-header {\n padding: 5px 15px 5px 25px;\n }\n .navbar-nav .open .dropdown-menu > li > a {\n line-height: 20px;\n }\n .navbar-nav .open .dropdown-menu > li > a:hover,\n .navbar-nav .open .dropdown-menu > li > a:focus {\n background-image: none;\n }\n}\n@media (min-width: 768px) {\n .navbar-nav {\n float: left;\n margin: 0;\n }\n .navbar-nav > li {\n float: left;\n }\n .navbar-nav > li > a {\n padding-top: 15px;\n padding-bottom: 15px;\n }\n}\n.navbar-form {\n padding: 10px 15px;\n margin-top: 8px;\n margin-right: -15px;\n margin-bottom: 8px;\n margin-left: -15px;\n border-top: 1px solid transparent;\n border-bottom: 1px solid transparent;\n -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1), 0 1px 0 rgba(255, 255, 255, .1);\n box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1), 0 1px 0 rgba(255, 255, 255, .1);\n}\n@media (min-width: 768px) {\n .navbar-form .form-group {\n display: inline-block;\n margin-bottom: 0;\n vertical-align: middle;\n }\n .navbar-form .form-control {\n display: inline-block;\n width: auto;\n vertical-align: middle;\n }\n .navbar-form .form-control-static {\n display: inline-block;\n }\n .navbar-form .input-group {\n display: inline-table;\n vertical-align: middle;\n }\n .navbar-form .input-group .input-group-addon,\n .navbar-form .input-group .input-group-btn,\n .navbar-form .input-group .form-control {\n width: auto;\n }\n .navbar-form .input-group > .form-control {\n width: 100%;\n }\n .navbar-form .control-label {\n margin-bottom: 0;\n vertical-align: middle;\n }\n .navbar-form .radio,\n .navbar-form .checkbox {\n display: inline-block;\n margin-top: 0;\n margin-bottom: 0;\n vertical-align: middle;\n }\n .navbar-form .radio label,\n .navbar-form .checkbox label {\n padding-left: 0;\n }\n .navbar-form .radio input[type=\"radio\"],\n .navbar-form .checkbox input[type=\"checkbox\"] {\n position: relative;\n margin-left: 0;\n }\n .navbar-form .has-feedback .form-control-feedback {\n top: 0;\n }\n}\n@media (max-width: 767px) {\n .navbar-form .form-group {\n margin-bottom: 5px;\n }\n .navbar-form .form-group:last-child {\n margin-bottom: 0;\n }\n}\n@media (min-width: 768px) {\n .navbar-form {\n width: auto;\n padding-top: 0;\n padding-bottom: 0;\n margin-right: 0;\n margin-left: 0;\n border: 0;\n -webkit-box-shadow: none;\n box-shadow: none;\n }\n}\n.navbar-nav > li > .dropdown-menu {\n margin-top: 0;\n border-top-left-radius: 0;\n border-top-right-radius: 0;\n}\n.navbar-fixed-bottom .navbar-nav > li > .dropdown-menu {\n margin-bottom: 0;\n border-top-left-radius: 4px;\n border-top-right-radius: 4px;\n border-bottom-right-radius: 0;\n border-bottom-left-radius: 0;\n}\n.navbar-btn {\n margin-top: 8px;\n margin-bottom: 8px;\n}\n.navbar-btn.btn-sm {\n margin-top: 10px;\n margin-bottom: 10px;\n}\n.navbar-btn.btn-xs {\n margin-top: 14px;\n margin-bottom: 14px;\n}\n.navbar-text {\n margin-top: 15px;\n margin-bottom: 15px;\n}\n@media (min-width: 768px) {\n .navbar-text {\n float: left;\n margin-right: 15px;\n margin-left: 15px;\n }\n}\n@media (min-width: 768px) {\n .navbar-left {\n float: left !important;\n }\n .navbar-right {\n float: right !important;\n margin-right: -15px;\n }\n .navbar-right ~ .navbar-right {\n margin-right: 0;\n }\n}\n.navbar-default {\n background-color: #f8f8f8;\n border-color: #e7e7e7;\n}\n.navbar-default .navbar-brand {\n color: #777;\n}\n.navbar-default .navbar-brand:hover,\n.navbar-default .navbar-brand:focus {\n color: #5e5e5e;\n background-color: transparent;\n}\n.navbar-default .navbar-text 
{\n color: #777;\n}\n.navbar-default .navbar-nav > li > a {\n color: #777;\n}\n.navbar-default .navbar-nav > li > a:hover,\n.navbar-default .navbar-nav > li > a:focus {\n color: #333;\n background-color: transparent;\n}\n.navbar-default .navbar-nav > .active > a,\n.navbar-default .navbar-nav > .active > a:hover,\n.navbar-default .navbar-nav > .active > a:focus {\n color: #555;\n background-color: #e7e7e7;\n}\n.navbar-default .navbar-nav > .disabled > a,\n.navbar-default .navbar-nav > .disabled > a:hover,\n.navbar-default .navbar-nav > .disabled > a:focus {\n color: #ccc;\n background-color: transparent;\n}\n.navbar-default .navbar-toggle {\n border-color: #ddd;\n}\n.navbar-default .navbar-toggle:hover,\n.navbar-default .navbar-toggle:focus {\n background-color: #ddd;\n}\n.navbar-default .navbar-toggle .icon-bar {\n background-color: #888;\n}\n.navbar-default .navbar-collapse,\n.navbar-default .navbar-form {\n border-color: #e7e7e7;\n}\n.navbar-default .navbar-nav > .open > a,\n.navbar-default .navbar-nav > .open > a:hover,\n.navbar-default .navbar-nav > .open > a:focus {\n color: #555;\n background-color: #e7e7e7;\n}\n@media (max-width: 767px) {\n .navbar-default .navbar-nav .open .dropdown-menu > li > a {\n color: #777;\n }\n .navbar-default .navbar-nav .open .dropdown-menu > li > a:hover,\n .navbar-default .navbar-nav .open .dropdown-menu > li > a:focus {\n color: #333;\n background-color: transparent;\n }\n .navbar-default .navbar-nav .open .dropdown-menu > .active > a,\n .navbar-default .navbar-nav .open .dropdown-menu > .active > a:hover,\n .navbar-default .navbar-nav .open .dropdown-menu > .active > a:focus {\n color: #555;\n background-color: #e7e7e7;\n }\n .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a,\n .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:hover,\n .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:focus {\n color: #ccc;\n background-color: transparent;\n }\n}\n.navbar-default .navbar-link {\n color: #777;\n}\n.navbar-default .navbar-link:hover {\n color: #333;\n}\n.navbar-default .btn-link {\n color: #777;\n}\n.navbar-default .btn-link:hover,\n.navbar-default .btn-link:focus {\n color: #333;\n}\n.navbar-default .btn-link[disabled]:hover,\nfieldset[disabled] .navbar-default .btn-link:hover,\n.navbar-default .btn-link[disabled]:focus,\nfieldset[disabled] .navbar-default .btn-link:focus {\n color: #ccc;\n}\n.navbar-inverse {\n background-color: #222;\n border-color: #080808;\n}\n.navbar-inverse .navbar-brand {\n color: #9d9d9d;\n}\n.navbar-inverse .navbar-brand:hover,\n.navbar-inverse .navbar-brand:focus {\n color: #fff;\n background-color: transparent;\n}\n.navbar-inverse .navbar-text {\n color: #9d9d9d;\n}\n.navbar-inverse .navbar-nav > li > a {\n color: #9d9d9d;\n}\n.navbar-inverse .navbar-nav > li > a:hover,\n.navbar-inverse .navbar-nav > li > a:focus {\n color: #fff;\n background-color: transparent;\n}\n.navbar-inverse .navbar-nav > .active > a,\n.navbar-inverse .navbar-nav > .active > a:hover,\n.navbar-inverse .navbar-nav > .active > a:focus {\n color: #fff;\n background-color: #080808;\n}\n.navbar-inverse .navbar-nav > .disabled > a,\n.navbar-inverse .navbar-nav > .disabled > a:hover,\n.navbar-inverse .navbar-nav > .disabled > a:focus {\n color: #444;\n background-color: transparent;\n}\n.navbar-inverse .navbar-toggle {\n border-color: #333;\n}\n.navbar-inverse .navbar-toggle:hover,\n.navbar-inverse .navbar-toggle:focus {\n background-color: #333;\n}\n.navbar-inverse .navbar-toggle .icon-bar {\n background-color: 
#fff;\n}\n.navbar-inverse .navbar-collapse,\n.navbar-inverse .navbar-form {\n border-color: #101010;\n}\n.navbar-inverse .navbar-nav > .open > a,\n.navbar-inverse .navbar-nav > .open > a:hover,\n.navbar-inverse .navbar-nav > .open > a:focus {\n color: #fff;\n background-color: #080808;\n}\n@media (max-width: 767px) {\n .navbar-inverse .navbar-nav .open .dropdown-menu > .dropdown-header {\n border-color: #080808;\n }\n .navbar-inverse .navbar-nav .open .dropdown-menu .divider {\n background-color: #080808;\n }\n .navbar-inverse .navbar-nav .open .dropdown-menu > li > a {\n color: #9d9d9d;\n }\n .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:hover,\n .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:focus {\n color: #fff;\n background-color: transparent;\n }\n .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a,\n .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:hover,\n .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:focus {\n color: #fff;\n background-color: #080808;\n }\n .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a,\n .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:hover,\n .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:focus {\n color: #444;\n background-color: transparent;\n }\n}\n.navbar-inverse .navbar-link {\n color: #9d9d9d;\n}\n.navbar-inverse .navbar-link:hover {\n color: #fff;\n}\n.navbar-inverse .btn-link {\n color: #9d9d9d;\n}\n.navbar-inverse .btn-link:hover,\n.navbar-inverse .btn-link:focus {\n color: #fff;\n}\n.navbar-inverse .btn-link[disabled]:hover,\nfieldset[disabled] .navbar-inverse .btn-link:hover,\n.navbar-inverse .btn-link[disabled]:focus,\nfieldset[disabled] .navbar-inverse .btn-link:focus {\n color: #444;\n}\n.breadcrumb {\n padding: 8px 15px;\n margin-bottom: 20px;\n list-style: none;\n background-color: #f5f5f5;\n border-radius: 4px;\n}\n.breadcrumb > li {\n display: inline-block;\n}\n.breadcrumb > li + li:before {\n padding: 0 5px;\n color: #ccc;\n content: \"/\\00a0\";\n}\n.breadcrumb > .active {\n color: #777;\n}\n.pagination {\n display: inline-block;\n padding-left: 0;\n margin: 20px 0;\n border-radius: 4px;\n}\n.pagination > li {\n display: inline;\n}\n.pagination > li > a,\n.pagination > li > span {\n position: relative;\n float: left;\n padding: 6px 12px;\n margin-left: -1px;\n line-height: 1.42857143;\n color: #337ab7;\n text-decoration: none;\n background-color: #fff;\n border: 1px solid #ddd;\n}\n.pagination > li:first-child > a,\n.pagination > li:first-child > span {\n margin-left: 0;\n border-top-left-radius: 4px;\n border-bottom-left-radius: 4px;\n}\n.pagination > li:last-child > a,\n.pagination > li:last-child > span {\n border-top-right-radius: 4px;\n border-bottom-right-radius: 4px;\n}\n.pagination > li > a:hover,\n.pagination > li > span:hover,\n.pagination > li > a:focus,\n.pagination > li > span:focus {\n z-index: 2;\n color: #23527c;\n background-color: #eee;\n border-color: #ddd;\n}\n.pagination > .active > a,\n.pagination > .active > span,\n.pagination > .active > a:hover,\n.pagination > .active > span:hover,\n.pagination > .active > a:focus,\n.pagination > .active > span:focus {\n z-index: 3;\n color: #fff;\n cursor: default;\n background-color: #337ab7;\n border-color: #337ab7;\n}\n.pagination > .disabled > span,\n.pagination > .disabled > span:hover,\n.pagination > .disabled > span:focus,\n.pagination > .disabled > a,\n.pagination > .disabled > a:hover,\n.pagination > .disabled > a:focus {\n color: #777;\n cursor: 
not-allowed;\n background-color: #fff;\n border-color: #ddd;\n}\n.pagination-lg > li > a,\n.pagination-lg > li > span {\n padding: 10px 16px;\n font-size: 18px;\n line-height: 1.3333333;\n}\n.pagination-lg > li:first-child > a,\n.pagination-lg > li:first-child > span {\n border-top-left-radius: 6px;\n border-bottom-left-radius: 6px;\n}\n.pagination-lg > li:last-child > a,\n.pagination-lg > li:last-child > span {\n border-top-right-radius: 6px;\n border-bottom-right-radius: 6px;\n}\n.pagination-sm > li > a,\n.pagination-sm > li > span {\n padding: 5px 10px;\n font-size: 12px;\n line-height: 1.5;\n}\n.pagination-sm > li:first-child > a,\n.pagination-sm > li:first-child > span {\n border-top-left-radius: 3px;\n border-bottom-left-radius: 3px;\n}\n.pagination-sm > li:last-child > a,\n.pagination-sm > li:last-child > span {\n border-top-right-radius: 3px;\n border-bottom-right-radius: 3px;\n}\n.pager {\n padding-left: 0;\n margin: 20px 0;\n text-align: center;\n list-style: none;\n}\n.pager li {\n display: inline;\n}\n.pager li > a,\n.pager li > span {\n display: inline-block;\n padding: 5px 14px;\n background-color: #fff;\n border: 1px solid #ddd;\n border-radius: 15px;\n}\n.pager li > a:hover,\n.pager li > a:focus {\n text-decoration: none;\n background-color: #eee;\n}\n.pager .next > a,\n.pager .next > span {\n float: right;\n}\n.pager .previous > a,\n.pager .previous > span {\n float: left;\n}\n.pager .disabled > a,\n.pager .disabled > a:hover,\n.pager .disabled > a:focus,\n.pager .disabled > span {\n color: #777;\n cursor: not-allowed;\n background-color: #fff;\n}\n.label {\n display: inline;\n padding: .2em .6em .3em;\n font-size: 75%;\n font-weight: bold;\n line-height: 1;\n color: #fff;\n text-align: center;\n white-space: nowrap;\n vertical-align: baseline;\n border-radius: .25em;\n}\na.label:hover,\na.label:focus {\n color: #fff;\n text-decoration: none;\n cursor: pointer;\n}\n.label:empty {\n display: none;\n}\n.btn .label {\n position: relative;\n top: -1px;\n}\n.label-default {\n background-color: #777;\n}\n.label-default[href]:hover,\n.label-default[href]:focus {\n background-color: #5e5e5e;\n}\n.label-primary {\n background-color: #337ab7;\n}\n.label-primary[href]:hover,\n.label-primary[href]:focus {\n background-color: #286090;\n}\n.label-success {\n background-color: #5cb85c;\n}\n.label-success[href]:hover,\n.label-success[href]:focus {\n background-color: #449d44;\n}\n.label-info {\n background-color: #5bc0de;\n}\n.label-info[href]:hover,\n.label-info[href]:focus {\n background-color: #31b0d5;\n}\n.label-warning {\n background-color: #f0ad4e;\n}\n.label-warning[href]:hover,\n.label-warning[href]:focus {\n background-color: #ec971f;\n}\n.label-danger {\n background-color: #d9534f;\n}\n.label-danger[href]:hover,\n.label-danger[href]:focus {\n background-color: #c9302c;\n}\n.badge {\n display: inline-block;\n min-width: 10px;\n padding: 3px 7px;\n font-size: 12px;\n font-weight: bold;\n line-height: 1;\n color: #fff;\n text-align: center;\n white-space: nowrap;\n vertical-align: middle;\n background-color: #777;\n border-radius: 10px;\n}\n.badge:empty {\n display: none;\n}\n.btn .badge {\n position: relative;\n top: -1px;\n}\n.btn-xs .badge,\n.btn-group-xs > .btn .badge {\n top: 0;\n padding: 1px 5px;\n}\na.badge:hover,\na.badge:focus {\n color: #fff;\n text-decoration: none;\n cursor: pointer;\n}\n.list-group-item.active > .badge,\n.nav-pills > .active > a > .badge {\n color: #337ab7;\n background-color: #fff;\n}\n.list-group-item > .badge {\n float: right;\n}\n.list-group-item 
> .badge + .badge {\n margin-right: 5px;\n}\n.nav-pills > li > a > .badge {\n margin-left: 3px;\n}\n.jumbotron {\n padding-top: 30px;\n padding-bottom: 30px;\n margin-bottom: 30px;\n color: inherit;\n background-color: #eee;\n}\n.jumbotron h1,\n.jumbotron .h1 {\n color: inherit;\n}\n.jumbotron p {\n margin-bottom: 15px;\n font-size: 21px;\n font-weight: 200;\n}\n.jumbotron > hr {\n border-top-color: #d5d5d5;\n}\n.container .jumbotron,\n.container-fluid .jumbotron {\n padding-right: 15px;\n padding-left: 15px;\n border-radius: 6px;\n}\n.jumbotron .container {\n max-width: 100%;\n}\n@media screen and (min-width: 768px) {\n .jumbotron {\n padding-top: 48px;\n padding-bottom: 48px;\n }\n .container .jumbotron,\n .container-fluid .jumbotron {\n padding-right: 60px;\n padding-left: 60px;\n }\n .jumbotron h1,\n .jumbotron .h1 {\n font-size: 63px;\n }\n}\n.thumbnail {\n display: block;\n padding: 4px;\n margin-bottom: 20px;\n line-height: 1.42857143;\n background-color: #fff;\n border: 1px solid #ddd;\n border-radius: 4px;\n -webkit-transition: border .2s ease-in-out;\n -o-transition: border .2s ease-in-out;\n transition: border .2s ease-in-out;\n}\n.thumbnail > img,\n.thumbnail a > img {\n margin-right: auto;\n margin-left: auto;\n}\na.thumbnail:hover,\na.thumbnail:focus,\na.thumbnail.active {\n border-color: #337ab7;\n}\n.thumbnail .caption {\n padding: 9px;\n color: #333;\n}\n.alert {\n padding: 15px;\n margin-bottom: 20px;\n border: 1px solid transparent;\n border-radius: 4px;\n}\n.alert h4 {\n margin-top: 0;\n color: inherit;\n}\n.alert .alert-link {\n font-weight: bold;\n}\n.alert > p,\n.alert > ul {\n margin-bottom: 0;\n}\n.alert > p + p {\n margin-top: 5px;\n}\n.alert-dismissable,\n.alert-dismissible {\n padding-right: 35px;\n}\n.alert-dismissable .close,\n.alert-dismissible .close {\n position: relative;\n top: -2px;\n right: -21px;\n color: inherit;\n}\n.alert-success {\n color: #3c763d;\n background-color: #dff0d8;\n border-color: #d6e9c6;\n}\n.alert-success hr {\n border-top-color: #c9e2b3;\n}\n.alert-success .alert-link {\n color: #2b542c;\n}\n.alert-info {\n color: #31708f;\n background-color: #d9edf7;\n border-color: #bce8f1;\n}\n.alert-info hr {\n border-top-color: #a6e1ec;\n}\n.alert-info .alert-link {\n color: #245269;\n}\n.alert-warning {\n color: #8a6d3b;\n background-color: #fcf8e3;\n border-color: #faebcc;\n}\n.alert-warning hr {\n border-top-color: #f7e1b5;\n}\n.alert-warning .alert-link {\n color: #66512c;\n}\n.alert-danger {\n color: #a94442;\n background-color: #f2dede;\n border-color: #ebccd1;\n}\n.alert-danger hr {\n border-top-color: #e4b9c0;\n}\n.alert-danger .alert-link {\n color: #843534;\n}\n@-webkit-keyframes progress-bar-stripes {\n from {\n background-position: 40px 0;\n }\n to {\n background-position: 0 0;\n }\n}\n@-o-keyframes progress-bar-stripes {\n from {\n background-position: 40px 0;\n }\n to {\n background-position: 0 0;\n }\n}\n@keyframes progress-bar-stripes {\n from {\n background-position: 40px 0;\n }\n to {\n background-position: 0 0;\n }\n}\n.progress {\n height: 20px;\n margin-bottom: 20px;\n overflow: hidden;\n background-color: #f5f5f5;\n border-radius: 4px;\n -webkit-box-shadow: inset 0 1px 2px rgba(0, 0, 0, .1);\n box-shadow: inset 0 1px 2px rgba(0, 0, 0, .1);\n}\n.progress-bar {\n float: left;\n width: 0;\n height: 100%;\n font-size: 12px;\n line-height: 20px;\n color: #fff;\n text-align: center;\n background-color: #337ab7;\n -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .15);\n box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .15);\n 
-webkit-transition: width .6s ease;\n -o-transition: width .6s ease;\n transition: width .6s ease;\n}\n.progress-striped .progress-bar,\n.progress-bar-striped {\n background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n background-image: linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n -webkit-background-size: 40px 40px;\n background-size: 40px 40px;\n}\n.progress.active .progress-bar,\n.progress-bar.active {\n -webkit-animation: progress-bar-stripes 2s linear infinite;\n -o-animation: progress-bar-stripes 2s linear infinite;\n animation: progress-bar-stripes 2s linear infinite;\n}\n.progress-bar-success {\n background-color: #5cb85c;\n}\n.progress-striped .progress-bar-success {\n background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n background-image: linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n}\n.progress-bar-info {\n background-color: #5bc0de;\n}\n.progress-striped .progress-bar-info {\n background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n background-image: linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n}\n.progress-bar-warning {\n background-color: #f0ad4e;\n}\n.progress-striped .progress-bar-warning {\n background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n background-image: linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n}\n.progress-bar-danger {\n background-color: #d9534f;\n}\n.progress-striped .progress-bar-danger {\n background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n background-image: -o-linear-gradient(45deg, rgba(255, 255, 
255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n background-image: linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n}\n.media {\n margin-top: 15px;\n}\n.media:first-child {\n margin-top: 0;\n}\n.media,\n.media-body {\n overflow: hidden;\n zoom: 1;\n}\n.media-body {\n width: 10000px;\n}\n.media-object {\n display: block;\n}\n.media-object.img-thumbnail {\n max-width: none;\n}\n.media-right,\n.media > .pull-right {\n padding-left: 10px;\n}\n.media-left,\n.media > .pull-left {\n padding-right: 10px;\n}\n.media-left,\n.media-right,\n.media-body {\n display: table-cell;\n vertical-align: top;\n}\n.media-middle {\n vertical-align: middle;\n}\n.media-bottom {\n vertical-align: bottom;\n}\n.media-heading {\n margin-top: 0;\n margin-bottom: 5px;\n}\n.media-list {\n padding-left: 0;\n list-style: none;\n}\n.list-group {\n padding-left: 0;\n margin-bottom: 20px;\n}\n.list-group-item {\n position: relative;\n display: block;\n padding: 10px 15px;\n margin-bottom: -1px;\n background-color: #fff;\n border: 1px solid #ddd;\n}\n.list-group-item:first-child {\n border-top-left-radius: 4px;\n border-top-right-radius: 4px;\n}\n.list-group-item:last-child {\n margin-bottom: 0;\n border-bottom-right-radius: 4px;\n border-bottom-left-radius: 4px;\n}\na.list-group-item,\nbutton.list-group-item {\n color: #555;\n}\na.list-group-item .list-group-item-heading,\nbutton.list-group-item .list-group-item-heading {\n color: #333;\n}\na.list-group-item:hover,\nbutton.list-group-item:hover,\na.list-group-item:focus,\nbutton.list-group-item:focus {\n color: #555;\n text-decoration: none;\n background-color: #f5f5f5;\n}\nbutton.list-group-item {\n width: 100%;\n text-align: left;\n}\n.list-group-item.disabled,\n.list-group-item.disabled:hover,\n.list-group-item.disabled:focus {\n color: #777;\n cursor: not-allowed;\n background-color: #eee;\n}\n.list-group-item.disabled .list-group-item-heading,\n.list-group-item.disabled:hover .list-group-item-heading,\n.list-group-item.disabled:focus .list-group-item-heading {\n color: inherit;\n}\n.list-group-item.disabled .list-group-item-text,\n.list-group-item.disabled:hover .list-group-item-text,\n.list-group-item.disabled:focus .list-group-item-text {\n color: #777;\n}\n.list-group-item.active,\n.list-group-item.active:hover,\n.list-group-item.active:focus {\n z-index: 2;\n color: #fff;\n background-color: #337ab7;\n border-color: #337ab7;\n}\n.list-group-item.active .list-group-item-heading,\n.list-group-item.active:hover .list-group-item-heading,\n.list-group-item.active:focus .list-group-item-heading,\n.list-group-item.active .list-group-item-heading > small,\n.list-group-item.active:hover .list-group-item-heading > small,\n.list-group-item.active:focus .list-group-item-heading > small,\n.list-group-item.active .list-group-item-heading > .small,\n.list-group-item.active:hover .list-group-item-heading > .small,\n.list-group-item.active:focus .list-group-item-heading > .small {\n color: inherit;\n}\n.list-group-item.active .list-group-item-text,\n.list-group-item.active:hover .list-group-item-text,\n.list-group-item.active:focus .list-group-item-text {\n color: #c7ddef;\n}\n.list-group-item-success {\n color: #3c763d;\n background-color: #dff0d8;\n}\na.list-group-item-success,\nbutton.list-group-item-success {\n color: #3c763d;\n}\na.list-group-item-success 
.list-group-item-heading,\nbutton.list-group-item-success .list-group-item-heading {\n color: inherit;\n}\na.list-group-item-success:hover,\nbutton.list-group-item-success:hover,\na.list-group-item-success:focus,\nbutton.list-group-item-success:focus {\n color: #3c763d;\n background-color: #d0e9c6;\n}\na.list-group-item-success.active,\nbutton.list-group-item-success.active,\na.list-group-item-success.active:hover,\nbutton.list-group-item-success.active:hover,\na.list-group-item-success.active:focus,\nbutton.list-group-item-success.active:focus {\n color: #fff;\n background-color: #3c763d;\n border-color: #3c763d;\n}\n.list-group-item-info {\n color: #31708f;\n background-color: #d9edf7;\n}\na.list-group-item-info,\nbutton.list-group-item-info {\n color: #31708f;\n}\na.list-group-item-info .list-group-item-heading,\nbutton.list-group-item-info .list-group-item-heading {\n color: inherit;\n}\na.list-group-item-info:hover,\nbutton.list-group-item-info:hover,\na.list-group-item-info:focus,\nbutton.list-group-item-info:focus {\n color: #31708f;\n background-color: #c4e3f3;\n}\na.list-group-item-info.active,\nbutton.list-group-item-info.active,\na.list-group-item-info.active:hover,\nbutton.list-group-item-info.active:hover,\na.list-group-item-info.active:focus,\nbutton.list-group-item-info.active:focus {\n color: #fff;\n background-color: #31708f;\n border-color: #31708f;\n}\n.list-group-item-warning {\n color: #8a6d3b;\n background-color: #fcf8e3;\n}\na.list-group-item-warning,\nbutton.list-group-item-warning {\n color: #8a6d3b;\n}\na.list-group-item-warning .list-group-item-heading,\nbutton.list-group-item-warning .list-group-item-heading {\n color: inherit;\n}\na.list-group-item-warning:hover,\nbutton.list-group-item-warning:hover,\na.list-group-item-warning:focus,\nbutton.list-group-item-warning:focus {\n color: #8a6d3b;\n background-color: #faf2cc;\n}\na.list-group-item-warning.active,\nbutton.list-group-item-warning.active,\na.list-group-item-warning.active:hover,\nbutton.list-group-item-warning.active:hover,\na.list-group-item-warning.active:focus,\nbutton.list-group-item-warning.active:focus {\n color: #fff;\n background-color: #8a6d3b;\n border-color: #8a6d3b;\n}\n.list-group-item-danger {\n color: #a94442;\n background-color: #f2dede;\n}\na.list-group-item-danger,\nbutton.list-group-item-danger {\n color: #a94442;\n}\na.list-group-item-danger .list-group-item-heading,\nbutton.list-group-item-danger .list-group-item-heading {\n color: inherit;\n}\na.list-group-item-danger:hover,\nbutton.list-group-item-danger:hover,\na.list-group-item-danger:focus,\nbutton.list-group-item-danger:focus {\n color: #a94442;\n background-color: #ebcccc;\n}\na.list-group-item-danger.active,\nbutton.list-group-item-danger.active,\na.list-group-item-danger.active:hover,\nbutton.list-group-item-danger.active:hover,\na.list-group-item-danger.active:focus,\nbutton.list-group-item-danger.active:focus {\n color: #fff;\n background-color: #a94442;\n border-color: #a94442;\n}\n.list-group-item-heading {\n margin-top: 0;\n margin-bottom: 5px;\n}\n.list-group-item-text {\n margin-bottom: 0;\n line-height: 1.3;\n}\n.panel {\n margin-bottom: 20px;\n background-color: #fff;\n border: 1px solid transparent;\n border-radius: 4px;\n -webkit-box-shadow: 0 1px 1px rgba(0, 0, 0, .05);\n box-shadow: 0 1px 1px rgba(0, 0, 0, .05);\n}\n.panel-body {\n padding: 15px;\n}\n.panel-heading {\n padding: 10px 15px;\n border-bottom: 1px solid transparent;\n border-top-left-radius: 3px;\n border-top-right-radius: 3px;\n}\n.panel-heading > 
.dropdown .dropdown-toggle {\n color: inherit;\n}\n.panel-title {\n margin-top: 0;\n margin-bottom: 0;\n font-size: 16px;\n color: inherit;\n}\n.panel-title > a,\n.panel-title > small,\n.panel-title > .small,\n.panel-title > small > a,\n.panel-title > .small > a {\n color: inherit;\n}\n.panel-footer {\n padding: 10px 15px;\n background-color: #f5f5f5;\n border-top: 1px solid #ddd;\n border-bottom-right-radius: 3px;\n border-bottom-left-radius: 3px;\n}\n.panel > .list-group,\n.panel > .panel-collapse > .list-group {\n margin-bottom: 0;\n}\n.panel > .list-group .list-group-item,\n.panel > .panel-collapse > .list-group .list-group-item {\n border-width: 1px 0;\n border-radius: 0;\n}\n.panel > .list-group:first-child .list-group-item:first-child,\n.panel > .panel-collapse > .list-group:first-child .list-group-item:first-child {\n border-top: 0;\n border-top-left-radius: 3px;\n border-top-right-radius: 3px;\n}\n.panel > .list-group:last-child .list-group-item:last-child,\n.panel > .panel-collapse > .list-group:last-child .list-group-item:last-child {\n border-bottom: 0;\n border-bottom-right-radius: 3px;\n border-bottom-left-radius: 3px;\n}\n.panel > .panel-heading + .panel-collapse > .list-group .list-group-item:first-child {\n border-top-left-radius: 0;\n border-top-right-radius: 0;\n}\n.panel-heading + .list-group .list-group-item:first-child {\n border-top-width: 0;\n}\n.list-group + .panel-footer {\n border-top-width: 0;\n}\n.panel > .table,\n.panel > .table-responsive > .table,\n.panel > .panel-collapse > .table {\n margin-bottom: 0;\n}\n.panel > .table caption,\n.panel > .table-responsive > .table caption,\n.panel > .panel-collapse > .table caption {\n padding-right: 15px;\n padding-left: 15px;\n}\n.panel > .table:first-child,\n.panel > .table-responsive:first-child > .table:first-child {\n border-top-left-radius: 3px;\n border-top-right-radius: 3px;\n}\n.panel > .table:first-child > thead:first-child > tr:first-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child {\n border-top-left-radius: 3px;\n border-top-right-radius: 3px;\n}\n.panel > .table:first-child > thead:first-child > tr:first-child td:first-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:first-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child td:first-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:first-child,\n.panel > .table:first-child > thead:first-child > tr:first-child th:first-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:first-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child th:first-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:first-child {\n border-top-left-radius: 3px;\n}\n.panel > .table:first-child > thead:first-child > tr:first-child td:last-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:last-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child td:last-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:last-child,\n.panel > .table:first-child > thead:first-child > 
tr:first-child th:last-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:last-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child th:last-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:last-child {\n border-top-right-radius: 3px;\n}\n.panel > .table:last-child,\n.panel > .table-responsive:last-child > .table:last-child {\n border-bottom-right-radius: 3px;\n border-bottom-left-radius: 3px;\n}\n.panel > .table:last-child > tbody:last-child > tr:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child {\n border-bottom-right-radius: 3px;\n border-bottom-left-radius: 3px;\n}\n.panel > .table:last-child > tbody:last-child > tr:last-child td:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:first-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child td:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:first-child,\n.panel > .table:last-child > tbody:last-child > tr:last-child th:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:first-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child th:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:first-child {\n border-bottom-left-radius: 3px;\n}\n.panel > .table:last-child > tbody:last-child > tr:last-child td:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:last-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child td:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:last-child,\n.panel > .table:last-child > tbody:last-child > tr:last-child th:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:last-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child th:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:last-child {\n border-bottom-right-radius: 3px;\n}\n.panel > .panel-body + .table,\n.panel > .panel-body + .table-responsive,\n.panel > .table + .panel-body,\n.panel > .table-responsive + .panel-body {\n border-top: 1px solid #ddd;\n}\n.panel > .table > tbody:first-child > tr:first-child th,\n.panel > .table > tbody:first-child > tr:first-child td {\n border-top: 0;\n}\n.panel > .table-bordered,\n.panel > .table-responsive > .table-bordered {\n border: 0;\n}\n.panel > .table-bordered > thead > tr > th:first-child,\n.panel > .table-responsive > .table-bordered > thead > tr > th:first-child,\n.panel > .table-bordered > tbody > tr > th:first-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > th:first-child,\n.panel > .table-bordered > tfoot > tr > th:first-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > th:first-child,\n.panel > .table-bordered > thead > tr > td:first-child,\n.panel > .table-responsive > .table-bordered > thead > tr > td:first-child,\n.panel > .table-bordered > tbody > tr > td:first-child,\n.panel > .table-responsive > 
.table-bordered > tbody > tr > td:first-child,\n.panel > .table-bordered > tfoot > tr > td:first-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > td:first-child {\n border-left: 0;\n}\n.panel > .table-bordered > thead > tr > th:last-child,\n.panel > .table-responsive > .table-bordered > thead > tr > th:last-child,\n.panel > .table-bordered > tbody > tr > th:last-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > th:last-child,\n.panel > .table-bordered > tfoot > tr > th:last-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > th:last-child,\n.panel > .table-bordered > thead > tr > td:last-child,\n.panel > .table-responsive > .table-bordered > thead > tr > td:last-child,\n.panel > .table-bordered > tbody > tr > td:last-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > td:last-child,\n.panel > .table-bordered > tfoot > tr > td:last-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > td:last-child {\n border-right: 0;\n}\n.panel > .table-bordered > thead > tr:first-child > td,\n.panel > .table-responsive > .table-bordered > thead > tr:first-child > td,\n.panel > .table-bordered > tbody > tr:first-child > td,\n.panel > .table-responsive > .table-bordered > tbody > tr:first-child > td,\n.panel > .table-bordered > thead > tr:first-child > th,\n.panel > .table-responsive > .table-bordered > thead > tr:first-child > th,\n.panel > .table-bordered > tbody > tr:first-child > th,\n.panel > .table-responsive > .table-bordered > tbody > tr:first-child > th {\n border-bottom: 0;\n}\n.panel > .table-bordered > tbody > tr:last-child > td,\n.panel > .table-responsive > .table-bordered > tbody > tr:last-child > td,\n.panel > .table-bordered > tfoot > tr:last-child > td,\n.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > td,\n.panel > .table-bordered > tbody > tr:last-child > th,\n.panel > .table-responsive > .table-bordered > tbody > tr:last-child > th,\n.panel > .table-bordered > tfoot > tr:last-child > th,\n.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > th {\n border-bottom: 0;\n}\n.panel > .table-responsive {\n margin-bottom: 0;\n border: 0;\n}\n.panel-group {\n margin-bottom: 20px;\n}\n.panel-group .panel {\n margin-bottom: 0;\n border-radius: 4px;\n}\n.panel-group .panel + .panel {\n margin-top: 5px;\n}\n.panel-group .panel-heading {\n border-bottom: 0;\n}\n.panel-group .panel-heading + .panel-collapse > .panel-body,\n.panel-group .panel-heading + .panel-collapse > .list-group {\n border-top: 1px solid #ddd;\n}\n.panel-group .panel-footer {\n border-top: 0;\n}\n.panel-group .panel-footer + .panel-collapse .panel-body {\n border-bottom: 1px solid #ddd;\n}\n.panel-default {\n border-color: #ddd;\n}\n.panel-default > .panel-heading {\n color: #333;\n background-color: #f5f5f5;\n border-color: #ddd;\n}\n.panel-default > .panel-heading + .panel-collapse > .panel-body {\n border-top-color: #ddd;\n}\n.panel-default > .panel-heading .badge {\n color: #f5f5f5;\n background-color: #333;\n}\n.panel-default > .panel-footer + .panel-collapse > .panel-body {\n border-bottom-color: #ddd;\n}\n.panel-primary {\n border-color: #337ab7;\n}\n.panel-primary > .panel-heading {\n color: #fff;\n background-color: #337ab7;\n border-color: #337ab7;\n}\n.panel-primary > .panel-heading + .panel-collapse > .panel-body {\n border-top-color: #337ab7;\n}\n.panel-primary > .panel-heading .badge {\n color: #337ab7;\n background-color: #fff;\n}\n.panel-primary > .panel-footer + .panel-collapse > .panel-body 
{\n border-bottom-color: #337ab7;\n}\n.panel-success {\n border-color: #d6e9c6;\n}\n.panel-success > .panel-heading {\n color: #3c763d;\n background-color: #dff0d8;\n border-color: #d6e9c6;\n}\n.panel-success > .panel-heading + .panel-collapse > .panel-body {\n border-top-color: #d6e9c6;\n}\n.panel-success > .panel-heading .badge {\n color: #dff0d8;\n background-color: #3c763d;\n}\n.panel-success > .panel-footer + .panel-collapse > .panel-body {\n border-bottom-color: #d6e9c6;\n}\n.panel-info {\n border-color: #bce8f1;\n}\n.panel-info > .panel-heading {\n color: #31708f;\n background-color: #d9edf7;\n border-color: #bce8f1;\n}\n.panel-info > .panel-heading + .panel-collapse > .panel-body {\n border-top-color: #bce8f1;\n}\n.panel-info > .panel-heading .badge {\n color: #d9edf7;\n background-color: #31708f;\n}\n.panel-info > .panel-footer + .panel-collapse > .panel-body {\n border-bottom-color: #bce8f1;\n}\n.panel-warning {\n border-color: #faebcc;\n}\n.panel-warning > .panel-heading {\n color: #8a6d3b;\n background-color: #fcf8e3;\n border-color: #faebcc;\n}\n.panel-warning > .panel-heading + .panel-collapse > .panel-body {\n border-top-color: #faebcc;\n}\n.panel-warning > .panel-heading .badge {\n color: #fcf8e3;\n background-color: #8a6d3b;\n}\n.panel-warning > .panel-footer + .panel-collapse > .panel-body {\n border-bottom-color: #faebcc;\n}\n.panel-danger {\n border-color: #ebccd1;\n}\n.panel-danger > .panel-heading {\n color: #a94442;\n background-color: #f2dede;\n border-color: #ebccd1;\n}\n.panel-danger > .panel-heading + .panel-collapse > .panel-body {\n border-top-color: #ebccd1;\n}\n.panel-danger > .panel-heading .badge {\n color: #f2dede;\n background-color: #a94442;\n}\n.panel-danger > .panel-footer + .panel-collapse > .panel-body {\n border-bottom-color: #ebccd1;\n}\n.embed-responsive {\n position: relative;\n display: block;\n height: 0;\n padding: 0;\n overflow: hidden;\n}\n.embed-responsive .embed-responsive-item,\n.embed-responsive iframe,\n.embed-responsive embed,\n.embed-responsive object,\n.embed-responsive video {\n position: absolute;\n top: 0;\n bottom: 0;\n left: 0;\n width: 100%;\n height: 100%;\n border: 0;\n}\n.embed-responsive-16by9 {\n padding-bottom: 56.25%;\n}\n.embed-responsive-4by3 {\n padding-bottom: 75%;\n}\n.well {\n min-height: 20px;\n padding: 19px;\n margin-bottom: 20px;\n background-color: #f5f5f5;\n border: 1px solid #e3e3e3;\n border-radius: 4px;\n -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .05);\n box-shadow: inset 0 1px 1px rgba(0, 0, 0, .05);\n}\n.well blockquote {\n border-color: #ddd;\n border-color: rgba(0, 0, 0, .15);\n}\n.well-lg {\n padding: 24px;\n border-radius: 6px;\n}\n.well-sm {\n padding: 9px;\n border-radius: 3px;\n}\n.close {\n float: right;\n font-size: 21px;\n font-weight: bold;\n line-height: 1;\n color: #000;\n text-shadow: 0 1px 0 #fff;\n filter: alpha(opacity=20);\n opacity: .2;\n}\n.close:hover,\n.close:focus {\n color: #000;\n text-decoration: none;\n cursor: pointer;\n filter: alpha(opacity=50);\n opacity: .5;\n}\nbutton.close {\n -webkit-appearance: none;\n padding: 0;\n cursor: pointer;\n background: transparent;\n border: 0;\n}\n.modal-open {\n overflow: hidden;\n}\n.modal {\n position: fixed;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n z-index: 1050;\n display: none;\n overflow: hidden;\n -webkit-overflow-scrolling: touch;\n outline: 0;\n}\n.modal.fade .modal-dialog {\n -webkit-transition: -webkit-transform .3s ease-out;\n -o-transition: -o-transform .3s ease-out;\n transition: transform .3s ease-out;\n 
-webkit-transform: translate(0, -25%);\n -ms-transform: translate(0, -25%);\n -o-transform: translate(0, -25%);\n transform: translate(0, -25%);\n}\n.modal.in .modal-dialog {\n -webkit-transform: translate(0, 0);\n -ms-transform: translate(0, 0);\n -o-transform: translate(0, 0);\n transform: translate(0, 0);\n}\n.modal-open .modal {\n overflow-x: hidden;\n overflow-y: auto;\n}\n.modal-dialog {\n position: relative;\n width: auto;\n margin: 10px;\n}\n.modal-content {\n position: relative;\n background-color: #fff;\n -webkit-background-clip: padding-box;\n background-clip: padding-box;\n border: 1px solid #999;\n border: 1px solid rgba(0, 0, 0, .2);\n border-radius: 6px;\n outline: 0;\n -webkit-box-shadow: 0 3px 9px rgba(0, 0, 0, .5);\n box-shadow: 0 3px 9px rgba(0, 0, 0, .5);\n}\n.modal-backdrop {\n position: fixed;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n z-index: 1040;\n background-color: #000;\n}\n.modal-backdrop.fade {\n filter: alpha(opacity=0);\n opacity: 0;\n}\n.modal-backdrop.in {\n filter: alpha(opacity=50);\n opacity: .5;\n}\n.modal-header {\n padding: 15px;\n border-bottom: 1px solid #e5e5e5;\n}\n.modal-header .close {\n margin-top: -2px;\n}\n.modal-title {\n margin: 0;\n line-height: 1.42857143;\n}\n.modal-body {\n position: relative;\n padding: 15px;\n}\n.modal-footer {\n padding: 15px;\n text-align: right;\n border-top: 1px solid #e5e5e5;\n}\n.modal-footer .btn + .btn {\n margin-bottom: 0;\n margin-left: 5px;\n}\n.modal-footer .btn-group .btn + .btn {\n margin-left: -1px;\n}\n.modal-footer .btn-block + .btn-block {\n margin-left: 0;\n}\n.modal-scrollbar-measure {\n position: absolute;\n top: -9999px;\n width: 50px;\n height: 50px;\n overflow: scroll;\n}\n@media (min-width: 768px) {\n .modal-dialog {\n width: 600px;\n margin: 30px auto;\n }\n .modal-content {\n -webkit-box-shadow: 0 5px 15px rgba(0, 0, 0, .5);\n box-shadow: 0 5px 15px rgba(0, 0, 0, .5);\n }\n .modal-sm {\n width: 300px;\n }\n}\n@media (min-width: 992px) {\n .modal-lg {\n width: 900px;\n }\n}\n.tooltip {\n position: absolute;\n z-index: 1070;\n display: block;\n font-family: \"Helvetica Neue\", Helvetica, Arial, sans-serif;\n font-size: 12px;\n font-style: normal;\n font-weight: normal;\n line-height: 1.42857143;\n text-align: left;\n text-align: start;\n text-decoration: none;\n text-shadow: none;\n text-transform: none;\n letter-spacing: normal;\n word-break: normal;\n word-spacing: normal;\n word-wrap: normal;\n white-space: normal;\n filter: alpha(opacity=0);\n opacity: 0;\n\n line-break: auto;\n}\n.tooltip.in {\n filter: alpha(opacity=90);\n opacity: .9;\n}\n.tooltip.top {\n padding: 5px 0;\n margin-top: -3px;\n}\n.tooltip.right {\n padding: 0 5px;\n margin-left: 3px;\n}\n.tooltip.bottom {\n padding: 5px 0;\n margin-top: 3px;\n}\n.tooltip.left {\n padding: 0 5px;\n margin-left: -3px;\n}\n.tooltip-inner {\n max-width: 200px;\n padding: 3px 8px;\n color: #fff;\n text-align: center;\n background-color: #000;\n border-radius: 4px;\n}\n.tooltip-arrow {\n position: absolute;\n width: 0;\n height: 0;\n border-color: transparent;\n border-style: solid;\n}\n.tooltip.top .tooltip-arrow {\n bottom: 0;\n left: 50%;\n margin-left: -5px;\n border-width: 5px 5px 0;\n border-top-color: #000;\n}\n.tooltip.top-left .tooltip-arrow {\n right: 5px;\n bottom: 0;\n margin-bottom: -5px;\n border-width: 5px 5px 0;\n border-top-color: #000;\n}\n.tooltip.top-right .tooltip-arrow {\n bottom: 0;\n left: 5px;\n margin-bottom: -5px;\n border-width: 5px 5px 0;\n border-top-color: #000;\n}\n.tooltip.right .tooltip-arrow {\n top: 
50%;\n left: 0;\n margin-top: -5px;\n border-width: 5px 5px 5px 0;\n border-right-color: #000;\n}\n.tooltip.left .tooltip-arrow {\n top: 50%;\n right: 0;\n margin-top: -5px;\n border-width: 5px 0 5px 5px;\n border-left-color: #000;\n}\n.tooltip.bottom .tooltip-arrow {\n top: 0;\n left: 50%;\n margin-left: -5px;\n border-width: 0 5px 5px;\n border-bottom-color: #000;\n}\n.tooltip.bottom-left .tooltip-arrow {\n top: 0;\n right: 5px;\n margin-top: -5px;\n border-width: 0 5px 5px;\n border-bottom-color: #000;\n}\n.tooltip.bottom-right .tooltip-arrow {\n top: 0;\n left: 5px;\n margin-top: -5px;\n border-width: 0 5px 5px;\n border-bottom-color: #000;\n}\n.popover {\n position: absolute;\n top: 0;\n left: 0;\n z-index: 1060;\n display: none;\n max-width: 276px;\n padding: 1px;\n font-family: \"Helvetica Neue\", Helvetica, Arial, sans-serif;\n font-size: 14px;\n font-style: normal;\n font-weight: normal;\n line-height: 1.42857143;\n text-align: left;\n text-align: start;\n text-decoration: none;\n text-shadow: none;\n text-transform: none;\n letter-spacing: normal;\n word-break: normal;\n word-spacing: normal;\n word-wrap: normal;\n white-space: normal;\n background-color: #fff;\n -webkit-background-clip: padding-box;\n background-clip: padding-box;\n border: 1px solid #ccc;\n border: 1px solid rgba(0, 0, 0, .2);\n border-radius: 6px;\n -webkit-box-shadow: 0 5px 10px rgba(0, 0, 0, .2);\n box-shadow: 0 5px 10px rgba(0, 0, 0, .2);\n\n line-break: auto;\n}\n.popover.top {\n margin-top: -10px;\n}\n.popover.right {\n margin-left: 10px;\n}\n.popover.bottom {\n margin-top: 10px;\n}\n.popover.left {\n margin-left: -10px;\n}\n.popover-title {\n padding: 8px 14px;\n margin: 0;\n font-size: 14px;\n background-color: #f7f7f7;\n border-bottom: 1px solid #ebebeb;\n border-radius: 5px 5px 0 0;\n}\n.popover-content {\n padding: 9px 14px;\n}\n.popover > .arrow,\n.popover > .arrow:after {\n position: absolute;\n display: block;\n width: 0;\n height: 0;\n border-color: transparent;\n border-style: solid;\n}\n.popover > .arrow {\n border-width: 11px;\n}\n.popover > .arrow:after {\n content: \"\";\n border-width: 10px;\n}\n.popover.top > .arrow {\n bottom: -11px;\n left: 50%;\n margin-left: -11px;\n border-top-color: #999;\n border-top-color: rgba(0, 0, 0, .25);\n border-bottom-width: 0;\n}\n.popover.top > .arrow:after {\n bottom: 1px;\n margin-left: -10px;\n content: \" \";\n border-top-color: #fff;\n border-bottom-width: 0;\n}\n.popover.right > .arrow {\n top: 50%;\n left: -11px;\n margin-top: -11px;\n border-right-color: #999;\n border-right-color: rgba(0, 0, 0, .25);\n border-left-width: 0;\n}\n.popover.right > .arrow:after {\n bottom: -10px;\n left: 1px;\n content: \" \";\n border-right-color: #fff;\n border-left-width: 0;\n}\n.popover.bottom > .arrow {\n top: -11px;\n left: 50%;\n margin-left: -11px;\n border-top-width: 0;\n border-bottom-color: #999;\n border-bottom-color: rgba(0, 0, 0, .25);\n}\n.popover.bottom > .arrow:after {\n top: 1px;\n margin-left: -10px;\n content: \" \";\n border-top-width: 0;\n border-bottom-color: #fff;\n}\n.popover.left > .arrow {\n top: 50%;\n right: -11px;\n margin-top: -11px;\n border-right-width: 0;\n border-left-color: #999;\n border-left-color: rgba(0, 0, 0, .25);\n}\n.popover.left > .arrow:after {\n right: 1px;\n bottom: -10px;\n content: \" \";\n border-right-width: 0;\n border-left-color: #fff;\n}\n.carousel {\n position: relative;\n}\n.carousel-inner {\n position: relative;\n width: 100%;\n overflow: hidden;\n}\n.carousel-inner > .item {\n position: relative;\n display: 
none;\n -webkit-transition: .6s ease-in-out left;\n -o-transition: .6s ease-in-out left;\n transition: .6s ease-in-out left;\n}\n.carousel-inner > .item > img,\n.carousel-inner > .item > a > img {\n line-height: 1;\n}\n@media all and (transform-3d), (-webkit-transform-3d) {\n .carousel-inner > .item {\n -webkit-transition: -webkit-transform .6s ease-in-out;\n -o-transition: -o-transform .6s ease-in-out;\n transition: transform .6s ease-in-out;\n\n -webkit-backface-visibility: hidden;\n backface-visibility: hidden;\n -webkit-perspective: 1000px;\n perspective: 1000px;\n }\n .carousel-inner > .item.next,\n .carousel-inner > .item.active.right {\n left: 0;\n -webkit-transform: translate3d(100%, 0, 0);\n transform: translate3d(100%, 0, 0);\n }\n .carousel-inner > .item.prev,\n .carousel-inner > .item.active.left {\n left: 0;\n -webkit-transform: translate3d(-100%, 0, 0);\n transform: translate3d(-100%, 0, 0);\n }\n .carousel-inner > .item.next.left,\n .carousel-inner > .item.prev.right,\n .carousel-inner > .item.active {\n left: 0;\n -webkit-transform: translate3d(0, 0, 0);\n transform: translate3d(0, 0, 0);\n }\n}\n.carousel-inner > .active,\n.carousel-inner > .next,\n.carousel-inner > .prev {\n display: block;\n}\n.carousel-inner > .active {\n left: 0;\n}\n.carousel-inner > .next,\n.carousel-inner > .prev {\n position: absolute;\n top: 0;\n width: 100%;\n}\n.carousel-inner > .next {\n left: 100%;\n}\n.carousel-inner > .prev {\n left: -100%;\n}\n.carousel-inner > .next.left,\n.carousel-inner > .prev.right {\n left: 0;\n}\n.carousel-inner > .active.left {\n left: -100%;\n}\n.carousel-inner > .active.right {\n left: 100%;\n}\n.carousel-control {\n position: absolute;\n top: 0;\n bottom: 0;\n left: 0;\n width: 15%;\n font-size: 20px;\n color: #fff;\n text-align: center;\n text-shadow: 0 1px 2px rgba(0, 0, 0, .6);\n background-color: rgba(0, 0, 0, 0);\n filter: alpha(opacity=50);\n opacity: .5;\n}\n.carousel-control.left {\n background-image: -webkit-linear-gradient(left, rgba(0, 0, 0, .5) 0%, rgba(0, 0, 0, .0001) 100%);\n background-image: -o-linear-gradient(left, rgba(0, 0, 0, .5) 0%, rgba(0, 0, 0, .0001) 100%);\n background-image: -webkit-gradient(linear, left top, right top, from(rgba(0, 0, 0, .5)), to(rgba(0, 0, 0, .0001)));\n background-image: linear-gradient(to right, rgba(0, 0, 0, .5) 0%, rgba(0, 0, 0, .0001) 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', endColorstr='#00000000', GradientType=1);\n background-repeat: repeat-x;\n}\n.carousel-control.right {\n right: 0;\n left: auto;\n background-image: -webkit-linear-gradient(left, rgba(0, 0, 0, .0001) 0%, rgba(0, 0, 0, .5) 100%);\n background-image: -o-linear-gradient(left, rgba(0, 0, 0, .0001) 0%, rgba(0, 0, 0, .5) 100%);\n background-image: -webkit-gradient(linear, left top, right top, from(rgba(0, 0, 0, .0001)), to(rgba(0, 0, 0, .5)));\n background-image: linear-gradient(to right, rgba(0, 0, 0, .0001) 0%, rgba(0, 0, 0, .5) 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1);\n background-repeat: repeat-x;\n}\n.carousel-control:hover,\n.carousel-control:focus {\n color: #fff;\n text-decoration: none;\n filter: alpha(opacity=90);\n outline: 0;\n opacity: .9;\n}\n.carousel-control .icon-prev,\n.carousel-control .icon-next,\n.carousel-control .glyphicon-chevron-left,\n.carousel-control .glyphicon-chevron-right {\n position: absolute;\n top: 50%;\n z-index: 5;\n display: inline-block;\n margin-top: -10px;\n}\n.carousel-control 
.icon-prev,\n.carousel-control .glyphicon-chevron-left {\n left: 50%;\n margin-left: -10px;\n}\n.carousel-control .icon-next,\n.carousel-control .glyphicon-chevron-right {\n right: 50%;\n margin-right: -10px;\n}\n.carousel-control .icon-prev,\n.carousel-control .icon-next {\n width: 20px;\n height: 20px;\n font-family: serif;\n line-height: 1;\n}\n.carousel-control .icon-prev:before {\n content: '\\2039';\n}\n.carousel-control .icon-next:before {\n content: '\\203a';\n}\n.carousel-indicators {\n position: absolute;\n bottom: 10px;\n left: 50%;\n z-index: 15;\n width: 60%;\n padding-left: 0;\n margin-left: -30%;\n text-align: center;\n list-style: none;\n}\n.carousel-indicators li {\n display: inline-block;\n width: 10px;\n height: 10px;\n margin: 1px;\n text-indent: -999px;\n cursor: pointer;\n background-color: #000 \\9;\n background-color: rgba(0, 0, 0, 0);\n border: 1px solid #fff;\n border-radius: 10px;\n}\n.carousel-indicators .active {\n width: 12px;\n height: 12px;\n margin: 0;\n background-color: #fff;\n}\n.carousel-caption {\n position: absolute;\n right: 15%;\n bottom: 20px;\n left: 15%;\n z-index: 10;\n padding-top: 20px;\n padding-bottom: 20px;\n color: #fff;\n text-align: center;\n text-shadow: 0 1px 2px rgba(0, 0, 0, .6);\n}\n.carousel-caption .btn {\n text-shadow: none;\n}\n@media screen and (min-width: 768px) {\n .carousel-control .glyphicon-chevron-left,\n .carousel-control .glyphicon-chevron-right,\n .carousel-control .icon-prev,\n .carousel-control .icon-next {\n width: 30px;\n height: 30px;\n margin-top: -10px;\n font-size: 30px;\n }\n .carousel-control .glyphicon-chevron-left,\n .carousel-control .icon-prev {\n margin-left: -10px;\n }\n .carousel-control .glyphicon-chevron-right,\n .carousel-control .icon-next {\n margin-right: -10px;\n }\n .carousel-caption {\n right: 20%;\n left: 20%;\n padding-bottom: 30px;\n }\n .carousel-indicators {\n bottom: 20px;\n }\n}\n.clearfix:before,\n.clearfix:after,\n.dl-horizontal dd:before,\n.dl-horizontal dd:after,\n.container:before,\n.container:after,\n.container-fluid:before,\n.container-fluid:after,\n.row:before,\n.row:after,\n.form-horizontal .form-group:before,\n.form-horizontal .form-group:after,\n.btn-toolbar:before,\n.btn-toolbar:after,\n.btn-group-vertical > .btn-group:before,\n.btn-group-vertical > .btn-group:after,\n.nav:before,\n.nav:after,\n.navbar:before,\n.navbar:after,\n.navbar-header:before,\n.navbar-header:after,\n.navbar-collapse:before,\n.navbar-collapse:after,\n.pager:before,\n.pager:after,\n.panel-body:before,\n.panel-body:after,\n.modal-header:before,\n.modal-header:after,\n.modal-footer:before,\n.modal-footer:after {\n display: table;\n content: \" \";\n}\n.clearfix:after,\n.dl-horizontal dd:after,\n.container:after,\n.container-fluid:after,\n.row:after,\n.form-horizontal .form-group:after,\n.btn-toolbar:after,\n.btn-group-vertical > .btn-group:after,\n.nav:after,\n.navbar:after,\n.navbar-header:after,\n.navbar-collapse:after,\n.pager:after,\n.panel-body:after,\n.modal-header:after,\n.modal-footer:after {\n clear: both;\n}\n.center-block {\n display: block;\n margin-right: auto;\n margin-left: auto;\n}\n.pull-right {\n float: right !important;\n}\n.pull-left {\n float: left !important;\n}\n.hide {\n display: none !important;\n}\n.show {\n display: block !important;\n}\n.invisible {\n visibility: hidden;\n}\n.text-hide {\n font: 0/0 a;\n color: transparent;\n text-shadow: none;\n background-color: transparent;\n border: 0;\n}\n.hidden {\n display: none !important;\n}\n.affix {\n position: 
fixed;\n}\n@-ms-viewport {\n width: device-width;\n}\n.visible-xs,\n.visible-sm,\n.visible-md,\n.visible-lg {\n display: none !important;\n}\n.visible-xs-block,\n.visible-xs-inline,\n.visible-xs-inline-block,\n.visible-sm-block,\n.visible-sm-inline,\n.visible-sm-inline-block,\n.visible-md-block,\n.visible-md-inline,\n.visible-md-inline-block,\n.visible-lg-block,\n.visible-lg-inline,\n.visible-lg-inline-block {\n display: none !important;\n}\n@media (max-width: 767px) {\n .visible-xs {\n display: block !important;\n }\n table.visible-xs {\n display: table !important;\n }\n tr.visible-xs {\n display: table-row !important;\n }\n th.visible-xs,\n td.visible-xs {\n display: table-cell !important;\n }\n}\n@media (max-width: 767px) {\n .visible-xs-block {\n display: block !important;\n }\n}\n@media (max-width: 767px) {\n .visible-xs-inline {\n display: inline !important;\n }\n}\n@media (max-width: 767px) {\n .visible-xs-inline-block {\n display: inline-block !important;\n }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n .visible-sm {\n display: block !important;\n }\n table.visible-sm {\n display: table !important;\n }\n tr.visible-sm {\n display: table-row !important;\n }\n th.visible-sm,\n td.visible-sm {\n display: table-cell !important;\n }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n .visible-sm-block {\n display: block !important;\n }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n .visible-sm-inline {\n display: inline !important;\n }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n .visible-sm-inline-block {\n display: inline-block !important;\n }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n .visible-md {\n display: block !important;\n }\n table.visible-md {\n display: table !important;\n }\n tr.visible-md {\n display: table-row !important;\n }\n th.visible-md,\n td.visible-md {\n display: table-cell !important;\n }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n .visible-md-block {\n display: block !important;\n }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n .visible-md-inline {\n display: inline !important;\n }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n .visible-md-inline-block {\n display: inline-block !important;\n }\n}\n@media (min-width: 1200px) {\n .visible-lg {\n display: block !important;\n }\n table.visible-lg {\n display: table !important;\n }\n tr.visible-lg {\n display: table-row !important;\n }\n th.visible-lg,\n td.visible-lg {\n display: table-cell !important;\n }\n}\n@media (min-width: 1200px) {\n .visible-lg-block {\n display: block !important;\n }\n}\n@media (min-width: 1200px) {\n .visible-lg-inline {\n display: inline !important;\n }\n}\n@media (min-width: 1200px) {\n .visible-lg-inline-block {\n display: inline-block !important;\n }\n}\n@media (max-width: 767px) {\n .hidden-xs {\n display: none !important;\n }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n .hidden-sm {\n display: none !important;\n }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n .hidden-md {\n display: none !important;\n }\n}\n@media (min-width: 1200px) {\n .hidden-lg {\n display: none !important;\n }\n}\n.visible-print {\n display: none !important;\n}\n@media print {\n .visible-print {\n display: block !important;\n }\n table.visible-print {\n display: table !important;\n }\n tr.visible-print {\n display: table-row !important;\n }\n th.visible-print,\n td.visible-print {\n display: table-cell !important;\n }\n}\n.visible-print-block {\n display: none !important;\n}\n@media print {\n 
.visible-print-block {\n display: block !important;\n }\n}\n.visible-print-inline {\n display: none !important;\n}\n@media print {\n .visible-print-inline {\n display: inline !important;\n }\n}\n.visible-print-inline-block {\n display: none !important;\n}\n@media print {\n .visible-print-inline-block {\n display: inline-block !important;\n }\n}\n@media print {\n .hidden-print {\n display: none !important;\n }\n}\n/*# sourceMappingURL=bootstrap.css.map */\n","//\n// Glyphicons for Bootstrap\n//\n// Since icons are fonts, they can be placed anywhere text is placed and are\n// thus automatically sized to match the surrounding child. To use, create an\n// inline element with the appropriate classes, like so:\n//\n// Star\n\n// Import the fonts\n@font-face {\n font-family: 'Glyphicons Halflings';\n src: url('@{icon-font-path}@{icon-font-name}.eot');\n src: url('@{icon-font-path}@{icon-font-name}.eot?#iefix') format('embedded-opentype'),\n url('@{icon-font-path}@{icon-font-name}.woff2') format('woff2'),\n url('@{icon-font-path}@{icon-font-name}.woff') format('woff'),\n url('@{icon-font-path}@{icon-font-name}.ttf') format('truetype'),\n url('@{icon-font-path}@{icon-font-name}.svg#@{icon-font-svg-id}') format('svg');\n}\n\n// Catchall baseclass\n.glyphicon {\n position: relative;\n top: 1px;\n display: inline-block;\n font-family: 'Glyphicons Halflings';\n font-style: normal;\n font-weight: normal;\n line-height: 1;\n -webkit-font-smoothing: antialiased;\n -moz-osx-font-smoothing: grayscale;\n}\n\n// Individual icons\n.glyphicon-asterisk { &:before { content: \"\\002a\"; } }\n.glyphicon-plus { &:before { content: \"\\002b\"; } }\n.glyphicon-euro,\n.glyphicon-eur { &:before { content: \"\\20ac\"; } }\n.glyphicon-minus { &:before { content: \"\\2212\"; } }\n.glyphicon-cloud { &:before { content: \"\\2601\"; } }\n.glyphicon-envelope { &:before { content: \"\\2709\"; } }\n.glyphicon-pencil { &:before { content: \"\\270f\"; } }\n.glyphicon-glass { &:before { content: \"\\e001\"; } }\n.glyphicon-music { &:before { content: \"\\e002\"; } }\n.glyphicon-search { &:before { content: \"\\e003\"; } }\n.glyphicon-heart { &:before { content: \"\\e005\"; } }\n.glyphicon-star { &:before { content: \"\\e006\"; } }\n.glyphicon-star-empty { &:before { content: \"\\e007\"; } }\n.glyphicon-user { &:before { content: \"\\e008\"; } }\n.glyphicon-film { &:before { content: \"\\e009\"; } }\n.glyphicon-th-large { &:before { content: \"\\e010\"; } }\n.glyphicon-th { &:before { content: \"\\e011\"; } }\n.glyphicon-th-list { &:before { content: \"\\e012\"; } }\n.glyphicon-ok { &:before { content: \"\\e013\"; } }\n.glyphicon-remove { &:before { content: \"\\e014\"; } }\n.glyphicon-zoom-in { &:before { content: \"\\e015\"; } }\n.glyphicon-zoom-out { &:before { content: \"\\e016\"; } }\n.glyphicon-off { &:before { content: \"\\e017\"; } }\n.glyphicon-signal { &:before { content: \"\\e018\"; } }\n.glyphicon-cog { &:before { content: \"\\e019\"; } }\n.glyphicon-trash { &:before { content: \"\\e020\"; } }\n.glyphicon-home { &:before { content: \"\\e021\"; } }\n.glyphicon-file { &:before { content: \"\\e022\"; } }\n.glyphicon-time { &:before { content: \"\\e023\"; } }\n.glyphicon-road { &:before { content: \"\\e024\"; } }\n.glyphicon-download-alt { &:before { content: \"\\e025\"; } }\n.glyphicon-download { &:before { content: \"\\e026\"; } }\n.glyphicon-upload { &:before { content: \"\\e027\"; } }\n.glyphicon-inbox { &:before { content: \"\\e028\"; } }\n.glyphicon-play-circle { &:before { content: \"\\e029\"; } }\n.glyphicon-repeat 
{ &:before { content: \"\\e030\"; } }\n.glyphicon-refresh { &:before { content: \"\\e031\"; } }\n.glyphicon-list-alt { &:before { content: \"\\e032\"; } }\n.glyphicon-lock { &:before { content: \"\\e033\"; } }\n.glyphicon-flag { &:before { content: \"\\e034\"; } }\n.glyphicon-headphones { &:before { content: \"\\e035\"; } }\n.glyphicon-volume-off { &:before { content: \"\\e036\"; } }\n.glyphicon-volume-down { &:before { content: \"\\e037\"; } }\n.glyphicon-volume-up { &:before { content: \"\\e038\"; } }\n.glyphicon-qrcode { &:before { content: \"\\e039\"; } }\n.glyphicon-barcode { &:before { content: \"\\e040\"; } }\n.glyphicon-tag { &:before { content: \"\\e041\"; } }\n.glyphicon-tags { &:before { content: \"\\e042\"; } }\n.glyphicon-book { &:before { content: \"\\e043\"; } }\n.glyphicon-bookmark { &:before { content: \"\\e044\"; } }\n.glyphicon-print { &:before { content: \"\\e045\"; } }\n.glyphicon-camera { &:before { content: \"\\e046\"; } }\n.glyphicon-font { &:before { content: \"\\e047\"; } }\n.glyphicon-bold { &:before { content: \"\\e048\"; } }\n.glyphicon-italic { &:before { content: \"\\e049\"; } }\n.glyphicon-text-height { &:before { content: \"\\e050\"; } }\n.glyphicon-text-width { &:before { content: \"\\e051\"; } }\n.glyphicon-align-left { &:before { content: \"\\e052\"; } }\n.glyphicon-align-center { &:before { content: \"\\e053\"; } }\n.glyphicon-align-right { &:before { content: \"\\e054\"; } }\n.glyphicon-align-justify { &:before { content: \"\\e055\"; } }\n.glyphicon-list { &:before { content: \"\\e056\"; } }\n.glyphicon-indent-left { &:before { content: \"\\e057\"; } }\n.glyphicon-indent-right { &:before { content: \"\\e058\"; } }\n.glyphicon-facetime-video { &:before { content: \"\\e059\"; } }\n.glyphicon-picture { &:before { content: \"\\e060\"; } }\n.glyphicon-map-marker { &:before { content: \"\\e062\"; } }\n.glyphicon-adjust { &:before { content: \"\\e063\"; } }\n.glyphicon-tint { &:before { content: \"\\e064\"; } }\n.glyphicon-edit { &:before { content: \"\\e065\"; } }\n.glyphicon-share { &:before { content: \"\\e066\"; } }\n.glyphicon-check { &:before { content: \"\\e067\"; } }\n.glyphicon-move { &:before { content: \"\\e068\"; } }\n.glyphicon-step-backward { &:before { content: \"\\e069\"; } }\n.glyphicon-fast-backward { &:before { content: \"\\e070\"; } }\n.glyphicon-backward { &:before { content: \"\\e071\"; } }\n.glyphicon-play { &:before { content: \"\\e072\"; } }\n.glyphicon-pause { &:before { content: \"\\e073\"; } }\n.glyphicon-stop { &:before { content: \"\\e074\"; } }\n.glyphicon-forward { &:before { content: \"\\e075\"; } }\n.glyphicon-fast-forward { &:before { content: \"\\e076\"; } }\n.glyphicon-step-forward { &:before { content: \"\\e077\"; } }\n.glyphicon-eject { &:before { content: \"\\e078\"; } }\n.glyphicon-chevron-left { &:before { content: \"\\e079\"; } }\n.glyphicon-chevron-right { &:before { content: \"\\e080\"; } }\n.glyphicon-plus-sign { &:before { content: \"\\e081\"; } }\n.glyphicon-minus-sign { &:before { content: \"\\e082\"; } }\n.glyphicon-remove-sign { &:before { content: \"\\e083\"; } }\n.glyphicon-ok-sign { &:before { content: \"\\e084\"; } }\n.glyphicon-question-sign { &:before { content: \"\\e085\"; } }\n.glyphicon-info-sign { &:before { content: \"\\e086\"; } }\n.glyphicon-screenshot { &:before { content: \"\\e087\"; } }\n.glyphicon-remove-circle { &:before { content: \"\\e088\"; } }\n.glyphicon-ok-circle { &:before { content: \"\\e089\"; } }\n.glyphicon-ban-circle { &:before { content: \"\\e090\"; } }\n.glyphicon-arrow-left { 
&:before { content: \"\\e091\"; } }\n.glyphicon-arrow-right { &:before { content: \"\\e092\"; } }\n.glyphicon-arrow-up { &:before { content: \"\\e093\"; } }\n.glyphicon-arrow-down { &:before { content: \"\\e094\"; } }\n.glyphicon-share-alt { &:before { content: \"\\e095\"; } }\n.glyphicon-resize-full { &:before { content: \"\\e096\"; } }\n.glyphicon-resize-small { &:before { content: \"\\e097\"; } }\n.glyphicon-exclamation-sign { &:before { content: \"\\e101\"; } }\n.glyphicon-gift { &:before { content: \"\\e102\"; } }\n.glyphicon-leaf { &:before { content: \"\\e103\"; } }\n.glyphicon-fire { &:before { content: \"\\e104\"; } }\n.glyphicon-eye-open { &:before { content: \"\\e105\"; } }\n.glyphicon-eye-close { &:before { content: \"\\e106\"; } }\n.glyphicon-warning-sign { &:before { content: \"\\e107\"; } }\n.glyphicon-plane { &:before { content: \"\\e108\"; } }\n.glyphicon-calendar { &:before { content: \"\\e109\"; } }\n.glyphicon-random { &:before { content: \"\\e110\"; } }\n.glyphicon-comment { &:before { content: \"\\e111\"; } }\n.glyphicon-magnet { &:before { content: \"\\e112\"; } }\n.glyphicon-chevron-up { &:before { content: \"\\e113\"; } }\n.glyphicon-chevron-down { &:before { content: \"\\e114\"; } }\n.glyphicon-retweet { &:before { content: \"\\e115\"; } }\n.glyphicon-shopping-cart { &:before { content: \"\\e116\"; } }\n.glyphicon-folder-close { &:before { content: \"\\e117\"; } }\n.glyphicon-folder-open { &:before { content: \"\\e118\"; } }\n.glyphicon-resize-vertical { &:before { content: \"\\e119\"; } }\n.glyphicon-resize-horizontal { &:before { content: \"\\e120\"; } }\n.glyphicon-hdd { &:before { content: \"\\e121\"; } }\n.glyphicon-bullhorn { &:before { content: \"\\e122\"; } }\n.glyphicon-bell { &:before { content: \"\\e123\"; } }\n.glyphicon-certificate { &:before { content: \"\\e124\"; } }\n.glyphicon-thumbs-up { &:before { content: \"\\e125\"; } }\n.glyphicon-thumbs-down { &:before { content: \"\\e126\"; } }\n.glyphicon-hand-right { &:before { content: \"\\e127\"; } }\n.glyphicon-hand-left { &:before { content: \"\\e128\"; } }\n.glyphicon-hand-up { &:before { content: \"\\e129\"; } }\n.glyphicon-hand-down { &:before { content: \"\\e130\"; } }\n.glyphicon-circle-arrow-right { &:before { content: \"\\e131\"; } }\n.glyphicon-circle-arrow-left { &:before { content: \"\\e132\"; } }\n.glyphicon-circle-arrow-up { &:before { content: \"\\e133\"; } }\n.glyphicon-circle-arrow-down { &:before { content: \"\\e134\"; } }\n.glyphicon-globe { &:before { content: \"\\e135\"; } }\n.glyphicon-wrench { &:before { content: \"\\e136\"; } }\n.glyphicon-tasks { &:before { content: \"\\e137\"; } }\n.glyphicon-filter { &:before { content: \"\\e138\"; } }\n.glyphicon-briefcase { &:before { content: \"\\e139\"; } }\n.glyphicon-fullscreen { &:before { content: \"\\e140\"; } }\n.glyphicon-dashboard { &:before { content: \"\\e141\"; } }\n.glyphicon-paperclip { &:before { content: \"\\e142\"; } }\n.glyphicon-heart-empty { &:before { content: \"\\e143\"; } }\n.glyphicon-link { &:before { content: \"\\e144\"; } }\n.glyphicon-phone { &:before { content: \"\\e145\"; } }\n.glyphicon-pushpin { &:before { content: \"\\e146\"; } }\n.glyphicon-usd { &:before { content: \"\\e148\"; } }\n.glyphicon-gbp { &:before { content: \"\\e149\"; } }\n.glyphicon-sort { &:before { content: \"\\e150\"; } }\n.glyphicon-sort-by-alphabet { &:before { content: \"\\e151\"; } }\n.glyphicon-sort-by-alphabet-alt { &:before { content: \"\\e152\"; } }\n.glyphicon-sort-by-order { &:before { content: \"\\e153\"; } 
}\n.glyphicon-sort-by-order-alt { &:before { content: \"\\e154\"; } }\n.glyphicon-sort-by-attributes { &:before { content: \"\\e155\"; } }\n.glyphicon-sort-by-attributes-alt { &:before { content: \"\\e156\"; } }\n.glyphicon-unchecked { &:before { content: \"\\e157\"; } }\n.glyphicon-expand { &:before { content: \"\\e158\"; } }\n.glyphicon-collapse-down { &:before { content: \"\\e159\"; } }\n.glyphicon-collapse-up { &:before { content: \"\\e160\"; } }\n.glyphicon-log-in { &:before { content: \"\\e161\"; } }\n.glyphicon-flash { &:before { content: \"\\e162\"; } }\n.glyphicon-log-out { &:before { content: \"\\e163\"; } }\n.glyphicon-new-window { &:before { content: \"\\e164\"; } }\n.glyphicon-record { &:before { content: \"\\e165\"; } }\n.glyphicon-save { &:before { content: \"\\e166\"; } }\n.glyphicon-open { &:before { content: \"\\e167\"; } }\n.glyphicon-saved { &:before { content: \"\\e168\"; } }\n.glyphicon-import { &:before { content: \"\\e169\"; } }\n.glyphicon-export { &:before { content: \"\\e170\"; } }\n.glyphicon-send { &:before { content: \"\\e171\"; } }\n.glyphicon-floppy-disk { &:before { content: \"\\e172\"; } }\n.glyphicon-floppy-saved { &:before { content: \"\\e173\"; } }\n.glyphicon-floppy-remove { &:before { content: \"\\e174\"; } }\n.glyphicon-floppy-save { &:before { content: \"\\e175\"; } }\n.glyphicon-floppy-open { &:before { content: \"\\e176\"; } }\n.glyphicon-credit-card { &:before { content: \"\\e177\"; } }\n.glyphicon-transfer { &:before { content: \"\\e178\"; } }\n.glyphicon-cutlery { &:before { content: \"\\e179\"; } }\n.glyphicon-header { &:before { content: \"\\e180\"; } }\n.glyphicon-compressed { &:before { content: \"\\e181\"; } }\n.glyphicon-earphone { &:before { content: \"\\e182\"; } }\n.glyphicon-phone-alt { &:before { content: \"\\e183\"; } }\n.glyphicon-tower { &:before { content: \"\\e184\"; } }\n.glyphicon-stats { &:before { content: \"\\e185\"; } }\n.glyphicon-sd-video { &:before { content: \"\\e186\"; } }\n.glyphicon-hd-video { &:before { content: \"\\e187\"; } }\n.glyphicon-subtitles { &:before { content: \"\\e188\"; } }\n.glyphicon-sound-stereo { &:before { content: \"\\e189\"; } }\n.glyphicon-sound-dolby { &:before { content: \"\\e190\"; } }\n.glyphicon-sound-5-1 { &:before { content: \"\\e191\"; } }\n.glyphicon-sound-6-1 { &:before { content: \"\\e192\"; } }\n.glyphicon-sound-7-1 { &:before { content: \"\\e193\"; } }\n.glyphicon-copyright-mark { &:before { content: \"\\e194\"; } }\n.glyphicon-registration-mark { &:before { content: \"\\e195\"; } }\n.glyphicon-cloud-download { &:before { content: \"\\e197\"; } }\n.glyphicon-cloud-upload { &:before { content: \"\\e198\"; } }\n.glyphicon-tree-conifer { &:before { content: \"\\e199\"; } }\n.glyphicon-tree-deciduous { &:before { content: \"\\e200\"; } }\n.glyphicon-cd { &:before { content: \"\\e201\"; } }\n.glyphicon-save-file { &:before { content: \"\\e202\"; } }\n.glyphicon-open-file { &:before { content: \"\\e203\"; } }\n.glyphicon-level-up { &:before { content: \"\\e204\"; } }\n.glyphicon-copy { &:before { content: \"\\e205\"; } }\n.glyphicon-paste { &:before { content: \"\\e206\"; } }\n// The following 2 Glyphicons are omitted for the time being because\n// they currently use Unicode codepoints that are outside the\n// Basic Multilingual Plane (BMP). 
Older buggy versions of WebKit can't handle\n// non-BMP codepoints in CSS string escapes, and thus can't display these two icons.\n// Notably, the bug affects some older versions of the Android Browser.\n// More info: https://github.com/twbs/bootstrap/issues/10106\n// .glyphicon-door { &:before { content: \"\\1f6aa\"; } }\n// .glyphicon-key { &:before { content: \"\\1f511\"; } }\n.glyphicon-alert { &:before { content: \"\\e209\"; } }\n.glyphicon-equalizer { &:before { content: \"\\e210\"; } }\n.glyphicon-king { &:before { content: \"\\e211\"; } }\n.glyphicon-queen { &:before { content: \"\\e212\"; } }\n.glyphicon-pawn { &:before { content: \"\\e213\"; } }\n.glyphicon-bishop { &:before { content: \"\\e214\"; } }\n.glyphicon-knight { &:before { content: \"\\e215\"; } }\n.glyphicon-baby-formula { &:before { content: \"\\e216\"; } }\n.glyphicon-tent { &:before { content: \"\\26fa\"; } }\n.glyphicon-blackboard { &:before { content: \"\\e218\"; } }\n.glyphicon-bed { &:before { content: \"\\e219\"; } }\n.glyphicon-apple { &:before { content: \"\\f8ff\"; } }\n.glyphicon-erase { &:before { content: \"\\e221\"; } }\n.glyphicon-hourglass { &:before { content: \"\\231b\"; } }\n.glyphicon-lamp { &:before { content: \"\\e223\"; } }\n.glyphicon-duplicate { &:before { content: \"\\e224\"; } }\n.glyphicon-piggy-bank { &:before { content: \"\\e225\"; } }\n.glyphicon-scissors { &:before { content: \"\\e226\"; } }\n.glyphicon-bitcoin { &:before { content: \"\\e227\"; } }\n.glyphicon-btc { &:before { content: \"\\e227\"; } }\n.glyphicon-xbt { &:before { content: \"\\e227\"; } }\n.glyphicon-yen { &:before { content: \"\\00a5\"; } }\n.glyphicon-jpy { &:before { content: \"\\00a5\"; } }\n.glyphicon-ruble { &:before { content: \"\\20bd\"; } }\n.glyphicon-rub { &:before { content: \"\\20bd\"; } }\n.glyphicon-scale { &:before { content: \"\\e230\"; } }\n.glyphicon-ice-lolly { &:before { content: \"\\e231\"; } }\n.glyphicon-ice-lolly-tasted { &:before { content: \"\\e232\"; } }\n.glyphicon-education { &:before { content: \"\\e233\"; } }\n.glyphicon-option-horizontal { &:before { content: \"\\e234\"; } }\n.glyphicon-option-vertical { &:before { content: \"\\e235\"; } }\n.glyphicon-menu-hamburger { &:before { content: \"\\e236\"; } }\n.glyphicon-modal-window { &:before { content: \"\\e237\"; } }\n.glyphicon-oil { &:before { content: \"\\e238\"; } }\n.glyphicon-grain { &:before { content: \"\\e239\"; } }\n.glyphicon-sunglasses { &:before { content: \"\\e240\"; } }\n.glyphicon-text-size { &:before { content: \"\\e241\"; } }\n.glyphicon-text-color { &:before { content: \"\\e242\"; } }\n.glyphicon-text-background { &:before { content: \"\\e243\"; } }\n.glyphicon-object-align-top { &:before { content: \"\\e244\"; } }\n.glyphicon-object-align-bottom { &:before { content: \"\\e245\"; } }\n.glyphicon-object-align-horizontal{ &:before { content: \"\\e246\"; } }\n.glyphicon-object-align-left { &:before { content: \"\\e247\"; } }\n.glyphicon-object-align-vertical { &:before { content: \"\\e248\"; } }\n.glyphicon-object-align-right { &:before { content: \"\\e249\"; } }\n.glyphicon-triangle-right { &:before { content: \"\\e250\"; } }\n.glyphicon-triangle-left { &:before { content: \"\\e251\"; } }\n.glyphicon-triangle-bottom { &:before { content: \"\\e252\"; } }\n.glyphicon-triangle-top { &:before { content: \"\\e253\"; } }\n.glyphicon-console { &:before { content: \"\\e254\"; } }\n.glyphicon-superscript { &:before { content: \"\\e255\"; } }\n.glyphicon-subscript { &:before { content: \"\\e256\"; } }\n.glyphicon-menu-left { &:before 
{ content: \"\\e257\"; } }\n.glyphicon-menu-right { &:before { content: \"\\e258\"; } }\n.glyphicon-menu-down { &:before { content: \"\\e259\"; } }\n.glyphicon-menu-up { &:before { content: \"\\e260\"; } }\n","//\n// Scaffolding\n// --------------------------------------------------\n\n\n// Reset the box-sizing\n//\n// Heads up! This reset may cause conflicts with some third-party widgets.\n// For recommendations on resolving such conflicts, see\n// http://getbootstrap.com/getting-started/#third-box-sizing\n* {\n .box-sizing(border-box);\n}\n*:before,\n*:after {\n .box-sizing(border-box);\n}\n\n\n// Body reset\n\nhtml {\n font-size: 10px;\n -webkit-tap-highlight-color: rgba(0,0,0,0);\n}\n\nbody {\n font-family: @font-family-base;\n font-size: @font-size-base;\n line-height: @line-height-base;\n color: @text-color;\n background-color: @body-bg;\n}\n\n// Reset fonts for relevant elements\ninput,\nbutton,\nselect,\ntextarea {\n font-family: inherit;\n font-size: inherit;\n line-height: inherit;\n}\n\n\n// Links\n\na {\n color: @link-color;\n text-decoration: none;\n\n &:hover,\n &:focus {\n color: @link-hover-color;\n text-decoration: @link-hover-decoration;\n }\n\n &:focus {\n .tab-focus();\n }\n}\n\n\n// Figures\n//\n// We reset this here because previously Normalize had no `figure` margins. This\n// ensures we don't break anyone's use of the element.\n\nfigure {\n margin: 0;\n}\n\n\n// Images\n\nimg {\n vertical-align: middle;\n}\n\n// Responsive images (ensure images don't scale beyond their parents)\n.img-responsive {\n .img-responsive();\n}\n\n// Rounded corners\n.img-rounded {\n border-radius: @border-radius-large;\n}\n\n// Image thumbnails\n//\n// Heads up! This is mixin-ed into thumbnails.less for `.thumbnail`.\n.img-thumbnail {\n padding: @thumbnail-padding;\n line-height: @line-height-base;\n background-color: @thumbnail-bg;\n border: 1px solid @thumbnail-border;\n border-radius: @thumbnail-border-radius;\n .transition(all .2s ease-in-out);\n\n // Keep them at most 100% wide\n .img-responsive(inline-block);\n}\n\n// Perfect circle\n.img-circle {\n border-radius: 50%; // set radius in percents\n}\n\n\n// Horizontal rules\n\nhr {\n margin-top: @line-height-computed;\n margin-bottom: @line-height-computed;\n border: 0;\n border-top: 1px solid @hr-border;\n}\n\n\n// Only display content to screen readers\n//\n// See: http://a11yproject.com/posts/how-to-hide-content\n\n.sr-only {\n position: absolute;\n width: 1px;\n height: 1px;\n margin: -1px;\n padding: 0;\n overflow: hidden;\n clip: rect(0,0,0,0);\n border: 0;\n}\n\n// Use in conjunction with .sr-only to only display content when it's focused.\n// Useful for \"Skip to main content\" links; see http://www.w3.org/TR/2013/NOTE-WCAG20-TECHS-20130905/G1\n// Credit: HTML5 Boilerplate\n\n.sr-only-focusable {\n &:active,\n &:focus {\n position: static;\n width: auto;\n height: auto;\n margin: 0;\n overflow: visible;\n clip: auto;\n }\n}\n\n\n// iOS \"clickable elements\" fix for role=\"button\"\n//\n// Fixes \"clickability\" issue (and more generally, the firing of events such as focus as well)\n// for traditionally non-focusable elements with role=\"button\"\n// see https://developer.mozilla.org/en-US/docs/Web/Events/click#Safari_Mobile\n\n[role=\"button\"] {\n cursor: pointer;\n}\n","// Vendor Prefixes\n//\n// All vendor mixins are deprecated as of v3.2.0 due to the introduction of\n// Autoprefixer in our Gruntfile. 
They have been removed in v4.\n\n// - Animations\n// - Backface visibility\n// - Box shadow\n// - Box sizing\n// - Content columns\n// - Hyphens\n// - Placeholder text\n// - Transformations\n// - Transitions\n// - User Select\n\n\n// Animations\n.animation(@animation) {\n -webkit-animation: @animation;\n -o-animation: @animation;\n animation: @animation;\n}\n.animation-name(@name) {\n -webkit-animation-name: @name;\n animation-name: @name;\n}\n.animation-duration(@duration) {\n -webkit-animation-duration: @duration;\n animation-duration: @duration;\n}\n.animation-timing-function(@timing-function) {\n -webkit-animation-timing-function: @timing-function;\n animation-timing-function: @timing-function;\n}\n.animation-delay(@delay) {\n -webkit-animation-delay: @delay;\n animation-delay: @delay;\n}\n.animation-iteration-count(@iteration-count) {\n -webkit-animation-iteration-count: @iteration-count;\n animation-iteration-count: @iteration-count;\n}\n.animation-direction(@direction) {\n -webkit-animation-direction: @direction;\n animation-direction: @direction;\n}\n.animation-fill-mode(@fill-mode) {\n -webkit-animation-fill-mode: @fill-mode;\n animation-fill-mode: @fill-mode;\n}\n\n// Backface visibility\n// Prevent browsers from flickering when using CSS 3D transforms.\n// Default value is `visible`, but can be changed to `hidden`\n\n.backface-visibility(@visibility) {\n -webkit-backface-visibility: @visibility;\n -moz-backface-visibility: @visibility;\n backface-visibility: @visibility;\n}\n\n// Drop shadows\n//\n// Note: Deprecated `.box-shadow()` as of v3.1.0 since all of Bootstrap's\n// supported browsers that have box shadow capabilities now support it.\n\n.box-shadow(@shadow) {\n -webkit-box-shadow: @shadow; // iOS <4.3 & Android <4.1\n box-shadow: @shadow;\n}\n\n// Box sizing\n.box-sizing(@boxmodel) {\n -webkit-box-sizing: @boxmodel;\n -moz-box-sizing: @boxmodel;\n box-sizing: @boxmodel;\n}\n\n// CSS3 Content Columns\n.content-columns(@column-count; @column-gap: @grid-gutter-width) {\n -webkit-column-count: @column-count;\n -moz-column-count: @column-count;\n column-count: @column-count;\n -webkit-column-gap: @column-gap;\n -moz-column-gap: @column-gap;\n column-gap: @column-gap;\n}\n\n// Optional hyphenation\n.hyphens(@mode: auto) {\n word-wrap: break-word;\n -webkit-hyphens: @mode;\n -moz-hyphens: @mode;\n -ms-hyphens: @mode; // IE10+\n -o-hyphens: @mode;\n hyphens: @mode;\n}\n\n// Placeholder text\n.placeholder(@color: @input-color-placeholder) {\n // Firefox\n &::-moz-placeholder {\n color: @color;\n opacity: 1; // Override Firefox's unusual default opacity; see https://github.com/twbs/bootstrap/pull/11526\n }\n &:-ms-input-placeholder { color: @color; } // Internet Explorer 10+\n &::-webkit-input-placeholder { color: @color; } // Safari and Chrome\n}\n\n// Transformations\n.scale(@ratio) {\n -webkit-transform: scale(@ratio);\n -ms-transform: scale(@ratio); // IE9 only\n -o-transform: scale(@ratio);\n transform: scale(@ratio);\n}\n.scale(@ratioX; @ratioY) {\n -webkit-transform: scale(@ratioX, @ratioY);\n -ms-transform: scale(@ratioX, @ratioY); // IE9 only\n -o-transform: scale(@ratioX, @ratioY);\n transform: scale(@ratioX, @ratioY);\n}\n.scaleX(@ratio) {\n -webkit-transform: scaleX(@ratio);\n -ms-transform: scaleX(@ratio); // IE9 only\n -o-transform: scaleX(@ratio);\n transform: scaleX(@ratio);\n}\n.scaleY(@ratio) {\n -webkit-transform: scaleY(@ratio);\n -ms-transform: scaleY(@ratio); // IE9 only\n -o-transform: scaleY(@ratio);\n transform: scaleY(@ratio);\n}\n.skew(@x; @y) {\n 
-webkit-transform: skewX(@x) skewY(@y);\n -ms-transform: skewX(@x) skewY(@y); // See https://github.com/twbs/bootstrap/issues/4885; IE9+\n -o-transform: skewX(@x) skewY(@y);\n transform: skewX(@x) skewY(@y);\n}\n.translate(@x; @y) {\n -webkit-transform: translate(@x, @y);\n -ms-transform: translate(@x, @y); // IE9 only\n -o-transform: translate(@x, @y);\n transform: translate(@x, @y);\n}\n.translate3d(@x; @y; @z) {\n -webkit-transform: translate3d(@x, @y, @z);\n transform: translate3d(@x, @y, @z);\n}\n.rotate(@degrees) {\n -webkit-transform: rotate(@degrees);\n -ms-transform: rotate(@degrees); // IE9 only\n -o-transform: rotate(@degrees);\n transform: rotate(@degrees);\n}\n.rotateX(@degrees) {\n -webkit-transform: rotateX(@degrees);\n -ms-transform: rotateX(@degrees); // IE9 only\n -o-transform: rotateX(@degrees);\n transform: rotateX(@degrees);\n}\n.rotateY(@degrees) {\n -webkit-transform: rotateY(@degrees);\n -ms-transform: rotateY(@degrees); // IE9 only\n -o-transform: rotateY(@degrees);\n transform: rotateY(@degrees);\n}\n.perspective(@perspective) {\n -webkit-perspective: @perspective;\n -moz-perspective: @perspective;\n perspective: @perspective;\n}\n.perspective-origin(@perspective) {\n -webkit-perspective-origin: @perspective;\n -moz-perspective-origin: @perspective;\n perspective-origin: @perspective;\n}\n.transform-origin(@origin) {\n -webkit-transform-origin: @origin;\n -moz-transform-origin: @origin;\n -ms-transform-origin: @origin; // IE9 only\n transform-origin: @origin;\n}\n\n\n// Transitions\n\n.transition(@transition) {\n -webkit-transition: @transition;\n -o-transition: @transition;\n transition: @transition;\n}\n.transition-property(@transition-property) {\n -webkit-transition-property: @transition-property;\n transition-property: @transition-property;\n}\n.transition-delay(@transition-delay) {\n -webkit-transition-delay: @transition-delay;\n transition-delay: @transition-delay;\n}\n.transition-duration(@transition-duration) {\n -webkit-transition-duration: @transition-duration;\n transition-duration: @transition-duration;\n}\n.transition-timing-function(@timing-function) {\n -webkit-transition-timing-function: @timing-function;\n transition-timing-function: @timing-function;\n}\n.transition-transform(@transition) {\n -webkit-transition: -webkit-transform @transition;\n -moz-transition: -moz-transform @transition;\n -o-transition: -o-transform @transition;\n transition: transform @transition;\n}\n\n\n// User select\n// For selecting text on the page\n\n.user-select(@select) {\n -webkit-user-select: @select;\n -moz-user-select: @select;\n -ms-user-select: @select; // IE10+\n user-select: @select;\n}\n","// WebKit-style focus\n\n.tab-focus() {\n // WebKit-specific. Other browsers will keep their default outline style.\n // (Initially tried to also force default via `outline: initial`,\n // but that seems to erroneously remove the outline in Firefox altogether.)\n outline: 5px auto -webkit-focus-ring-color;\n outline-offset: -2px;\n}\n","// Image Mixins\n// - Responsive image\n// - Retina image\n\n\n// Responsive image\n//\n// Keep images from scaling beyond the width of their parents.\n.img-responsive(@display: block) {\n display: @display;\n max-width: 100%; // Part 1: Set a maximum relative to the parent\n height: auto; // Part 2: Scale the height according to the width, otherwise you get stretching\n}\n\n\n// Retina image\n//\n// Short retina mixin for setting background-image and -size. 
Note that the\n// spelling of `min--moz-device-pixel-ratio` is intentional.\n.img-retina(@file-1x; @file-2x; @width-1x; @height-1x) {\n background-image: url(\"@{file-1x}\");\n\n @media\n only screen and (-webkit-min-device-pixel-ratio: 2),\n only screen and ( min--moz-device-pixel-ratio: 2),\n only screen and ( -o-min-device-pixel-ratio: 2/1),\n only screen and ( min-device-pixel-ratio: 2),\n only screen and ( min-resolution: 192dpi),\n only screen and ( min-resolution: 2dppx) {\n background-image: url(\"@{file-2x}\");\n background-size: @width-1x @height-1x;\n }\n}\n","//\n// Typography\n// --------------------------------------------------\n\n\n// Headings\n// -------------------------\n\nh1, h2, h3, h4, h5, h6,\n.h1, .h2, .h3, .h4, .h5, .h6 {\n font-family: @headings-font-family;\n font-weight: @headings-font-weight;\n line-height: @headings-line-height;\n color: @headings-color;\n\n small,\n .small {\n font-weight: normal;\n line-height: 1;\n color: @headings-small-color;\n }\n}\n\nh1, .h1,\nh2, .h2,\nh3, .h3 {\n margin-top: @line-height-computed;\n margin-bottom: (@line-height-computed / 2);\n\n small,\n .small {\n font-size: 65%;\n }\n}\nh4, .h4,\nh5, .h5,\nh6, .h6 {\n margin-top: (@line-height-computed / 2);\n margin-bottom: (@line-height-computed / 2);\n\n small,\n .small {\n font-size: 75%;\n }\n}\n\nh1, .h1 { font-size: @font-size-h1; }\nh2, .h2 { font-size: @font-size-h2; }\nh3, .h3 { font-size: @font-size-h3; }\nh4, .h4 { font-size: @font-size-h4; }\nh5, .h5 { font-size: @font-size-h5; }\nh6, .h6 { font-size: @font-size-h6; }\n\n\n// Body text\n// -------------------------\n\np {\n margin: 0 0 (@line-height-computed / 2);\n}\n\n.lead {\n margin-bottom: @line-height-computed;\n font-size: floor((@font-size-base * 1.15));\n font-weight: 300;\n line-height: 1.4;\n\n @media (min-width: @screen-sm-min) {\n font-size: (@font-size-base * 1.5);\n }\n}\n\n\n// Emphasis & misc\n// -------------------------\n\n// Ex: (12px small font / 14px base font) * 100% = about 85%\nsmall,\n.small {\n font-size: floor((100% * @font-size-small / @font-size-base));\n}\n\nmark,\n.mark {\n background-color: @state-warning-bg;\n padding: .2em;\n}\n\n// Alignment\n.text-left { text-align: left; }\n.text-right { text-align: right; }\n.text-center { text-align: center; }\n.text-justify { text-align: justify; }\n.text-nowrap { white-space: nowrap; }\n\n// Transformation\n.text-lowercase { text-transform: lowercase; }\n.text-uppercase { text-transform: uppercase; }\n.text-capitalize { text-transform: capitalize; }\n\n// Contextual colors\n.text-muted {\n color: @text-muted;\n}\n.text-primary {\n .text-emphasis-variant(@brand-primary);\n}\n.text-success {\n .text-emphasis-variant(@state-success-text);\n}\n.text-info {\n .text-emphasis-variant(@state-info-text);\n}\n.text-warning {\n .text-emphasis-variant(@state-warning-text);\n}\n.text-danger {\n .text-emphasis-variant(@state-danger-text);\n}\n\n// Contextual backgrounds\n// For now we'll leave these alongside the text classes until v4 when we can\n// safely shift things around (per SemVer rules).\n.bg-primary {\n // Given the contrast here, this is the only class to have its color inverted\n // automatically.\n color: #fff;\n .bg-variant(@brand-primary);\n}\n.bg-success {\n .bg-variant(@state-success-bg);\n}\n.bg-info {\n .bg-variant(@state-info-bg);\n}\n.bg-warning {\n .bg-variant(@state-warning-bg);\n}\n.bg-danger {\n .bg-variant(@state-danger-bg);\n}\n\n\n// Page header\n// -------------------------\n\n.page-header {\n padding-bottom: 
((@line-height-computed / 2) - 1);\n margin: (@line-height-computed * 2) 0 @line-height-computed;\n border-bottom: 1px solid @page-header-border-color;\n}\n\n\n// Lists\n// -------------------------\n\n// Unordered and Ordered lists\nul,\nol {\n margin-top: 0;\n margin-bottom: (@line-height-computed / 2);\n ul,\n ol {\n margin-bottom: 0;\n }\n}\n\n// List options\n\n// Unstyled keeps list items block level, just removes default browser padding and list-style\n.list-unstyled {\n padding-left: 0;\n list-style: none;\n}\n\n// Inline turns list items into inline-block\n.list-inline {\n .list-unstyled();\n margin-left: -5px;\n\n > li {\n display: inline-block;\n padding-left: 5px;\n padding-right: 5px;\n }\n}\n\n// Description Lists\ndl {\n margin-top: 0; // Remove browser default\n margin-bottom: @line-height-computed;\n}\ndt,\ndd {\n line-height: @line-height-base;\n}\ndt {\n font-weight: bold;\n}\ndd {\n margin-left: 0; // Undo browser default\n}\n\n// Horizontal description lists\n//\n// Defaults to being stacked without any of the below styles applied, until the\n// grid breakpoint is reached (default of ~768px).\n\n.dl-horizontal {\n dd {\n &:extend(.clearfix all); // Clear the floated `dt` if an empty `dd` is present\n }\n\n @media (min-width: @dl-horizontal-breakpoint) {\n dt {\n float: left;\n width: (@dl-horizontal-offset - 20);\n clear: left;\n text-align: right;\n .text-overflow();\n }\n dd {\n margin-left: @dl-horizontal-offset;\n }\n }\n}\n\n\n// Misc\n// -------------------------\n\n// Abbreviations and acronyms\nabbr[title],\n// Add data-* attribute to help out our tooltip plugin, per https://github.com/twbs/bootstrap/issues/5257\nabbr[data-original-title] {\n cursor: help;\n border-bottom: 1px dotted @abbr-border-color;\n}\n.initialism {\n font-size: 90%;\n .text-uppercase();\n}\n\n// Blockquotes\nblockquote {\n padding: (@line-height-computed / 2) @line-height-computed;\n margin: 0 0 @line-height-computed;\n font-size: @blockquote-font-size;\n border-left: 5px solid @blockquote-border-color;\n\n p,\n ul,\n ol {\n &:last-child {\n margin-bottom: 0;\n }\n }\n\n // Note: Deprecated small and .small as of v3.1.0\n // Context: https://github.com/twbs/bootstrap/issues/11660\n footer,\n small,\n .small {\n display: block;\n font-size: 80%; // back to default font-size\n line-height: @line-height-base;\n color: @blockquote-small-color;\n\n &:before {\n content: '\\2014 \\00A0'; // em dash, nbsp\n }\n }\n}\n\n// Opposite alignment of blockquote\n//\n// Heads up: `blockquote.pull-right` has been deprecated as of v3.1.0.\n.blockquote-reverse,\nblockquote.pull-right {\n padding-right: 15px;\n padding-left: 0;\n border-right: 5px solid @blockquote-border-color;\n border-left: 0;\n text-align: right;\n\n // Account for citation\n footer,\n small,\n .small {\n &:before { content: ''; }\n &:after {\n content: '\\00A0 \\2014'; // nbsp, em dash\n }\n }\n}\n\n// Addresses\naddress {\n margin-bottom: @line-height-computed;\n font-style: normal;\n line-height: @line-height-base;\n}\n","// Typography\n\n.text-emphasis-variant(@color) {\n color: @color;\n a&:hover,\n a&:focus {\n color: darken(@color, 10%);\n }\n}\n","// Contextual backgrounds\n\n.bg-variant(@color) {\n background-color: @color;\n a&:hover,\n a&:focus {\n background-color: darken(@color, 10%);\n }\n}\n","// Text overflow\n// Requires inline-block or block for proper styling\n\n.text-overflow() {\n overflow: hidden;\n text-overflow: ellipsis;\n white-space: nowrap;\n}\n","//\n// Code (inline and block)\n// 
--------------------------------------------------\n\n\n// Inline and block code styles\ncode,\nkbd,\npre,\nsamp {\n font-family: @font-family-monospace;\n}\n\n// Inline code\ncode {\n padding: 2px 4px;\n font-size: 90%;\n color: @code-color;\n background-color: @code-bg;\n border-radius: @border-radius-base;\n}\n\n// User input typically entered via keyboard\nkbd {\n padding: 2px 4px;\n font-size: 90%;\n color: @kbd-color;\n background-color: @kbd-bg;\n border-radius: @border-radius-small;\n box-shadow: inset 0 -1px 0 rgba(0,0,0,.25);\n\n kbd {\n padding: 0;\n font-size: 100%;\n font-weight: bold;\n box-shadow: none;\n }\n}\n\n// Blocks of code\npre {\n display: block;\n padding: ((@line-height-computed - 1) / 2);\n margin: 0 0 (@line-height-computed / 2);\n font-size: (@font-size-base - 1); // 14px to 13px\n line-height: @line-height-base;\n word-break: break-all;\n word-wrap: break-word;\n color: @pre-color;\n background-color: @pre-bg;\n border: 1px solid @pre-border-color;\n border-radius: @border-radius-base;\n\n // Account for some code outputs that place code tags in pre tags\n code {\n padding: 0;\n font-size: inherit;\n color: inherit;\n white-space: pre-wrap;\n background-color: transparent;\n border-radius: 0;\n }\n}\n\n// Enable scrollable blocks of code\n.pre-scrollable {\n max-height: @pre-scrollable-max-height;\n overflow-y: scroll;\n}\n","//\n// Grid system\n// --------------------------------------------------\n\n\n// Container widths\n//\n// Set the container width, and override it for fixed navbars in media queries.\n\n.container {\n .container-fixed();\n\n @media (min-width: @screen-sm-min) {\n width: @container-sm;\n }\n @media (min-width: @screen-md-min) {\n width: @container-md;\n }\n @media (min-width: @screen-lg-min) {\n width: @container-lg;\n }\n}\n\n\n// Fluid container\n//\n// Utilizes the mixin meant for fixed width containers, but without any defined\n// width for fluid, full width layouts.\n\n.container-fluid {\n .container-fixed();\n}\n\n\n// Row\n//\n// Rows contain and clear the floats of your columns.\n\n.row {\n .make-row();\n}\n\n\n// Columns\n//\n// Common styles for small and large grid columns\n\n.make-grid-columns();\n\n\n// Extra small grid\n//\n// Columns, offsets, pushes, and pulls for extra small devices like\n// smartphones.\n\n.make-grid(xs);\n\n\n// Small grid\n//\n// Columns, offsets, pushes, and pulls for the small device range, from phones\n// to tablets.\n\n@media (min-width: @screen-sm-min) {\n .make-grid(sm);\n}\n\n\n// Medium grid\n//\n// Columns, offsets, pushes, and pulls for the desktop device range.\n\n@media (min-width: @screen-md-min) {\n .make-grid(md);\n}\n\n\n// Large grid\n//\n// Columns, offsets, pushes, and pulls for the large desktop device range.\n\n@media (min-width: @screen-lg-min) {\n .make-grid(lg);\n}\n","// Grid system\n//\n// Generate semantic grid columns with these mixins.\n\n// Centered container element\n.container-fixed(@gutter: @grid-gutter-width) {\n margin-right: auto;\n margin-left: auto;\n padding-left: floor((@gutter / 2));\n padding-right: ceil((@gutter / 2));\n &:extend(.clearfix all);\n}\n\n// Creates a wrapper for a series of columns\n.make-row(@gutter: @grid-gutter-width) {\n margin-left: ceil((@gutter / -2));\n margin-right: floor((@gutter / -2));\n &:extend(.clearfix all);\n}\n\n// Generate the extra small columns\n.make-xs-column(@columns; @gutter: @grid-gutter-width) {\n position: relative;\n float: left;\n width: percentage((@columns / @grid-columns));\n min-height: 1px;\n padding-left: 
(@gutter / 2);\n padding-right: (@gutter / 2);\n}\n.make-xs-column-offset(@columns) {\n margin-left: percentage((@columns / @grid-columns));\n}\n.make-xs-column-push(@columns) {\n left: percentage((@columns / @grid-columns));\n}\n.make-xs-column-pull(@columns) {\n right: percentage((@columns / @grid-columns));\n}\n\n// Generate the small columns\n.make-sm-column(@columns; @gutter: @grid-gutter-width) {\n position: relative;\n min-height: 1px;\n padding-left: (@gutter / 2);\n padding-right: (@gutter / 2);\n\n @media (min-width: @screen-sm-min) {\n float: left;\n width: percentage((@columns / @grid-columns));\n }\n}\n.make-sm-column-offset(@columns) {\n @media (min-width: @screen-sm-min) {\n margin-left: percentage((@columns / @grid-columns));\n }\n}\n.make-sm-column-push(@columns) {\n @media (min-width: @screen-sm-min) {\n left: percentage((@columns / @grid-columns));\n }\n}\n.make-sm-column-pull(@columns) {\n @media (min-width: @screen-sm-min) {\n right: percentage((@columns / @grid-columns));\n }\n}\n\n// Generate the medium columns\n.make-md-column(@columns; @gutter: @grid-gutter-width) {\n position: relative;\n min-height: 1px;\n padding-left: (@gutter / 2);\n padding-right: (@gutter / 2);\n\n @media (min-width: @screen-md-min) {\n float: left;\n width: percentage((@columns / @grid-columns));\n }\n}\n.make-md-column-offset(@columns) {\n @media (min-width: @screen-md-min) {\n margin-left: percentage((@columns / @grid-columns));\n }\n}\n.make-md-column-push(@columns) {\n @media (min-width: @screen-md-min) {\n left: percentage((@columns / @grid-columns));\n }\n}\n.make-md-column-pull(@columns) {\n @media (min-width: @screen-md-min) {\n right: percentage((@columns / @grid-columns));\n }\n}\n\n// Generate the large columns\n.make-lg-column(@columns; @gutter: @grid-gutter-width) {\n position: relative;\n min-height: 1px;\n padding-left: (@gutter / 2);\n padding-right: (@gutter / 2);\n\n @media (min-width: @screen-lg-min) {\n float: left;\n width: percentage((@columns / @grid-columns));\n }\n}\n.make-lg-column-offset(@columns) {\n @media (min-width: @screen-lg-min) {\n margin-left: percentage((@columns / @grid-columns));\n }\n}\n.make-lg-column-push(@columns) {\n @media (min-width: @screen-lg-min) {\n left: percentage((@columns / @grid-columns));\n }\n}\n.make-lg-column-pull(@columns) {\n @media (min-width: @screen-lg-min) {\n right: percentage((@columns / @grid-columns));\n }\n}\n","// Framework grid generation\n//\n// Used only by Bootstrap to generate the correct number of grid classes given\n// any value of `@grid-columns`.\n\n.make-grid-columns() {\n // Common styles for all sizes of grid columns, widths 1-12\n .col(@index) { // initial\n @item: ~\".col-xs-@{index}, .col-sm-@{index}, .col-md-@{index}, .col-lg-@{index}\";\n .col((@index + 1), @item);\n }\n .col(@index, @list) when (@index =< @grid-columns) { // general; \"=<\" isn't a typo\n @item: ~\".col-xs-@{index}, .col-sm-@{index}, .col-md-@{index}, .col-lg-@{index}\";\n .col((@index + 1), ~\"@{list}, @{item}\");\n }\n .col(@index, @list) when (@index > @grid-columns) { // terminal\n @{list} {\n position: relative;\n // Prevent columns from collapsing when empty\n min-height: 1px;\n // Inner gutter via padding\n padding-left: ceil((@grid-gutter-width / 2));\n padding-right: floor((@grid-gutter-width / 2));\n }\n }\n .col(1); // kickstart it\n}\n\n.float-grid-columns(@class) {\n .col(@index) { // initial\n @item: ~\".col-@{class}-@{index}\";\n .col((@index + 1), @item);\n }\n .col(@index, @list) when (@index =< @grid-columns) { // 
general\n @item: ~\".col-@{class}-@{index}\";\n .col((@index + 1), ~\"@{list}, @{item}\");\n }\n .col(@index, @list) when (@index > @grid-columns) { // terminal\n @{list} {\n float: left;\n }\n }\n .col(1); // kickstart it\n}\n\n.calc-grid-column(@index, @class, @type) when (@type = width) and (@index > 0) {\n .col-@{class}-@{index} {\n width: percentage((@index / @grid-columns));\n }\n}\n.calc-grid-column(@index, @class, @type) when (@type = push) and (@index > 0) {\n .col-@{class}-push-@{index} {\n left: percentage((@index / @grid-columns));\n }\n}\n.calc-grid-column(@index, @class, @type) when (@type = push) and (@index = 0) {\n .col-@{class}-push-0 {\n left: auto;\n }\n}\n.calc-grid-column(@index, @class, @type) when (@type = pull) and (@index > 0) {\n .col-@{class}-pull-@{index} {\n right: percentage((@index / @grid-columns));\n }\n}\n.calc-grid-column(@index, @class, @type) when (@type = pull) and (@index = 0) {\n .col-@{class}-pull-0 {\n right: auto;\n }\n}\n.calc-grid-column(@index, @class, @type) when (@type = offset) {\n .col-@{class}-offset-@{index} {\n margin-left: percentage((@index / @grid-columns));\n }\n}\n\n// Basic looping in LESS\n.loop-grid-columns(@index, @class, @type) when (@index >= 0) {\n .calc-grid-column(@index, @class, @type);\n // next iteration\n .loop-grid-columns((@index - 1), @class, @type);\n}\n\n// Create grid for specific class\n.make-grid(@class) {\n .float-grid-columns(@class);\n .loop-grid-columns(@grid-columns, @class, width);\n .loop-grid-columns(@grid-columns, @class, pull);\n .loop-grid-columns(@grid-columns, @class, push);\n .loop-grid-columns(@grid-columns, @class, offset);\n}\n","//\n// Tables\n// --------------------------------------------------\n\n\ntable {\n background-color: @table-bg;\n}\ncaption {\n padding-top: @table-cell-padding;\n padding-bottom: @table-cell-padding;\n color: @text-muted;\n text-align: left;\n}\nth {\n text-align: left;\n}\n\n\n// Baseline styles\n\n.table {\n width: 100%;\n max-width: 100%;\n margin-bottom: @line-height-computed;\n // Cells\n > thead,\n > tbody,\n > tfoot {\n > tr {\n > th,\n > td {\n padding: @table-cell-padding;\n line-height: @line-height-base;\n vertical-align: top;\n border-top: 1px solid @table-border-color;\n }\n }\n }\n // Bottom align for column headings\n > thead > tr > th {\n vertical-align: bottom;\n border-bottom: 2px solid @table-border-color;\n }\n // Remove top border from thead by default\n > caption + thead,\n > colgroup + thead,\n > thead:first-child {\n > tr:first-child {\n > th,\n > td {\n border-top: 0;\n }\n }\n }\n // Account for multiple tbody instances\n > tbody + tbody {\n border-top: 2px solid @table-border-color;\n }\n\n // Nesting\n .table {\n background-color: @body-bg;\n }\n}\n\n\n// Condensed table w/ half padding\n\n.table-condensed {\n > thead,\n > tbody,\n > tfoot {\n > tr {\n > th,\n > td {\n padding: @table-condensed-cell-padding;\n }\n }\n }\n}\n\n\n// Bordered version\n//\n// Add borders all around the table and between all the columns.\n\n.table-bordered {\n border: 1px solid @table-border-color;\n > thead,\n > tbody,\n > tfoot {\n > tr {\n > th,\n > td {\n border: 1px solid @table-border-color;\n }\n }\n }\n > thead > tr {\n > th,\n > td {\n border-bottom-width: 2px;\n }\n }\n}\n\n\n// Zebra-striping\n//\n// Default zebra-stripe styles (alternating gray and transparent backgrounds)\n\n.table-striped {\n > tbody > tr:nth-of-type(odd) {\n background-color: @table-bg-accent;\n }\n}\n\n\n// Hover effect\n//\n// Placed here since it has to come after the 
potential zebra striping\n\n.table-hover {\n > tbody > tr:hover {\n background-color: @table-bg-hover;\n }\n}\n\n\n// Table cell sizing\n//\n// Reset default table behavior\n\ntable col[class*=\"col-\"] {\n position: static; // Prevent border hiding in Firefox and IE9-11 (see https://github.com/twbs/bootstrap/issues/11623)\n float: none;\n display: table-column;\n}\ntable {\n td,\n th {\n &[class*=\"col-\"] {\n position: static; // Prevent border hiding in Firefox and IE9-11 (see https://github.com/twbs/bootstrap/issues/11623)\n float: none;\n display: table-cell;\n }\n }\n}\n\n\n// Table backgrounds\n//\n// Exact selectors below required to override `.table-striped` and prevent\n// inheritance to nested tables.\n\n// Generate the contextual variants\n.table-row-variant(active; @table-bg-active);\n.table-row-variant(success; @state-success-bg);\n.table-row-variant(info; @state-info-bg);\n.table-row-variant(warning; @state-warning-bg);\n.table-row-variant(danger; @state-danger-bg);\n\n\n// Responsive tables\n//\n// Wrap your tables in `.table-responsive` and we'll make them mobile friendly\n// by enabling horizontal scrolling. Only applies <768px. Everything above that\n// will display normally.\n\n.table-responsive {\n overflow-x: auto;\n min-height: 0.01%; // Workaround for IE9 bug (see https://github.com/twbs/bootstrap/issues/14837)\n\n @media screen and (max-width: @screen-xs-max) {\n width: 100%;\n margin-bottom: (@line-height-computed * 0.75);\n overflow-y: hidden;\n -ms-overflow-style: -ms-autohiding-scrollbar;\n border: 1px solid @table-border-color;\n\n // Tighten up spacing\n > .table {\n margin-bottom: 0;\n\n // Ensure the content doesn't wrap\n > thead,\n > tbody,\n > tfoot {\n > tr {\n > th,\n > td {\n white-space: nowrap;\n }\n }\n }\n }\n\n // Special overrides for the bordered tables\n > .table-bordered {\n border: 0;\n\n // Nuke the appropriate borders so that the parent can handle them\n > thead,\n > tbody,\n > tfoot {\n > tr {\n > th:first-child,\n > td:first-child {\n border-left: 0;\n }\n > th:last-child,\n > td:last-child {\n border-right: 0;\n }\n }\n }\n\n // Only nuke the last row's bottom-border in `tbody` and `tfoot` since\n // chances are there will be only one `tr` in a `thead` and that would\n // remove the border altogether.\n > tbody,\n > tfoot {\n > tr:last-child {\n > th,\n > td {\n border-bottom: 0;\n }\n }\n }\n\n }\n }\n}\n","// Tables\n\n.table-row-variant(@state; @background) {\n // Exact selectors below required to override `.table-striped` and prevent\n // inheritance to nested tables.\n .table > thead > tr,\n .table > tbody > tr,\n .table > tfoot > tr {\n > td.@{state},\n > th.@{state},\n &.@{state} > td,\n &.@{state} > th {\n background-color: @background;\n }\n }\n\n // Hover states for `.table-hover`\n // Note: this is not available for cells or rows within `thead` or `tfoot`.\n .table-hover > tbody > tr {\n > td.@{state}:hover,\n > th.@{state}:hover,\n &.@{state}:hover > td,\n &:hover > .@{state},\n &.@{state}:hover > th {\n background-color: darken(@background, 5%);\n }\n }\n}\n","//\n// Forms\n// --------------------------------------------------\n\n\n// Normalize non-controls\n//\n// Restyle and baseline non-control form elements.\n\nfieldset {\n padding: 0;\n margin: 0;\n border: 0;\n // Chrome and Firefox set a `min-width: min-content;` on fieldsets,\n // so we reset that to ensure it behaves more like a standard block element.\n // See https://github.com/twbs/bootstrap/issues/12359.\n min-width: 0;\n}\n\nlegend {\n display: block;\n width: 
100%;\n padding: 0;\n margin-bottom: @line-height-computed;\n font-size: (@font-size-base * 1.5);\n line-height: inherit;\n color: @legend-color;\n border: 0;\n border-bottom: 1px solid @legend-border-color;\n}\n\nlabel {\n display: inline-block;\n max-width: 100%; // Force IE8 to wrap long content (see https://github.com/twbs/bootstrap/issues/13141)\n margin-bottom: 5px;\n font-weight: bold;\n}\n\n\n// Normalize form controls\n//\n// While most of our form styles require extra classes, some basic normalization\n// is required to ensure optimum display with or without those classes to better\n// address browser inconsistencies.\n\n// Override content-box in Normalize (* isn't specific enough)\ninput[type=\"search\"] {\n .box-sizing(border-box);\n}\n\n// Position radios and checkboxes better\ninput[type=\"radio\"],\ninput[type=\"checkbox\"] {\n margin: 4px 0 0;\n margin-top: 1px \\9; // IE8-9\n line-height: normal;\n}\n\ninput[type=\"file\"] {\n display: block;\n}\n\n// Make range inputs behave like textual form controls\ninput[type=\"range\"] {\n display: block;\n width: 100%;\n}\n\n// Make multiple select elements height not fixed\nselect[multiple],\nselect[size] {\n height: auto;\n}\n\n// Focus for file, radio, and checkbox\ninput[type=\"file\"]:focus,\ninput[type=\"radio\"]:focus,\ninput[type=\"checkbox\"]:focus {\n .tab-focus();\n}\n\n// Adjust output element\noutput {\n display: block;\n padding-top: (@padding-base-vertical + 1);\n font-size: @font-size-base;\n line-height: @line-height-base;\n color: @input-color;\n}\n\n\n// Common form controls\n//\n// Shared size and type resets for form controls. Apply `.form-control` to any\n// of the following form controls:\n//\n// select\n// textarea\n// input[type=\"text\"]\n// input[type=\"password\"]\n// input[type=\"datetime\"]\n// input[type=\"datetime-local\"]\n// input[type=\"date\"]\n// input[type=\"month\"]\n// input[type=\"time\"]\n// input[type=\"week\"]\n// input[type=\"number\"]\n// input[type=\"email\"]\n// input[type=\"url\"]\n// input[type=\"search\"]\n// input[type=\"tel\"]\n// input[type=\"color\"]\n\n.form-control {\n display: block;\n width: 100%;\n height: @input-height-base; // Make inputs at least the height of their button counterpart (base line-height + padding + border)\n padding: @padding-base-vertical @padding-base-horizontal;\n font-size: @font-size-base;\n line-height: @line-height-base;\n color: @input-color;\n background-color: @input-bg;\n background-image: none; // Reset unusual Firefox-on-Android default style; see https://github.com/necolas/normalize.css/issues/214\n border: 1px solid @input-border;\n border-radius: @input-border-radius; // Note: This has no effect on s in CSS.\n .box-shadow(inset 0 1px 1px rgba(0,0,0,.075));\n .transition(~\"border-color ease-in-out .15s, box-shadow ease-in-out .15s\");\n\n // Customize the `:focus` state to imitate native WebKit styles.\n .form-control-focus();\n\n // Placeholder\n .placeholder();\n\n // Unstyle the caret on ``\n// element gets special love because it's special, and that's a fact!\n.input-size(@input-height; @padding-vertical; @padding-horizontal; @font-size; @line-height; @border-radius) {\n height: @input-height;\n padding: @padding-vertical @padding-horizontal;\n font-size: @font-size;\n line-height: @line-height;\n border-radius: @border-radius;\n\n select& {\n height: @input-height;\n line-height: @input-height;\n }\n\n textarea&,\n select[multiple]& {\n height: auto;\n }\n}\n","//\n// Buttons\n// 
--------------------------------------------------\n\n\n// Base styles\n// --------------------------------------------------\n\n.btn {\n display: inline-block;\n margin-bottom: 0; // For input.btn\n font-weight: @btn-font-weight;\n text-align: center;\n vertical-align: middle;\n touch-action: manipulation;\n cursor: pointer;\n background-image: none; // Reset unusual Firefox-on-Android default style; see https://github.com/necolas/normalize.css/issues/214\n border: 1px solid transparent;\n white-space: nowrap;\n .button-size(@padding-base-vertical; @padding-base-horizontal; @font-size-base; @line-height-base; @btn-border-radius-base);\n .user-select(none);\n\n &,\n &:active,\n &.active {\n &:focus,\n &.focus {\n .tab-focus();\n }\n }\n\n &:hover,\n &:focus,\n &.focus {\n color: @btn-default-color;\n text-decoration: none;\n }\n\n &:active,\n &.active {\n outline: 0;\n background-image: none;\n .box-shadow(inset 0 3px 5px rgba(0,0,0,.125));\n }\n\n &.disabled,\n &[disabled],\n fieldset[disabled] & {\n cursor: @cursor-disabled;\n .opacity(.65);\n .box-shadow(none);\n }\n\n a& {\n &.disabled,\n fieldset[disabled] & {\n pointer-events: none; // Future-proof disabling of clicks on `` elements\n }\n }\n}\n\n\n// Alternate buttons\n// --------------------------------------------------\n\n.btn-default {\n .button-variant(@btn-default-color; @btn-default-bg; @btn-default-border);\n}\n.btn-primary {\n .button-variant(@btn-primary-color; @btn-primary-bg; @btn-primary-border);\n}\n// Success appears as green\n.btn-success {\n .button-variant(@btn-success-color; @btn-success-bg; @btn-success-border);\n}\n// Info appears as blue-green\n.btn-info {\n .button-variant(@btn-info-color; @btn-info-bg; @btn-info-border);\n}\n// Warning appears as orange\n.btn-warning {\n .button-variant(@btn-warning-color; @btn-warning-bg; @btn-warning-border);\n}\n// Danger and error appear as red\n.btn-danger {\n .button-variant(@btn-danger-color; @btn-danger-bg; @btn-danger-border);\n}\n\n\n// Link buttons\n// -------------------------\n\n// Make a button look and behave like a link\n.btn-link {\n color: @link-color;\n font-weight: normal;\n border-radius: 0;\n\n &,\n &:active,\n &.active,\n &[disabled],\n fieldset[disabled] & {\n background-color: transparent;\n .box-shadow(none);\n }\n &,\n &:hover,\n &:focus,\n &:active {\n border-color: transparent;\n }\n &:hover,\n &:focus {\n color: @link-hover-color;\n text-decoration: @link-hover-decoration;\n background-color: transparent;\n }\n &[disabled],\n fieldset[disabled] & {\n &:hover,\n &:focus {\n color: @btn-link-disabled-color;\n text-decoration: none;\n }\n }\n}\n\n\n// Button Sizes\n// --------------------------------------------------\n\n.btn-lg {\n // line-height: ensure even-numbered height of button next to large input\n .button-size(@padding-large-vertical; @padding-large-horizontal; @font-size-large; @line-height-large; @btn-border-radius-large);\n}\n.btn-sm {\n // line-height: ensure proper height of button next to small input\n .button-size(@padding-small-vertical; @padding-small-horizontal; @font-size-small; @line-height-small; @btn-border-radius-small);\n}\n.btn-xs {\n .button-size(@padding-xs-vertical; @padding-xs-horizontal; @font-size-small; @line-height-small; @btn-border-radius-small);\n}\n\n\n// Block button\n// --------------------------------------------------\n\n.btn-block {\n display: block;\n width: 100%;\n}\n\n// Vertically space out multiple block buttons\n.btn-block + .btn-block {\n margin-top: 5px;\n}\n\n// Specificity 
overrides\ninput[type=\"submit\"],\ninput[type=\"reset\"],\ninput[type=\"button\"] {\n &.btn-block {\n width: 100%;\n }\n}\n","// Button variants\n//\n// Easily pump out default styles, as well as :hover, :focus, :active,\n// and disabled options for all buttons\n\n.button-variant(@color; @background; @border) {\n color: @color;\n background-color: @background;\n border-color: @border;\n\n &:focus,\n &.focus {\n color: @color;\n background-color: darken(@background, 10%);\n border-color: darken(@border, 25%);\n }\n &:hover {\n color: @color;\n background-color: darken(@background, 10%);\n border-color: darken(@border, 12%);\n }\n &:active,\n &.active,\n .open > .dropdown-toggle& {\n color: @color;\n background-color: darken(@background, 10%);\n border-color: darken(@border, 12%);\n\n &:hover,\n &:focus,\n &.focus {\n color: @color;\n background-color: darken(@background, 17%);\n border-color: darken(@border, 25%);\n }\n }\n &:active,\n &.active,\n .open > .dropdown-toggle& {\n background-image: none;\n }\n &.disabled,\n &[disabled],\n fieldset[disabled] & {\n &:hover,\n &:focus,\n &.focus {\n background-color: @background;\n border-color: @border;\n }\n }\n\n .badge {\n color: @background;\n background-color: @color;\n }\n}\n\n// Button sizes\n.button-size(@padding-vertical; @padding-horizontal; @font-size; @line-height; @border-radius) {\n padding: @padding-vertical @padding-horizontal;\n font-size: @font-size;\n line-height: @line-height;\n border-radius: @border-radius;\n}\n","// Opacity\n\n.opacity(@opacity) {\n opacity: @opacity;\n // IE8 filter\n @opacity-ie: (@opacity * 100);\n filter: ~\"alpha(opacity=@{opacity-ie})\";\n}\n","//\n// Component animations\n// --------------------------------------------------\n\n// Heads up!\n//\n// We don't use the `.opacity()` mixin here since it causes a bug with text\n// fields in IE7-8. 
Source: https://github.com/twbs/bootstrap/pull/3552.\n\n.fade {\n opacity: 0;\n .transition(opacity .15s linear);\n &.in {\n opacity: 1;\n }\n}\n\n.collapse {\n display: none;\n\n &.in { display: block; }\n tr&.in { display: table-row; }\n tbody&.in { display: table-row-group; }\n}\n\n.collapsing {\n position: relative;\n height: 0;\n overflow: hidden;\n .transition-property(~\"height, visibility\");\n .transition-duration(.35s);\n .transition-timing-function(ease);\n}\n","//\n// Dropdown menus\n// --------------------------------------------------\n\n\n// Dropdown arrow/caret\n.caret {\n display: inline-block;\n width: 0;\n height: 0;\n margin-left: 2px;\n vertical-align: middle;\n border-top: @caret-width-base dashed;\n border-top: @caret-width-base solid ~\"\\9\"; // IE8\n border-right: @caret-width-base solid transparent;\n border-left: @caret-width-base solid transparent;\n}\n\n// The dropdown wrapper (div)\n.dropup,\n.dropdown {\n position: relative;\n}\n\n// Prevent the focus on the dropdown toggle when closing dropdowns\n.dropdown-toggle:focus {\n outline: 0;\n}\n\n// The dropdown menu (ul)\n.dropdown-menu {\n position: absolute;\n top: 100%;\n left: 0;\n z-index: @zindex-dropdown;\n display: none; // none by default, but block on \"open\" of the menu\n float: left;\n min-width: 160px;\n padding: 5px 0;\n margin: 2px 0 0; // override default ul\n list-style: none;\n font-size: @font-size-base;\n text-align: left; // Ensures proper alignment if parent has it changed (e.g., modal footer)\n background-color: @dropdown-bg;\n border: 1px solid @dropdown-fallback-border; // IE8 fallback\n border: 1px solid @dropdown-border;\n border-radius: @border-radius-base;\n .box-shadow(0 6px 12px rgba(0,0,0,.175));\n background-clip: padding-box;\n\n // Aligns the dropdown menu to right\n //\n // Deprecated as of 3.1.0 in favor of `.dropdown-menu-[dir]`\n &.pull-right {\n right: 0;\n left: auto;\n }\n\n // Dividers (basically an hr) within the dropdown\n .divider {\n .nav-divider(@dropdown-divider-bg);\n }\n\n // Links within the dropdown menu\n > li > a {\n display: block;\n padding: 3px 20px;\n clear: both;\n font-weight: normal;\n line-height: @line-height-base;\n color: @dropdown-link-color;\n white-space: nowrap; // prevent links from randomly breaking onto new lines\n }\n}\n\n// Hover/Focus state\n.dropdown-menu > li > a {\n &:hover,\n &:focus {\n text-decoration: none;\n color: @dropdown-link-hover-color;\n background-color: @dropdown-link-hover-bg;\n }\n}\n\n// Active state\n.dropdown-menu > .active > a {\n &,\n &:hover,\n &:focus {\n color: @dropdown-link-active-color;\n text-decoration: none;\n outline: 0;\n background-color: @dropdown-link-active-bg;\n }\n}\n\n// Disabled state\n//\n// Gray out text and ensure the hover/focus state remains gray\n\n.dropdown-menu > .disabled > a {\n &,\n &:hover,\n &:focus {\n color: @dropdown-link-disabled-color;\n }\n\n // Nuke hover/focus effects\n &:hover,\n &:focus {\n text-decoration: none;\n background-color: transparent;\n background-image: none; // Remove CSS gradient\n .reset-filter();\n cursor: @cursor-disabled;\n }\n}\n\n// Open state for the dropdown\n.open {\n // Show the menu\n > .dropdown-menu {\n display: block;\n }\n\n // Remove the outline when :focus is triggered\n > a {\n outline: 0;\n }\n}\n\n// Menu positioning\n//\n// Add extra class to `.dropdown-menu` to flip the alignment of the dropdown\n// menu with the parent.\n.dropdown-menu-right {\n left: auto; // Reset the default from `.dropdown-menu`\n right: 0;\n}\n// With v3, we 
enabled auto-flipping if you have a dropdown within a right\n// aligned nav component. To enable the undoing of that, we provide an override\n// to restore the default dropdown menu alignment.\n//\n// This is only for left-aligning a dropdown menu within a `.navbar-right` or\n// `.pull-right` nav component.\n.dropdown-menu-left {\n left: 0;\n right: auto;\n}\n\n// Dropdown section headers\n.dropdown-header {\n display: block;\n padding: 3px 20px;\n font-size: @font-size-small;\n line-height: @line-height-base;\n color: @dropdown-header-color;\n white-space: nowrap; // as with > li > a\n}\n\n// Backdrop to catch body clicks on mobile, etc.\n.dropdown-backdrop {\n position: fixed;\n left: 0;\n right: 0;\n bottom: 0;\n top: 0;\n z-index: (@zindex-dropdown - 10);\n}\n\n// Right aligned dropdowns\n.pull-right > .dropdown-menu {\n right: 0;\n left: auto;\n}\n\n// Allow for dropdowns to go bottom up (aka, dropup-menu)\n//\n// Just add .dropup after the standard .dropdown class and you're set, bro.\n// TODO: abstract this so that the navbar fixed styles are not placed here?\n\n.dropup,\n.navbar-fixed-bottom .dropdown {\n // Reverse the caret\n .caret {\n border-top: 0;\n border-bottom: @caret-width-base dashed;\n border-bottom: @caret-width-base solid ~\"\\9\"; // IE8\n content: \"\";\n }\n // Different positioning for bottom up menu\n .dropdown-menu {\n top: auto;\n bottom: 100%;\n margin-bottom: 2px;\n }\n}\n\n\n// Component alignment\n//\n// Reiterate per navbar.less and the modified component alignment there.\n\n@media (min-width: @grid-float-breakpoint) {\n .navbar-right {\n .dropdown-menu {\n .dropdown-menu-right();\n }\n // Necessary for overrides of the default right aligned menu.\n // Will remove come v4 in all likelihood.\n .dropdown-menu-left {\n .dropdown-menu-left();\n }\n }\n}\n","// Horizontal dividers\n//\n// Dividers (basically an hr) within dropdowns and nav lists\n\n.nav-divider(@color: #e5e5e5) {\n height: 1px;\n margin: ((@line-height-computed / 2) - 1) 0;\n overflow: hidden;\n background-color: @color;\n}\n","// Reset filters for IE\n//\n// When you need to remove a gradient background, do not forget to use this to reset\n// the IE filter for IE9 and below.\n\n.reset-filter() {\n filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(enabled = false)\"));\n}\n","//\n// Button groups\n// --------------------------------------------------\n\n// Make the div behave like a button\n.btn-group,\n.btn-group-vertical {\n position: relative;\n display: inline-block;\n vertical-align: middle; // match .btn alignment given font-size hack above\n > .btn {\n position: relative;\n float: left;\n // Bring the \"active\" button to the front\n &:hover,\n &:focus,\n &:active,\n &.active {\n z-index: 2;\n }\n }\n}\n\n// Prevent double borders when buttons are next to each other\n.btn-group {\n .btn + .btn,\n .btn + .btn-group,\n .btn-group + .btn,\n .btn-group + .btn-group {\n margin-left: -1px;\n }\n}\n\n// Optional: Group multiple button groups together for a toolbar\n.btn-toolbar {\n margin-left: -5px; // Offset the first child's margin\n &:extend(.clearfix all);\n\n .btn,\n .btn-group,\n .input-group {\n float: left;\n }\n > .btn,\n > .btn-group,\n > .input-group {\n margin-left: 5px;\n }\n}\n\n.btn-group > .btn:not(:first-child):not(:last-child):not(.dropdown-toggle) {\n border-radius: 0;\n}\n\n// Set corners individual because sometimes a single button can be in a .btn-group and we need :first-child and :last-child to both match\n.btn-group > .btn:first-child {\n margin-left: 0;\n 
&:not(:last-child):not(.dropdown-toggle) {\n .border-right-radius(0);\n }\n}\n// Need .dropdown-toggle since :last-child doesn't apply, given that a .dropdown-menu is used immediately after it\n.btn-group > .btn:last-child:not(:first-child),\n.btn-group > .dropdown-toggle:not(:first-child) {\n .border-left-radius(0);\n}\n\n// Custom edits for including btn-groups within btn-groups (useful for including dropdown buttons within a btn-group)\n.btn-group > .btn-group {\n float: left;\n}\n.btn-group > .btn-group:not(:first-child):not(:last-child) > .btn {\n border-radius: 0;\n}\n.btn-group > .btn-group:first-child:not(:last-child) {\n > .btn:last-child,\n > .dropdown-toggle {\n .border-right-radius(0);\n }\n}\n.btn-group > .btn-group:last-child:not(:first-child) > .btn:first-child {\n .border-left-radius(0);\n}\n\n// On active and open, don't show outline\n.btn-group .dropdown-toggle:active,\n.btn-group.open .dropdown-toggle {\n outline: 0;\n}\n\n\n// Sizing\n//\n// Remix the default button sizing classes into new ones for easier manipulation.\n\n.btn-group-xs > .btn { &:extend(.btn-xs); }\n.btn-group-sm > .btn { &:extend(.btn-sm); }\n.btn-group-lg > .btn { &:extend(.btn-lg); }\n\n\n// Split button dropdowns\n// ----------------------\n\n// Give the line between buttons some depth\n.btn-group > .btn + .dropdown-toggle {\n padding-left: 8px;\n padding-right: 8px;\n}\n.btn-group > .btn-lg + .dropdown-toggle {\n padding-left: 12px;\n padding-right: 12px;\n}\n\n// The clickable button for toggling the menu\n// Remove the gradient and set the same inset shadow as the :active state\n.btn-group.open .dropdown-toggle {\n .box-shadow(inset 0 3px 5px rgba(0,0,0,.125));\n\n // Show no shadow for `.btn-link` since it has no other button styles.\n &.btn-link {\n .box-shadow(none);\n }\n}\n\n\n// Reposition the caret\n.btn .caret {\n margin-left: 0;\n}\n// Carets in other button sizes\n.btn-lg .caret {\n border-width: @caret-width-large @caret-width-large 0;\n border-bottom-width: 0;\n}\n// Upside down carets for .dropup\n.dropup .btn-lg .caret {\n border-width: 0 @caret-width-large @caret-width-large;\n}\n\n\n// Vertical button groups\n// ----------------------\n\n.btn-group-vertical {\n > .btn,\n > .btn-group,\n > .btn-group > .btn {\n display: block;\n float: none;\n width: 100%;\n max-width: 100%;\n }\n\n // Clear floats so dropdown menus can be properly placed\n > .btn-group {\n &:extend(.clearfix all);\n > .btn {\n float: none;\n }\n }\n\n > .btn + .btn,\n > .btn + .btn-group,\n > .btn-group + .btn,\n > .btn-group + .btn-group {\n margin-top: -1px;\n margin-left: 0;\n }\n}\n\n.btn-group-vertical > .btn {\n &:not(:first-child):not(:last-child) {\n border-radius: 0;\n }\n &:first-child:not(:last-child) {\n .border-top-radius(@btn-border-radius-base);\n .border-bottom-radius(0);\n }\n &:last-child:not(:first-child) {\n .border-top-radius(0);\n .border-bottom-radius(@btn-border-radius-base);\n }\n}\n.btn-group-vertical > .btn-group:not(:first-child):not(:last-child) > .btn {\n border-radius: 0;\n}\n.btn-group-vertical > .btn-group:first-child:not(:last-child) {\n > .btn:last-child,\n > .dropdown-toggle {\n .border-bottom-radius(0);\n }\n}\n.btn-group-vertical > .btn-group:last-child:not(:first-child) > .btn:first-child {\n .border-top-radius(0);\n}\n\n\n// Justified button groups\n// ----------------------\n\n.btn-group-justified {\n display: table;\n width: 100%;\n table-layout: fixed;\n border-collapse: separate;\n > .btn,\n > .btn-group {\n float: none;\n display: table-cell;\n width: 1%;\n }\n > 
.btn-group .btn {\n width: 100%;\n }\n\n > .btn-group .dropdown-menu {\n left: auto;\n }\n}\n\n\n// Checkbox and radio options\n//\n// In order to support the browser's form validation feedback, powered by the\n// `required` attribute, we have to \"hide\" the inputs via `clip`. We cannot use\n// `display: none;` or `visibility: hidden;` as that also hides the popover.\n// Simply visually hiding the inputs via `opacity` would leave them clickable in\n// certain cases which is prevented by using `clip` and `pointer-events`.\n// This way, we ensure a DOM element is visible to position the popover from.\n//\n// See https://github.com/twbs/bootstrap/pull/12794 and\n// https://github.com/twbs/bootstrap/pull/14559 for more information.\n\n[data-toggle=\"buttons\"] {\n > .btn,\n > .btn-group > .btn {\n input[type=\"radio\"],\n input[type=\"checkbox\"] {\n position: absolute;\n clip: rect(0,0,0,0);\n pointer-events: none;\n }\n }\n}\n","// Single side border-radius\n\n.border-top-radius(@radius) {\n border-top-right-radius: @radius;\n border-top-left-radius: @radius;\n}\n.border-right-radius(@radius) {\n border-bottom-right-radius: @radius;\n border-top-right-radius: @radius;\n}\n.border-bottom-radius(@radius) {\n border-bottom-right-radius: @radius;\n border-bottom-left-radius: @radius;\n}\n.border-left-radius(@radius) {\n border-bottom-left-radius: @radius;\n border-top-left-radius: @radius;\n}\n","//\n// Input groups\n// --------------------------------------------------\n\n// Base styles\n// -------------------------\n.input-group {\n position: relative; // For dropdowns\n display: table;\n border-collapse: separate; // prevent input groups from inheriting border styles from table cells when placed within a table\n\n // Undo padding and float of grid classes\n &[class*=\"col-\"] {\n float: none;\n padding-left: 0;\n padding-right: 0;\n }\n\n .form-control {\n // Ensure that the input is always above the *appended* addon button for\n // proper border colors.\n position: relative;\n z-index: 2;\n\n // IE9 fubars the placeholder attribute in text inputs and the arrows on\n // select elements in input groups. To fix it, we float the input. 
Details:\n // https://github.com/twbs/bootstrap/issues/11561#issuecomment-28936855\n float: left;\n\n width: 100%;\n margin-bottom: 0;\n\n &:focus {\n z-index: 3;\n }\n }\n}\n\n// Sizing options\n//\n// Remix the default form control sizing classes into new ones for easier\n// manipulation.\n\n.input-group-lg > .form-control,\n.input-group-lg > .input-group-addon,\n.input-group-lg > .input-group-btn > .btn {\n .input-lg();\n}\n.input-group-sm > .form-control,\n.input-group-sm > .input-group-addon,\n.input-group-sm > .input-group-btn > .btn {\n .input-sm();\n}\n\n\n// Display as table-cell\n// -------------------------\n.input-group-addon,\n.input-group-btn,\n.input-group .form-control {\n display: table-cell;\n\n &:not(:first-child):not(:last-child) {\n border-radius: 0;\n }\n}\n// Addon and addon wrapper for buttons\n.input-group-addon,\n.input-group-btn {\n width: 1%;\n white-space: nowrap;\n vertical-align: middle; // Match the inputs\n}\n\n// Text input groups\n// -------------------------\n.input-group-addon {\n padding: @padding-base-vertical @padding-base-horizontal;\n font-size: @font-size-base;\n font-weight: normal;\n line-height: 1;\n color: @input-color;\n text-align: center;\n background-color: @input-group-addon-bg;\n border: 1px solid @input-group-addon-border-color;\n border-radius: @input-border-radius;\n\n // Sizing\n &.input-sm {\n padding: @padding-small-vertical @padding-small-horizontal;\n font-size: @font-size-small;\n border-radius: @input-border-radius-small;\n }\n &.input-lg {\n padding: @padding-large-vertical @padding-large-horizontal;\n font-size: @font-size-large;\n border-radius: @input-border-radius-large;\n }\n\n // Nuke default margins from checkboxes and radios to vertically center within.\n input[type=\"radio\"],\n input[type=\"checkbox\"] {\n margin-top: 0;\n }\n}\n\n// Reset rounded corners\n.input-group .form-control:first-child,\n.input-group-addon:first-child,\n.input-group-btn:first-child > .btn,\n.input-group-btn:first-child > .btn-group > .btn,\n.input-group-btn:first-child > .dropdown-toggle,\n.input-group-btn:last-child > .btn:not(:last-child):not(.dropdown-toggle),\n.input-group-btn:last-child > .btn-group:not(:last-child) > .btn {\n .border-right-radius(0);\n}\n.input-group-addon:first-child {\n border-right: 0;\n}\n.input-group .form-control:last-child,\n.input-group-addon:last-child,\n.input-group-btn:last-child > .btn,\n.input-group-btn:last-child > .btn-group > .btn,\n.input-group-btn:last-child > .dropdown-toggle,\n.input-group-btn:first-child > .btn:not(:first-child),\n.input-group-btn:first-child > .btn-group:not(:first-child) > .btn {\n .border-left-radius(0);\n}\n.input-group-addon:last-child {\n border-left: 0;\n}\n\n// Button input groups\n// -------------------------\n.input-group-btn {\n position: relative;\n // Jankily prevent input button groups from wrapping with `white-space` and\n // `font-size` in combination with `inline-block` on buttons.\n font-size: 0;\n white-space: nowrap;\n\n // Negative margin for spacing, position for bringing hovered/focused/actived\n // element above the siblings.\n > .btn {\n position: relative;\n + .btn {\n margin-left: -1px;\n }\n // Bring the \"active\" button to the front\n &:hover,\n &:focus,\n &:active {\n z-index: 2;\n }\n }\n\n // Negative margin to only have a 1px border between the two\n &:first-child {\n > .btn,\n > .btn-group {\n margin-right: -1px;\n }\n }\n &:last-child {\n > .btn,\n > .btn-group {\n z-index: 2;\n margin-left: -1px;\n }\n }\n}\n","//\n// Navs\n// 
--------------------------------------------------\n\n\n// Base class\n// --------------------------------------------------\n\n.nav {\n margin-bottom: 0;\n padding-left: 0; // Override default ul/ol\n list-style: none;\n &:extend(.clearfix all);\n\n > li {\n position: relative;\n display: block;\n\n > a {\n position: relative;\n display: block;\n padding: @nav-link-padding;\n &:hover,\n &:focus {\n text-decoration: none;\n background-color: @nav-link-hover-bg;\n }\n }\n\n // Disabled state sets text to gray and nukes hover/tab effects\n &.disabled > a {\n color: @nav-disabled-link-color;\n\n &:hover,\n &:focus {\n color: @nav-disabled-link-hover-color;\n text-decoration: none;\n background-color: transparent;\n cursor: @cursor-disabled;\n }\n }\n }\n\n // Open dropdowns\n .open > a {\n &,\n &:hover,\n &:focus {\n background-color: @nav-link-hover-bg;\n border-color: @link-color;\n }\n }\n\n // Nav dividers (deprecated with v3.0.1)\n //\n // This should have been removed in v3 with the dropping of `.nav-list`, but\n // we missed it. We don't currently support this anywhere, but in the interest\n // of maintaining backward compatibility in case you use it, it's deprecated.\n .nav-divider {\n .nav-divider();\n }\n\n // Prevent IE8 from misplacing imgs\n //\n // See https://github.com/h5bp/html5-boilerplate/issues/984#issuecomment-3985989\n > li > a > img {\n max-width: none;\n }\n}\n\n\n// Tabs\n// -------------------------\n\n// Give the tabs something to sit on\n.nav-tabs {\n border-bottom: 1px solid @nav-tabs-border-color;\n > li {\n float: left;\n // Make the list-items overlay the bottom border\n margin-bottom: -1px;\n\n // Actual tabs (as links)\n > a {\n margin-right: 2px;\n line-height: @line-height-base;\n border: 1px solid transparent;\n border-radius: @border-radius-base @border-radius-base 0 0;\n &:hover {\n border-color: @nav-tabs-link-hover-border-color @nav-tabs-link-hover-border-color @nav-tabs-border-color;\n }\n }\n\n // Active state, and its :hover to override normal :hover\n &.active > a {\n &,\n &:hover,\n &:focus {\n color: @nav-tabs-active-link-hover-color;\n background-color: @nav-tabs-active-link-hover-bg;\n border: 1px solid @nav-tabs-active-link-hover-border-color;\n border-bottom-color: transparent;\n cursor: default;\n }\n }\n }\n // pulling this in mainly for less shorthand\n &.nav-justified {\n .nav-justified();\n .nav-tabs-justified();\n }\n}\n\n\n// Pills\n// -------------------------\n.nav-pills {\n > li {\n float: left;\n\n // Links rendered as pills\n > a {\n border-radius: @nav-pills-border-radius;\n }\n + li {\n margin-left: 2px;\n }\n\n // Active state\n &.active > a {\n &,\n &:hover,\n &:focus {\n color: @nav-pills-active-link-hover-color;\n background-color: @nav-pills-active-link-hover-bg;\n }\n }\n }\n}\n\n\n// Stacked pills\n.nav-stacked {\n > li {\n float: none;\n + li {\n margin-top: 2px;\n margin-left: 0; // no need for this gap between nav items\n }\n }\n}\n\n\n// Nav variations\n// --------------------------------------------------\n\n// Justified nav links\n// -------------------------\n\n.nav-justified {\n width: 100%;\n\n > li {\n float: none;\n > a {\n text-align: center;\n margin-bottom: 5px;\n }\n }\n\n > .dropdown .dropdown-menu {\n top: auto;\n left: auto;\n }\n\n @media (min-width: @screen-sm-min) {\n > li {\n display: table-cell;\n width: 1%;\n > a {\n margin-bottom: 0;\n }\n }\n }\n}\n\n// Move borders to anchors instead of bottom of list\n//\n// Mixin for adding on top the shared `.nav-justified` styles for our 
[bundled Bootstrap 3.x LESS sources, newline-escaped as sourcemap sourcesContent entries: tail of navs.less (justified and tabbable tabs, nav-tabs dropdowns), navbars.less, mixins/nav-vertical-align.less, utilities.less, breadcrumbs.less, pagination.less, mixins/pagination.less, pager.less, labels.less, mixins/labels.less, badges.less, jumbotron.less, thumbnails.less, alerts.less, mixins/alerts.less, progress-bars.less, mixins/gradients.less, mixins/progress-bar.less, media.less, and the head of list-group.less (truncated)]
a",l.leadingWhitespace=3===a.firstChild.nodeType,l.tbody=!a.getElementsByTagName("tbody").length,l.htmlSerialize=!!a.getElementsByTagName("link").length,l.html5Clone="<:nav>"!==d.createElement("nav").cloneNode(!0).outerHTML,c.type="checkbox",c.checked=!0,b.appendChild(c),l.appendChecked=c.checked,a.innerHTML="",l.noCloneChecked=!!a.cloneNode(!0).lastChild.defaultValue,b.appendChild(a),c=d.createElement("input"),c.setAttribute("type","radio"),c.setAttribute("checked","checked"),c.setAttribute("name","t"),a.appendChild(c),l.checkClone=a.cloneNode(!0).cloneNode(!0).lastChild.checked,l.noCloneEvent=!!a.addEventListener,a[n.expando]=1,l.attributes=!a.getAttribute(n.expando)}();var da={option:[1,""],legend:[1,"
","
"],area:[1,"",""],param:[1,"",""],thead:[1,"","
"],tr:[2,"","
"],col:[2,"","
"],td:[3,"","
"],_default:l.htmlSerialize?[0,"",""]:[1,"X
","
"]};da.optgroup=da.option,da.tbody=da.tfoot=da.colgroup=da.caption=da.thead,da.th=da.td;function ea(a,b){var c,d,e=0,f="undefined"!=typeof a.getElementsByTagName?a.getElementsByTagName(b||"*"):"undefined"!=typeof a.querySelectorAll?a.querySelectorAll(b||"*"):void 0;if(!f)for(f=[],c=a.childNodes||a;null!=(d=c[e]);e++)!b||n.nodeName(d,b)?f.push(d):n.merge(f,ea(d,b));return void 0===b||b&&n.nodeName(a,b)?n.merge([a],f):f}function fa(a,b){for(var c,d=0;null!=(c=a[d]);d++)n._data(c,"globalEval",!b||n._data(b[d],"globalEval"))}var ga=/<|&#?\w+;/,ha=/r;r++)if(g=a[r],g||0===g)if("object"===n.type(g))n.merge(q,g.nodeType?[g]:g);else if(ga.test(g)){i=i||p.appendChild(b.createElement("div")),j=($.exec(g)||["",""])[1].toLowerCase(),m=da[j]||da._default,i.innerHTML=m[1]+n.htmlPrefilter(g)+m[2],f=m[0];while(f--)i=i.lastChild;if(!l.leadingWhitespace&&aa.test(g)&&q.push(b.createTextNode(aa.exec(g)[0])),!l.tbody){g="table"!==j||ha.test(g)?""!==m[1]||ha.test(g)?0:i:i.firstChild,f=g&&g.childNodes.length;while(f--)n.nodeName(k=g.childNodes[f],"tbody")&&!k.childNodes.length&&g.removeChild(k)}n.merge(q,i.childNodes),i.textContent="";while(i.firstChild)i.removeChild(i.firstChild);i=p.lastChild}else q.push(b.createTextNode(g));i&&p.removeChild(i),l.appendChecked||n.grep(ea(q,"input"),ia),r=0;while(g=q[r++])if(d&&n.inArray(g,d)>-1)e&&e.push(g);else if(h=n.contains(g.ownerDocument,g),i=ea(p.appendChild(g),"script"),h&&fa(i),c){f=0;while(g=i[f++])_.test(g.type||"")&&c.push(g)}return i=null,p}!function(){var b,c,e=d.createElement("div");for(b in{submit:!0,change:!0,focusin:!0})c="on"+b,(l[b]=c in a)||(e.setAttribute(c,"t"),l[b]=e.attributes[c].expando===!1);e=null}();var ka=/^(?:input|select|textarea)$/i,la=/^key/,ma=/^(?:mouse|pointer|contextmenu|drag|drop)|click/,na=/^(?:focusinfocus|focusoutblur)$/,oa=/^([^.]*)(?:\.(.+)|)/;function pa(){return!0}function qa(){return!1}function ra(){try{return d.activeElement}catch(a){}}function sa(a,b,c,d,e,f){var g,h;if("object"==typeof b){"string"!=typeof c&&(d=d||c,c=void 0);for(h in b)sa(a,h,c,d,b[h],f);return a}if(null==d&&null==e?(e=c,d=c=void 0):null==e&&("string"==typeof c?(e=d,d=void 0):(e=d,d=c,c=void 0)),e===!1)e=qa;else if(!e)return a;return 1===f&&(g=e,e=function(a){return n().off(a),g.apply(this,arguments)},e.guid=g.guid||(g.guid=n.guid++)),a.each(function(){n.event.add(this,b,e,d,c)})}n.event={global:{},add:function(a,b,c,d,e){var f,g,h,i,j,k,l,m,o,p,q,r=n._data(a);if(r){c.handler&&(i=c,c=i.handler,e=i.selector),c.guid||(c.guid=n.guid++),(g=r.events)||(g=r.events={}),(k=r.handle)||(k=r.handle=function(a){return"undefined"==typeof n||a&&n.event.triggered===a.type?void 0:n.event.dispatch.apply(k.elem,arguments)},k.elem=a),b=(b||"").match(G)||[""],h=b.length;while(h--)f=oa.exec(b[h])||[],o=q=f[1],p=(f[2]||"").split(".").sort(),o&&(j=n.event.special[o]||{},o=(e?j.delegateType:j.bindType)||o,j=n.event.special[o]||{},l=n.extend({type:o,origType:q,data:d,handler:c,guid:c.guid,selector:e,needsContext:e&&n.expr.match.needsContext.test(e),namespace:p.join(".")},i),(m=g[o])||(m=g[o]=[],m.delegateCount=0,j.setup&&j.setup.call(a,d,p,k)!==!1||(a.addEventListener?a.addEventListener(o,k,!1):a.attachEvent&&a.attachEvent("on"+o,k))),j.add&&(j.add.call(a,l),l.handler.guid||(l.handler.guid=c.guid)),e?m.splice(m.delegateCount++,0,l):m.push(l),n.event.global[o]=!0);a=null}},remove:function(a,b,c,d,e){var 
f,g,h,i,j,k,l,m,o,p,q,r=n.hasData(a)&&n._data(a);if(r&&(k=r.events)){b=(b||"").match(G)||[""],j=b.length;while(j--)if(h=oa.exec(b[j])||[],o=q=h[1],p=(h[2]||"").split(".").sort(),o){l=n.event.special[o]||{},o=(d?l.delegateType:l.bindType)||o,m=k[o]||[],h=h[2]&&new RegExp("(^|\\.)"+p.join("\\.(?:.*\\.|)")+"(\\.|$)"),i=f=m.length;while(f--)g=m[f],!e&&q!==g.origType||c&&c.guid!==g.guid||h&&!h.test(g.namespace)||d&&d!==g.selector&&("**"!==d||!g.selector)||(m.splice(f,1),g.selector&&m.delegateCount--,l.remove&&l.remove.call(a,g));i&&!m.length&&(l.teardown&&l.teardown.call(a,p,r.handle)!==!1||n.removeEvent(a,o,r.handle),delete k[o])}else for(o in k)n.event.remove(a,o+b[j],c,d,!0);n.isEmptyObject(k)&&(delete r.handle,n._removeData(a,"events"))}},trigger:function(b,c,e,f){var g,h,i,j,l,m,o,p=[e||d],q=k.call(b,"type")?b.type:b,r=k.call(b,"namespace")?b.namespace.split("."):[];if(i=m=e=e||d,3!==e.nodeType&&8!==e.nodeType&&!na.test(q+n.event.triggered)&&(q.indexOf(".")>-1&&(r=q.split("."),q=r.shift(),r.sort()),h=q.indexOf(":")<0&&"on"+q,b=b[n.expando]?b:new n.Event(q,"object"==typeof b&&b),b.isTrigger=f?2:3,b.namespace=r.join("."),b.rnamespace=b.namespace?new RegExp("(^|\\.)"+r.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,b.result=void 0,b.target||(b.target=e),c=null==c?[b]:n.makeArray(c,[b]),l=n.event.special[q]||{},f||!l.trigger||l.trigger.apply(e,c)!==!1)){if(!f&&!l.noBubble&&!n.isWindow(e)){for(j=l.delegateType||q,na.test(j+q)||(i=i.parentNode);i;i=i.parentNode)p.push(i),m=i;m===(e.ownerDocument||d)&&p.push(m.defaultView||m.parentWindow||a)}o=0;while((i=p[o++])&&!b.isPropagationStopped())b.type=o>1?j:l.bindType||q,g=(n._data(i,"events")||{})[b.type]&&n._data(i,"handle"),g&&g.apply(i,c),g=h&&i[h],g&&g.apply&&M(i)&&(b.result=g.apply(i,c),b.result===!1&&b.preventDefault());if(b.type=q,!f&&!b.isDefaultPrevented()&&(!l._default||l._default.apply(p.pop(),c)===!1)&&M(e)&&h&&e[q]&&!n.isWindow(e)){m=e[h],m&&(e[h]=null),n.event.triggered=q;try{e[q]()}catch(s){}n.event.triggered=void 0,m&&(e[h]=m)}return b.result}},dispatch:function(a){a=n.event.fix(a);var b,c,d,f,g,h=[],i=e.call(arguments),j=(n._data(this,"events")||{})[a.type]||[],k=n.event.special[a.type]||{};if(i[0]=a,a.delegateTarget=this,!k.preDispatch||k.preDispatch.call(this,a)!==!1){h=n.event.handlers.call(this,a,j),b=0;while((f=h[b++])&&!a.isPropagationStopped()){a.currentTarget=f.elem,c=0;while((g=f.handlers[c++])&&!a.isImmediatePropagationStopped())a.rnamespace&&!a.rnamespace.test(g.namespace)||(a.handleObj=g,a.data=g.data,d=((n.event.special[g.origType]||{}).handle||g.handler).apply(f.elem,i),void 0!==d&&(a.result=d)===!1&&(a.preventDefault(),a.stopPropagation()))}return k.postDispatch&&k.postDispatch.call(this,a),a.result}},handlers:function(a,b){var c,d,e,f,g=[],h=b.delegateCount,i=a.target;if(h&&i.nodeType&&("click"!==a.type||isNaN(a.button)||a.button<1))for(;i!=this;i=i.parentNode||this)if(1===i.nodeType&&(i.disabled!==!0||"click"!==a.type)){for(d=[],c=0;h>c;c++)f=b[c],e=f.selector+" ",void 0===d[e]&&(d[e]=f.needsContext?n(e,this).index(i)>-1:n.find(e,this,null,[i]).length),d[e]&&d.push(f);d.length&&g.push({elem:i,handlers:d})}return h]","i"),va=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:-]+)[^>]*)\/>/gi,wa=/\s*$/g,Aa=ca(d),Ba=Aa.appendChild(d.createElement("div"));function Ca(a,b){return n.nodeName(a,"table")&&n.nodeName(11!==b.nodeType?b:b.firstChild,"tr")?a.getElementsByTagName("tbody")[0]||a.appendChild(a.ownerDocument.createElement("tbody")):a}function Da(a){return a.type=(null!==n.find.attr(a,"type"))+"/"+a.type,a}function 
Ea(a){var b=ya.exec(a.type);return b?a.type=b[1]:a.removeAttribute("type"),a}function Fa(a,b){if(1===b.nodeType&&n.hasData(a)){var c,d,e,f=n._data(a),g=n._data(b,f),h=f.events;if(h){delete g.handle,g.events={};for(c in h)for(d=0,e=h[c].length;e>d;d++)n.event.add(b,c,h[c][d])}g.data&&(g.data=n.extend({},g.data))}}function Ga(a,b){var c,d,e;if(1===b.nodeType){if(c=b.nodeName.toLowerCase(),!l.noCloneEvent&&b[n.expando]){e=n._data(b);for(d in e.events)n.removeEvent(b,d,e.handle);b.removeAttribute(n.expando)}"script"===c&&b.text!==a.text?(Da(b).text=a.text,Ea(b)):"object"===c?(b.parentNode&&(b.outerHTML=a.outerHTML),l.html5Clone&&a.innerHTML&&!n.trim(b.innerHTML)&&(b.innerHTML=a.innerHTML)):"input"===c&&Z.test(a.type)?(b.defaultChecked=b.checked=a.checked,b.value!==a.value&&(b.value=a.value)):"option"===c?b.defaultSelected=b.selected=a.defaultSelected:"input"!==c&&"textarea"!==c||(b.defaultValue=a.defaultValue)}}function Ha(a,b,c,d){b=f.apply([],b);var e,g,h,i,j,k,m=0,o=a.length,p=o-1,q=b[0],r=n.isFunction(q);if(r||o>1&&"string"==typeof q&&!l.checkClone&&xa.test(q))return a.each(function(e){var f=a.eq(e);r&&(b[0]=q.call(this,e,f.html())),Ha(f,b,c,d)});if(o&&(k=ja(b,a[0].ownerDocument,!1,a,d),e=k.firstChild,1===k.childNodes.length&&(k=e),e||d)){for(i=n.map(ea(k,"script"),Da),h=i.length;o>m;m++)g=k,m!==p&&(g=n.clone(g,!0,!0),h&&n.merge(i,ea(g,"script"))),c.call(a[m],g,m);if(h)for(j=i[i.length-1].ownerDocument,n.map(i,Ea),m=0;h>m;m++)g=i[m],_.test(g.type||"")&&!n._data(g,"globalEval")&&n.contains(j,g)&&(g.src?n._evalUrl&&n._evalUrl(g.src):n.globalEval((g.text||g.textContent||g.innerHTML||"").replace(za,"")));k=e=null}return a}function Ia(a,b,c){for(var d,e=b?n.filter(b,a):a,f=0;null!=(d=e[f]);f++)c||1!==d.nodeType||n.cleanData(ea(d)),d.parentNode&&(c&&n.contains(d.ownerDocument,d)&&fa(ea(d,"script")),d.parentNode.removeChild(d));return a}n.extend({htmlPrefilter:function(a){return a.replace(va,"<$1>")},clone:function(a,b,c){var d,e,f,g,h,i=n.contains(a.ownerDocument,a);if(l.html5Clone||n.isXMLDoc(a)||!ua.test("<"+a.nodeName+">")?f=a.cloneNode(!0):(Ba.innerHTML=a.outerHTML,Ba.removeChild(f=Ba.firstChild)),!(l.noCloneEvent&&l.noCloneChecked||1!==a.nodeType&&11!==a.nodeType||n.isXMLDoc(a)))for(d=ea(f),h=ea(a),g=0;null!=(e=h[g]);++g)d[g]&&Ga(e,d[g]);if(b)if(c)for(h=h||ea(a),d=d||ea(f),g=0;null!=(e=h[g]);g++)Fa(e,d[g]);else Fa(a,f);return d=ea(f,"script"),d.length>0&&fa(d,!i&&ea(a,"script")),d=h=e=null,f},cleanData:function(a,b){for(var d,e,f,g,h=0,i=n.expando,j=n.cache,k=l.attributes,m=n.event.special;null!=(d=a[h]);h++)if((b||M(d))&&(f=d[i],g=f&&j[f])){if(g.events)for(e in g.events)m[e]?n.event.remove(d,e):n.removeEvent(d,e,g.handle);j[f]&&(delete j[f],k||"undefined"==typeof d.removeAttribute?d[i]=void 0:d.removeAttribute(i),c.push(f))}}}),n.fn.extend({domManip:Ha,detach:function(a){return Ia(this,a,!0)},remove:function(a){return Ia(this,a)},text:function(a){return Y(this,function(a){return void 0===a?n.text(this):this.empty().append((this[0]&&this[0].ownerDocument||d).createTextNode(a))},null,a,arguments.length)},append:function(){return Ha(this,arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=Ca(this,a);b.appendChild(a)}})},prepend:function(){return Ha(this,arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=Ca(this,a);b.insertBefore(a,b.firstChild)}})},before:function(){return Ha(this,arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this)})},after:function(){return 
Ha(this,arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this.nextSibling)})},empty:function(){for(var a,b=0;null!=(a=this[b]);b++){1===a.nodeType&&n.cleanData(ea(a,!1));while(a.firstChild)a.removeChild(a.firstChild);a.options&&n.nodeName(a,"select")&&(a.options.length=0)}return this},clone:function(a,b){return a=null==a?!1:a,b=null==b?a:b,this.map(function(){return n.clone(this,a,b)})},html:function(a){return Y(this,function(a){var b=this[0]||{},c=0,d=this.length;if(void 0===a)return 1===b.nodeType?b.innerHTML.replace(ta,""):void 0;if("string"==typeof a&&!wa.test(a)&&(l.htmlSerialize||!ua.test(a))&&(l.leadingWhitespace||!aa.test(a))&&!da[($.exec(a)||["",""])[1].toLowerCase()]){a=n.htmlPrefilter(a);try{for(;d>c;c++)b=this[c]||{},1===b.nodeType&&(n.cleanData(ea(b,!1)),b.innerHTML=a);b=0}catch(e){}}b&&this.empty().append(a)},null,a,arguments.length)},replaceWith:function(){var a=[];return Ha(this,arguments,function(b){var c=this.parentNode;n.inArray(this,a)<0&&(n.cleanData(ea(this)),c&&c.replaceChild(b,this))},a)}}),n.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){n.fn[a]=function(a){for(var c,d=0,e=[],f=n(a),h=f.length-1;h>=d;d++)c=d===h?this:this.clone(!0),n(f[d])[b](c),g.apply(e,c.get());return this.pushStack(e)}});var Ja,Ka={HTML:"block",BODY:"block"};function La(a,b){var c=n(b.createElement(a)).appendTo(b.body),d=n.css(c[0],"display");return c.detach(),d}function Ma(a){var b=d,c=Ka[a];return c||(c=La(a,b),"none"!==c&&c||(Ja=(Ja||n("