Revert "AMBARI-23804 : Refine AMS HBase region splitting calculation based on UUID work."

This reverts commit 8bb3bcab2bbb2a582ed0db25c86881a9e07803b7.
Aravindan Vijayan committed May 11, 2018
1 parent 77b28f9 commit 5c303d06558c4f609bcd1f9b46e892a00bd3b418
Showing 21 changed files with 316 additions and 815 deletions.
@@ -17,6 +17,7 @@
*/
package org.apache.ambari.metrics.core.timeline;

import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.DEFAULT_TOPN_HOSTS_LIMIT;
import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.USE_GROUPBY_AGGREGATOR_QUERIES;
import static org.apache.ambari.metrics.core.timeline.availability.AggregationTaskRunner.ACTUAL_AGGREGATOR_NAMES;

@@ -34,6 +35,7 @@
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadFactory;
@@ -109,18 +111,15 @@ private TimelineMetricDistributedCache startCacheNode() throws MalformedURLExcep
private synchronized void initializeSubsystem() {
if (!isInitialized) {
hBaseAccessor = new PhoenixHBaseAccessor(null);

// Initialize metadata
// Initialize schema
hBaseAccessor.initMetricSchema();
// Initialize metadata from store
try {
metricMetadataManager = new TimelineMetricMetadataManager(hBaseAccessor);
} catch (MalformedURLException | URISyntaxException e) {
throw new ExceptionInInitializerError("Unable to initialize metadata manager");
}
metricMetadataManager.initializeMetadata();

// Initialize metric schema
hBaseAccessor.initMetricSchema();

// Initialize policies before TTL update
hBaseAccessor.initPoliciesAndTTL();
// Start HA service
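
A minimal sketch of how initializeSubsystem() reads once the revert is applied, assuming the unmarked pairs in the hunk above follow the usual deletion-before-addition order of a rendered diff (the schema is initialized before the metadata manager again); everything past the HA-service comment continues unchanged beyond this hunk:

private synchronized void initializeSubsystem() {
  if (!isInitialized) {
    hBaseAccessor = new PhoenixHBaseAccessor(null);
    // Initialize schema
    hBaseAccessor.initMetricSchema();
    // Initialize metadata from store
    try {
      metricMetadataManager = new TimelineMetricMetadataManager(hBaseAccessor);
    } catch (MalformedURLException | URISyntaxException e) {
      throw new ExceptionInInitializerError("Unable to initialize metadata manager");
    }
    metricMetadataManager.initializeMetadata();

    // Initialize policies before TTL update
    hBaseAccessor.initPoliciesAndTTL();
    // Start HA service (unchanged beyond this hunk)
  }
}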
@@ -396,10 +395,6 @@ static Multimap<String, List<Function>> parseMetricNamesToAggregationFunctions(L
return metricsFunctions;
}

public void putMetricsSkipCache(TimelineMetrics metrics) throws SQLException, IOException {
hBaseAccessor.insertMetricRecordsWithMetadata(metricMetadataManager, metrics, true);
}

@Override
public TimelinePutResponse putMetrics(TimelineMetrics metrics) throws SQLException, IOException {
// Error indicated by the Sql exception

Large diffs are not rendered by default.

@@ -237,6 +237,12 @@ public class TimelineMetricConfiguration {
public static final String WATCHER_MAX_FAILURES =
"timeline.metrics.service.watcher.max.failures";

public static final String PRECISION_TABLE_SPLIT_POINTS =
"timeline.metrics.host.aggregate.splitpoints";

public static final String AGGREGATE_TABLE_SPLIT_POINTS =
"timeline.metrics.cluster.aggregate.splitpoints";

public static final String AGGREGATORS_SKIP_BLOCK_CACHE =
"timeline.metrics.aggregators.skip.blockcache.enabled";

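The two split-point keys restored above carry lists of metric names used to pre-split the precision and aggregate HBase tables at creation time. A hypothetical sketch, assuming the value is a comma-separated list of metric names that is turned into a Phoenix SPLIT ON clause; the helper below is illustrative, not the actual PhoenixHBaseAccessor code:

import org.apache.hadoop.conf.Configuration;

public final class SplitPointSketch {

  // Key restored by this revert (see the hunk above).
  static final String PRECISION_TABLE_SPLIT_POINTS =
      "timeline.metrics.host.aggregate.splitpoints";

  // Builds e.g. " SPLIT ON ('cpu_user', 'mem_free')" from the configured metric names.
  static String splitOnClause(Configuration metricsConf) {
    String[] splits = metricsConf.getTrimmedStrings(PRECISION_TABLE_SPLIT_POINTS);
    if (splits == null || splits.length == 0) {
      return ""; // no pre-splitting requested
    }
    StringBuilder sb = new StringBuilder(" SPLIT ON (");
    for (int i = 0; i < splits.length; i++) {
      if (i > 0) {
        sb.append(", ");
      }
      sb.append('\'').append(splits[i]).append('\'');
    }
    return sb.append(')').toString();
  }
}
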
@@ -255,6 +261,12 @@ public class TimelineMetricConfiguration {
public static final String TIMELINE_METRICS_SINK_COLLECTION_PERIOD =
"timeline.metrics.sink.collection.period";

public static final String TIMELINE_METRICS_PRECISION_TABLE_DURABILITY =
"timeline.metrics.precision.table.durability";

public static final String TIMELINE_METRICS_AGGREGATE_TABLES_DURABILITY =
"timeline.metrics.aggregate.tables.durability";

public static final String TIMELINE_METRICS_WHITELIST_ENABLED =
"timeline.metrics.whitelisting.enabled";

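The two durability keys restored above select the HBase WAL durability (for example SKIP_WAL or ASYNC_WAL) for the precision and aggregate tables. A minimal sketch of applying such a value with the HBase 2.x client API; the method and table name below are placeholders, not the AMS code:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public final class DurabilitySketch {

  // Applies the configured durability string, e.g. "ASYNC_WAL", to an existing table.
  static void applyDurability(Admin admin, String table, String durability) throws Exception {
    if (durability == null || durability.isEmpty()) {
      return; // keep the table's current setting
    }
    TableDescriptor current = admin.getDescriptor(TableName.valueOf(table));
    TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
        .setDurability(Durability.valueOf(durability))
        .build();
    admin.modifyTable(modified);
  }
}
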
@@ -273,9 +285,33 @@ public class TimelineMetricConfiguration {
public static final String TIMELINE_METRICS_APPS_WHITELIST =
"timeline.metrics.apps.whitelist";

public static final String HBASE_BLOCKING_STORE_FILES =
"hbase.hstore.blockingStoreFiles";

public static final String DEFAULT_TOPN_HOSTS_LIMIT =
"timeline.metrics.default.topn.hosts.limit";

public static final String TIMELINE_METRIC_AGGREGATION_SQL_FILTERS =
"timeline.metrics.cluster.aggregation.sql.filters";

public static final String TIMELINE_METRICS_HBASE_AGGREGATE_TABLE_COMPACTION_POLICY_KEY =
"timeline.metrics.hbase.aggregate.table.compaction.policy.key";

public static final String TIMELINE_METRICS_HBASE_AGGREGATE_TABLE_COMPACTION_POLICY_CLASS =
"timeline.metrics.hbase.aggregate.table.compaction.policy.class";

public static final String TIMELINE_METRICS_AGGREGATE_TABLE_HBASE_BLOCKING_STORE_FILES =
"timeline.metrics.aggregate.table.hbase.hstore.blockingStoreFiles";

public static final String TIMELINE_METRICS_HBASE_PRECISION_TABLE_COMPACTION_POLICY_KEY =
"timeline.metrics.hbase.precision.table.compaction.policy.key";

public static final String TIMELINE_METRICS_HBASE_PRECISION_TABLE_COMPACTION_POLICY_CLASS =
"timeline.metrics.hbase.precision.table.compaction.policy.class";

public static final String TIMELINE_METRICS_PRECISION_TABLE_HBASE_BLOCKING_STORE_FILES =
"timeline.metrics.precision.table.hbase.hstore.blockingStoreFiles";

public static final String TIMELINE_METRICS_SUPPORT_MULTIPLE_CLUSTERS =
"timeline.metrics.support.multiple.clusters";

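The compaction-policy and blockingStoreFiles keys restored above allow per-table overrides of HBase store settings for the precision and aggregate tables. A hypothetical sketch of such an override with the HBase 2.x client API, reusing the store-engine compaction key and FIFOCompactionPolicy class name that also appear later in this file; the method is illustrative, not the AMS code:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public final class CompactionPolicySketch {

  static final String HSTORE_COMPACTION_CLASS_KEY =
      "hbase.hstore.defaultengine.compactionpolicy.class";
  static final String FIFO_COMPACTION_POLICY_CLASS =
      "org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy";
  static final String BLOCKING_STORE_FILES_KEY = "hbase.hstore.blockingStoreFiles";

  // Sets FIFO compaction and a blocking-store-files limit as table-level attributes.
  static void applyFifoCompaction(Admin admin, String table, int blockingStoreFiles)
      throws Exception {
    TableDescriptor current = admin.getDescriptor(TableName.valueOf(table));
    TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
        .setValue(HSTORE_COMPACTION_CLASS_KEY, FIFO_COMPACTION_POLICY_CLASS)
        .setValue(BLOCKING_STORE_FILES_KEY, String.valueOf(blockingStoreFiles))
        .build();
    admin.modifyTable(modified);
  }
}
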
@@ -310,23 +346,14 @@ public class TimelineMetricConfiguration {

public static final String TRANSIENT_METRIC_PATTERNS = "timeline.metrics.transient.metric.patterns";

public static final String TIMELINE_METRIC_INITIAL_CONFIGURED_MASTER_COMPONENTS = "timeline.metrics.initial.configured.master.components";
public static final String TIMELINE_METRIC_INITIAL_CONFIGURED_SLAVE_COMPONENTS = "timeline.metrics.initial.configured.slave.components";

public static final String KAFKA_SERVERS = "timeline.metrics.external.sink.kafka.bootstrap.servers";
public static final String KAFKA_ACKS = "timeline.metrics.external.sink.kafka.acks";
public static final String KAFKA_RETRIES = "timeline.metrics.external.sink.kafka.bootstrap.retries";
public static final String KAFKA_BATCH_SIZE = "timeline.metrics.external.sink.kafka.batch.size";
public static final String KAFKA_LINGER_MS = "timeline.metrics.external.sink.kafka.linger.ms";
public static final String KAFKA_BUFFER_MEM = "timeline.metrics.external.sink.kafka.buffer.memory";
public static final String KAFKA_SINK_TIMEOUT_SECONDS = "timeline.metrics.external.sink.kafka.timeout.seconds";

public static final String HSTORE_COMPACTION_CLASS_KEY = "hbase.hstore.defaultengine.compactionpolicy.class";
public static final String HSTORE_ENGINE_CLASS = "hbase.hstore.engine.class";
public static final String FIFO_COMPACTION_POLICY_CLASS = "org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy";
public static final String DATE_TIERED_COMPACTION_POLICY = "org.apache.hadoop.hbase.regionserver.DateTieredStoreEngine";
public static final String BLOCKING_STORE_FILES_KEY = "hbase.hstore.blockingStoreFiles";


private Configuration hbaseConf;
private Configuration metricsConf;
private Configuration metricsSslConf;

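The timeline.metrics.external.sink.kafka.* keys in the hunk above mirror standard Kafka producer settings. A hypothetical sketch of mapping them onto a KafkaProducer; the defaults and serializers below are illustrative assumptions, not AMS values:

import java.util.Properties;
import org.apache.hadoop.conf.Configuration;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;

public final class KafkaSinkSketch {

  static KafkaProducer<String, String> buildProducer(Configuration conf) {
    Properties props = new Properties();
    // Each AMS key maps onto the corresponding producer setting; defaults are illustrative.
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,
        conf.get("timeline.metrics.external.sink.kafka.bootstrap.servers", "localhost:9092"));
    props.put(ProducerConfig.ACKS_CONFIG,
        conf.get("timeline.metrics.external.sink.kafka.acks", "all"));
    props.put(ProducerConfig.RETRIES_CONFIG,
        conf.getInt("timeline.metrics.external.sink.kafka.bootstrap.retries", 3));
    props.put(ProducerConfig.BATCH_SIZE_CONFIG,
        conf.getInt("timeline.metrics.external.sink.kafka.batch.size", 16384));
    props.put(ProducerConfig.LINGER_MS_CONFIG,
        conf.getInt("timeline.metrics.external.sink.kafka.linger.ms", 5));
    props.put(ProducerConfig.BUFFER_MEMORY_CONFIG,
        conf.getLong("timeline.metrics.external.sink.kafka.buffer.memory", 33554432L));
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
        "org.apache.kafka.common.serialization.StringSerializer");
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
        "org.apache.kafka.common.serialization.StringSerializer");
    return new KafkaProducer<>(props);
  }
}
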
This file was deleted.

@@ -45,13 +45,13 @@ public class TimelineMetricStoreWatcher implements Runnable {
private static int failures = 0;
private final TimelineMetricConfiguration configuration;

private HBaseTimelineMetricsService timelineMetricStore;
private TimelineMetricStore timelineMetricStore;

//used to call timelineMetricStore blocking methods with timeout
private ExecutorService executor = Executors.newSingleThreadExecutor();


public TimelineMetricStoreWatcher(HBaseTimelineMetricsService timelineMetricStore,
public TimelineMetricStoreWatcher(TimelineMetricStore timelineMetricStore,
TimelineMetricConfiguration configuration) {
this.timelineMetricStore = timelineMetricStore;
this.configuration = configuration;
@@ -100,7 +100,7 @@ private boolean checkMetricStore() {

Callable<TimelineMetric> task = new Callable<TimelineMetric>() {
public TimelineMetric call() throws Exception {
timelineMetricStore.putMetricsSkipCache(metrics);
timelineMetricStore.putMetrics(metrics);
TimelineMetrics timelineMetrics = timelineMetricStore.getTimelineMetrics(
Collections.singletonList(FAKE_METRIC_NAME), Collections.singletonList(FAKE_HOSTNAME),
FAKE_APP_ID, null, startTime - delay * 2 * 1000,

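After the revert, the watcher goes back to exercising the store through the TimelineMetricStore interface (putMetrics rather than the concrete class's putMetricsSkipCache). A minimal sketch of the submit-and-wait pattern visible in the hunk, where the Callable runs on the single-thread executor and a slow or failing call counts as a watcher failure; the timeout value is a placeholder:

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public final class WatcherTimeoutSketch {

  private final ExecutorService executor = Executors.newSingleThreadExecutor();

  // Submits a blocking store call and reports whether it completed within the timeout.
  <T> boolean runWithTimeout(Callable<T> task, long timeoutSeconds) {
    Future<T> future = executor.submit(task);
    try {
      future.get(timeoutSeconds, TimeUnit.SECONDS);
      return true;            // store answered in time
    } catch (Exception e) {   // timeout, interruption, or store error
      future.cancel(true);
      return false;
    }
  }
}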