Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view

This file was deleted.

Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@
import java.util.Map;
import java.util.Properties;
import org.apache.commons.io.FileUtils;
import org.apache.pinot.integration.tests.custom.GroupByOptionsTest;
import org.apache.pinot.spi.config.table.TableConfig;
import org.apache.pinot.spi.config.table.TableType;
import org.apache.pinot.spi.data.FieldSpec;
Expand All @@ -41,7 +42,7 @@
import static org.apache.pinot.integration.tests.ClusterIntegrationTestUtils.getBrokerQueryApiUrl;


// similar to GroupByOptionsIntegrationTest but this test verifies that default enable group trim option works even
// similar to GroupByOptionsTest but this test verifies that default enable group trim option works even
// if hint is not set in the query
public class GroupByEnableTrimOptionIntegrationTest extends BaseClusterIntegrationTestSet {

Expand Down Expand Up @@ -69,7 +70,7 @@ public void setUp()
TableConfig tableConfig = createOfflineTableConfig();
addTableConfig(tableConfig);

List<File> avroFiles = GroupByOptionsIntegrationTest.createAvroFile(_tempDir);
List<File> avroFiles = GroupByOptionsTest.createAvroFile(_tempDir);
ClusterIntegrationTestUtils.buildSegmentsFromAvro(avroFiles, tableConfig, schema, 0, _segmentDir, _tarDir);
uploadSegments(DEFAULT_TABLE_NAME, _tarDir);

Expand Down Expand Up @@ -206,8 +207,8 @@ public void assertResultAndPlan(String option, String query, String expectedResu
JsonNode result = postV2Query(sql);
JsonNode plan = postV2Query(option + " set explainAskingServers=true; explain plan for " + query);

Assert.assertEquals(GroupByOptionsIntegrationTest.toResultStr(result), expectedResult);
Assert.assertEquals(GroupByOptionsIntegrationTest.toExplainStr(plan, true), expectedPlan);
Assert.assertEquals(GroupByOptionsTest.toResultStr(result), expectedResult);
Assert.assertEquals(GroupByOptionsTest.toExplainStr(plan, true), expectedPlan);
}

private JsonNode postV2Query(String query)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@
import org.apache.commons.io.FileUtils;
import org.apache.helix.model.HelixConfigScope;
import org.apache.helix.model.builder.HelixConfigScopeBuilder;
import org.apache.pinot.integration.tests.custom.StarTreeTest;
import org.apache.pinot.segment.spi.Constants;
import org.apache.pinot.spi.config.table.StarTreeAggregationConfig;
import org.apache.pinot.spi.config.table.StarTreeIndexConfig;
Expand Down Expand Up @@ -226,7 +227,7 @@ private void waitForTableConfigUpdate(Function<TableConfig, Boolean> condition)
*/
private void checkQueryDoesNotUseStarTreeIndex(String query, int expectedResult) throws Exception {
JsonNode explainPlan = postQuery("EXPLAIN PLAN FOR " + query);
assertFalse(explainPlan.toString().contains(StarTreeClusterIntegrationTest.FILTER_STARTREE_INDEX));
assertFalse(explainPlan.toString().contains(StarTreeTest.FILTER_STARTREE_INDEX));
assertEquals(getDistinctCountResult(query), expectedResult);
}

Expand All @@ -244,7 +245,7 @@ private void checkQueryUsesStarTreeIndex(String query, int expectedResult) throw
} catch (Exception e) {
throw new RuntimeException(e);
}
return result.toString().contains(StarTreeClusterIntegrationTest.FILTER_STARTREE_INDEX);
return result.toString().contains(StarTreeTest.FILTER_STARTREE_INDEX);
}, 1000L, 10_000L, "Failed to use star-tree index for query: " + query
);
assertEquals(getDistinctCountResult(query), expectedResult);
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,133 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.pinot.integration.tests.custom;

import com.fasterxml.jackson.databind.JsonNode;
import java.io.File;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import javax.annotation.Nullable;
import org.apache.pinot.spi.config.table.IndexingConfig;
import org.apache.pinot.spi.config.table.TableConfig;
import org.apache.pinot.spi.data.FieldSpec.DataType;
import org.apache.pinot.spi.data.Schema;
import org.apache.pinot.util.TestUtils;
import org.testng.annotations.Test;

import static org.testng.Assert.assertEquals;


/**
 * Integration test verifying query correctness when aggregate metrics are enabled on an LLC
 * real-time table (rows with identical dimension tuples may be pre-aggregated during ingestion).
 */
@Test(suiteName = "CustomClusterIntegrationTest")
public class AggregateMetricsTest extends CustomDataQueryClusterIntegrationTest {

  // Expected totals over the full data set; used both for the readiness check and as assertions.
  private static final long EXPECTED_SUM_AIR_TIME = -165429728L;
  private static final long EXPECTED_SUM_ARR_DELAY = -175625957L;

  @Override
  public String getTableName() {
    return "AggregateMetricsTest";
  }

  @Override
  public boolean isRealtimeTable() {
    return true;
  }

  @Override
  public String getTimeColumnName() {
    return "DaysSinceEpoch";
  }

  @Nullable
  @Override
  protected String getSortedColumn() {
    // Sorting is configured directly on the indexing config instead (see createRealtimeTableConfig).
    return null;
  }

  @Override
  public Schema createSchema() {
    return new Schema.SchemaBuilder()
        .setSchemaName(getTableName())
        .addSingleValueDimension("Carrier", DataType.STRING)
        .addSingleValueDimension("Origin", DataType.STRING)
        .addMetric("AirTime", DataType.LONG)
        .addMetric("ArrDelay", DataType.DOUBLE)
        .addDateTime("DaysSinceEpoch", DataType.INT, "1:DAYS:EPOCH", "1:DAYS")
        .build();
  }

  @Override
  public List<File> createAvroFiles()
      throws Exception {
    return unpackAvroData(_tempDir);
  }

  @Override
  protected TableConfig createRealtimeTableConfig(File sampleAvroFile) {
    TableConfig config = super.createRealtimeTableConfig(sampleAvroFile);
    IndexingConfig indexing = config.getIndexingConfig();
    indexing.setSortedColumn(Collections.singletonList("Carrier"));
    indexing.setInvertedIndexColumns(Collections.singletonList("Origin"));
    indexing.setNoDictionaryColumns(Arrays.asList("AirTime", "ArrDelay"));
    indexing.setRangeIndexColumns(Collections.singletonList("DaysSinceEpoch"));
    indexing.setBloomFilterColumns(Collections.singletonList("Origin"));
    // Enable ingestion-time merging of metric values for rows sharing the same dimension values.
    indexing.setAggregateMetrics(true);
    return config;
  }

  @Override
  protected void waitForAllDocsLoaded(long timeoutMs) {
    // Document count is not a reliable readiness signal when aggregate metrics are enabled
    // (rows can be merged during ingestion), so instead poll until the aggregated sums reach
    // the expected totals.
    String readinessQuery = "SELECT SUM(AirTime), SUM(ArrDelay) FROM " + getTableName();
    TestUtils.waitForCondition(aVoid -> {
      try {
        JsonNode row = postQuery(readinessQuery).get("resultTable").get("rows").get(0);
        return row.get(0).asLong() == EXPECTED_SUM_AIR_TIME
            && row.get(1).asLong() == EXPECTED_SUM_ARR_DELAY;
      } catch (Exception e) {
        // Not ready yet (e.g. table/segments still coming up); keep polling until timeout.
        return null;
      }
    }, 100L, timeoutMs, "Failed to load all documents");
  }

  @Test
  public void testAggregateMetricsQueries()
      throws Exception {
    // Full-table aggregation must match the pre-computed totals exactly.
    JsonNode totalsRow = postQuery("SELECT SUM(AirTime), SUM(ArrDelay) FROM " + getTableName())
        .get("resultTable").get("rows").get(0);
    assertEquals(totalsRow.get(0).asLong(), EXPECTED_SUM_AIR_TIME);
    assertEquals(totalsRow.get(1).asLong(), EXPECTED_SUM_ARR_DELAY);

    // GROUP BY with ORDER BY on the aggregate should execute without exceptions.
    JsonNode groupByResult = postQuery("SELECT SUM(AirTime), DaysSinceEpoch FROM " + getTableName()
        + " GROUP BY DaysSinceEpoch ORDER BY SUM(AirTime) DESC LIMIT 1");
    assertEquals(groupByResult.get("exceptions").size(), 0);

    // Filter combined with GROUP BY should also execute without exceptions.
    JsonNode filteredResult = postQuery("SELECT Origin, SUM(ArrDelay) FROM " + getTableName()
        + " WHERE Carrier = 'AA' GROUP BY Origin ORDER BY Origin LIMIT 10");
    assertEquals(filteredResult.get("exceptions").size(), 0);
  }
}
Loading
Loading