[ASTERIXDB-3144][RT] Pass partitions map to inverted index
- user model changes: no
- storage format changes: no
- interface changes: no

Details:
Pass partitions map to the inverted index runtime.
- rename a few methods.

Change-Id: I6ad1b0cd79f0f5e8e15da83330b8a52f9ac0108d
Reviewed-on: https://asterix-gerrit.ics.uci.edu/c/asterixdb/+/17463
Integration-Tests: Jenkins <jenkins@fulliautomatix.ics.uci.edu>
Tested-by: Jenkins <jenkins@fulliautomatix.ics.uci.edu>
Reviewed-by: Ali Alsuliman <ali.al.solaiman@gmail.com>
Reviewed-by: Murtadha Hubail <mhubail@apache.org>
AliSolaiman committed Apr 6, 2023
1 parent dfcb99f commit ba2d8bd
Showing 18 changed files with 178 additions and 175 deletions.
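
For context on the new partitionsMap argument in the inverted-index hunk below: a minimal sketch of how such an int[][] map could be built. The identity mapping and the helper name buildIdentityPartitionsMap are illustrative assumptions only; the commit itself obtains the map via MetadataProvider.getNumPartitions and MetadataProvider.getPartitionsMap as shown in the diff.

    // Illustrative sketch, not the actual MetadataProvider implementation:
    // assumes operator partition i reads exactly one storage partition i.
    static int[][] buildIdentityPartitionsMap(int numPartitions) {
        int[][] partitionsMap = new int[numPartitions][];
        for (int i = 0; i < numPartitions; i++) {
            partitionsMap[i] = new int[] { i };
        }
        return partitionsMap;
    }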
@@ -162,7 +162,7 @@ public void contributeRuntimeOperator(IHyracksJobBuilder builder, JobGenContext
String.valueOf(unnestMap.getOperatorTag()));
}

- Pair<IOperatorDescriptor, AlgebricksPartitionConstraint> btreeSearch = metadataProvider.buildBtreeRuntime(
+ Pair<IOperatorDescriptor, AlgebricksPartitionConstraint> btreeSearch = metadataProvider.getBtreeSearchRuntime(
builder.getJobSpec(), opSchema, typeEnv, context, jobGenParams.getRetainInput(), retainMissing,
nonMatchWriterFactory, dataset, jobGenParams.getIndexName(), lowKeyIndexes, highKeyIndexes,
jobGenParams.isLowKeyInclusive(), jobGenParams.isHighKeyInclusive(), propagateFilter,
@@ -186,14 +186,17 @@ public Pair<IOperatorDescriptor, AlgebricksPartitionConstraint> buildInvertedInd
IIndexDataflowHelperFactory dataflowHelperFactory = new IndexDataflowHelperFactory(
metadataProvider.getStorageComponentProvider().getStorageManager(), secondarySplitsAndConstraint.first);

- LSMInvertedIndexSearchOperatorDescriptor invIndexSearchOp = new LSMInvertedIndexSearchOperatorDescriptor(
- jobSpec, outputRecDesc, queryField, dataflowHelperFactory, queryTokenizerFactory,
- fullTextConfigEvaluatorFactory, searchModifierFactory, retainInput, retainMissing,
- nonMatchWriterFactory,
- dataset.getSearchCallbackFactory(metadataProvider.getStorageComponentProvider(), secondaryIndex,
- IndexOperation.SEARCH, null),
- minFilterFieldIndexes, maxFilterFieldIndexes, isFullTextSearchQuery, numPrimaryKeys,
- propagateIndexFilter, nonFilterWriterFactory, frameLimit);
+ int numPartitions = MetadataProvider.getNumPartitions(secondarySplitsAndConstraint.second);
+ int[][] partitionsMap = MetadataProvider.getPartitionsMap(numPartitions);
+
+ LSMInvertedIndexSearchOperatorDescriptor invIndexSearchOp =
+ new LSMInvertedIndexSearchOperatorDescriptor(jobSpec, outputRecDesc, queryField, dataflowHelperFactory,
+ queryTokenizerFactory, fullTextConfigEvaluatorFactory, searchModifierFactory, retainInput,
+ retainMissing, nonMatchWriterFactory,
+ dataset.getSearchCallbackFactory(metadataProvider.getStorageComponentProvider(), secondaryIndex,
+ IndexOperation.SEARCH, null),
+ minFilterFieldIndexes, maxFilterFieldIndexes, isFullTextSearchQuery, numPrimaryKeys,
+ propagateIndexFilter, nonFilterWriterFactory, frameLimit, partitionsMap);
return new Pair<>(invIndexSearchOp, secondarySplitsAndConstraint.second);
}
}
@@ -106,7 +106,7 @@ public void contributeRuntimeOperator(IHyracksJobBuilder builder, JobGenContext
}

Pair<IOperatorDescriptor, AlgebricksPartitionConstraint> rtreeSearch =
- mp.buildRtreeRuntime(builder.getJobSpec(), outputVars, opSchema, typeEnv, context,
+ mp.getRtreeSearchRuntime(builder.getJobSpec(), outputVars, opSchema, typeEnv, context,
jobGenParams.getRetainInput(), retainMissing, nonMatchWriterFactory, dataset,
jobGenParams.getIndexName(), keyIndexes, propagateIndexFilter, nonFilterWriterFactory,
minFilterFieldIndexes, maxFilterFieldIndexes, unnestMap.getGenerateCallBackProceedResultVar());
@@ -105,9 +105,9 @@ public Pair<IOperatorDescriptor, AlgebricksPartitionConstraint> buildDatasourceS
IVariableTypeEnvironment typeEnv, JobGenContext context, JobSpecification jobSpec, Object implConfig,
IProjectionFiltrationInfo<?> projectionInfo, IProjectionFiltrationInfo<?> metaProjectionInfo)
throws AlgebricksException {
- return metadataProvider.buildBtreeRuntime(jobSpec, opSchema, typeEnv, context, true, false, null, ds, indexName,
- null, null, true, true, false, null, null, null, tupleFilterFactory, outputLimit, false, false,
- DefaultTupleProjectorFactory.INSTANCE, false);
+ return metadataProvider.getBtreeSearchRuntime(jobSpec, opSchema, typeEnv, context, true, false, null, ds,
+ indexName, null, null, true, true, false, null, null, null, tupleFilterFactory, outputLimit, false,
+ false, DefaultTupleProjectorFactory.INSTANCE, false);
}

@Override
@@ -142,7 +142,7 @@ private static Pair<JobSpecification, ITypedAdapterFactory> buildFeedIntakeJobSp
IOperatorDescriptor feedIngestor;
AlgebricksPartitionConstraint ingesterPc;
Triple<IOperatorDescriptor, AlgebricksPartitionConstraint, ITypedAdapterFactory> t =
- metadataProvider.buildFeedIntakeRuntime(spec, feed, policyAccessor);
+ metadataProvider.getFeedIntakeRuntime(spec, feed, policyAccessor);
feedIngestor = t.first;
ingesterPc = t.second;
adapterFactory = t.third;
@@ -139,7 +139,7 @@ public Pair<IOperatorDescriptor, AlgebricksPartitionConstraint> buildDatasourceS
properties.put(KEY_EXTERNAL_SCAN_BUFFER_SIZE, String.valueOf(externalScanBufferSize));
ITypedAdapterFactory adapterFactory = metadataProvider.getConfiguredAdapterFactory(externalDataset,
edd.getAdapter(), properties, (ARecordType) itemType, null, context.getWarningCollector());
- return metadataProvider.buildExternalDatasetDataScannerRuntime(jobSpec, itemType, adapterFactory,
+ return metadataProvider.getExternalDatasetScanRuntime(jobSpec, itemType, adapterFactory,
tupleFilterFactory, outputLimit);
case INTERNAL:
DataSourceId id = getId();
@@ -163,7 +163,7 @@ public Pair<IOperatorDescriptor, AlgebricksPartitionConstraint> buildDatasourceS

int[] minFilterFieldIndexes = createFilterIndexes(minFilterVars, opSchema);
int[] maxFilterFieldIndexes = createFilterIndexes(maxFilterVars, opSchema);
- return metadataProvider.buildBtreeRuntime(jobSpec, opSchema, typeEnv, context, true, false, null,
+ return metadataProvider.getBtreeSearchRuntime(jobSpec, opSchema, typeEnv, context, true, false, null,
((DatasetDataSource) dataSource).getDataset(), primaryIndex.getIndexName(), null, null, true,
true, false, null, minFilterFieldIndexes, maxFilterFieldIndexes, tupleFilterFactory,
outputLimit, false, false, tupleProjectorFactory, false);
@@ -115,8 +115,8 @@ public Pair<IOperatorDescriptor, AlgebricksPartitionConstraint> buildDatasourceS
dataParserFactory.setRecordType(RecordUtil.FULLY_OPEN_RECORD_TYPE);
dataParserFactory.configure(Collections.emptyMap());
adapterFactory.configure(factory, dataParserFactory);
- return metadataProvider.buildExternalDatasetDataScannerRuntime(jobSpec, itemType, adapterFactory,
- tupleFilterFactory, outputLimit);
+ return metadataProvider.getExternalDatasetScanRuntime(jobSpec, itemType, adapterFactory, tupleFilterFactory,
+ outputLimit);
}

protected abstract IDatasourceFunction createFunction(MetadataProvider metadataProvider,
@@ -147,7 +147,7 @@ public Pair<IOperatorDescriptor, AlgebricksPartitionConstraint> buildDatasourceS
ITypedAdapterFactory adapterFactory = metadataProvider.getConfiguredAdapterFactory(alds.getTargetDataset(),
alds.getAdapter(), alds.getAdapterProperties(), itemType, null, context.getWarningCollector());
RecordDescriptor rDesc = JobGenHelper.mkRecordDescriptor(typeEnv, opSchema, context);
- return metadataProvider.buildLoadableDatasetScan(jobSpec, adapterFactory, rDesc);
+ return metadataProvider.getLoadableDatasetScanRuntime(jobSpec, adapterFactory, rDesc);
}

@Override
