use pause / resume in DistributingDownstream
This eliminates the Queue from the DistributingDownstream.
Instead, the StreamBuckets are used directly to store the
rows.

In addition, the Broadcasting/Modulo distribution has been
decoupled from the request handling.
mfussenegger committed Aug 17, 2015
1 parent b7e8936 commit 557a6f5
Showing 21 changed files with 798 additions and 722 deletions.
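The decoupling mentioned in the commit message is easiest to picture with a small example: instead of the request handling deciding how rows reach the downstream nodes, a dedicated builder either copies every row into all N buckets (broadcast, see BroadcastingPageBuilder further down in this commit) or places each row into exactly one of N buckets chosen by hash modulo. Below is a minimal, self-contained sketch of the modulo variant using only JDK types; the class name and the plain Object[] rows are illustrative assumptions, the actual commit operates on Crate's Row, Bucket and StreamBucket abstractions.

import java.util.ArrayList;
import java.util.List;

/**
 * Illustrative sketch only (not code from this commit): distributes rows over
 * N buckets by hash-modulo of one key column, the counterpart to broadcasting.
 */
public class ModuloDistributionSketch {

    private final List<List<Object[]>> buckets;
    private final int keyColumn;

    public ModuloDistributionSketch(int numBuckets, int keyColumn) {
        this.buckets = new ArrayList<>(numBuckets);
        for (int i = 0; i < numBuckets; i++) {
            buckets.add(new ArrayList<Object[]>());
        }
        this.keyColumn = keyColumn;
    }

    /** Each row ends up in exactly one bucket; broadcasting would copy it into all of them. */
    public void add(Object[] row) {
        Object key = row[keyColumn];
        int idx = key == null ? 0 : Math.floorMod(key.hashCode(), buckets.size());
        buckets.get(idx).add(row);
    }

    /** One list per downstream node. */
    public List<List<Object[]>> buildBuckets() {
        return buckets;
    }
}

The BroadcastingPageBuilder added by this commit is the broadcast counterpart: it builds a single bucket and hands the same instance out N times.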
11 changes: 4 additions & 7 deletions sql/src/main/java/io/crate/action/job/ContextPreparer.java
@@ -33,10 +33,7 @@
import io.crate.jobs.JobExecutionContext;
import io.crate.jobs.PageDownstreamContext;
import io.crate.metadata.Routing;
-import io.crate.operation.NodeOperation;
-import io.crate.operation.PageDownstream;
-import io.crate.operation.PageDownstreamFactory;
-import io.crate.operation.Paging;
+import io.crate.operation.*;
import io.crate.operation.collect.JobCollectContext;
import io.crate.operation.collect.MapSideDataCollectOperation;
import io.crate.operation.count.CountOperation;
@@ -150,7 +147,7 @@ public Void visitCountPhase(CountPhase phase, PreparerContext context) {
@Override
public Void visitMergePhase(final MergePhase phase, final PreparerContext context) {
RamAccountingContext ramAccountingContext = RamAccountingContext.forExecutionPhase(circuitBreaker, phase);
-ResultProvider downstream = resultProviderFactory.createDownstream(
+RowDownstream downstream = resultProviderFactory.createDownstream(
context.nodeOperation,
phase.jobId(),
Paging.getWeightedPageSize(Paging.PAGE_SIZE, 1.0d / phase.executionNodes().size()));
@@ -188,10 +185,10 @@ public Void visitCollectPhase(final CollectPhase phase, final PreparerContext co
);
LOGGER.trace("{} setting node page size to: {}, numShards in total: {} shards on node: {}",
localNodeId, pageSize, numTotalShards, numShardsOnNode);
-ResultProvider downstream = resultProviderFactory.createDownstream(context.nodeOperation, phase.jobId(), pageSize);
+RowDownstream downstream = resultProviderFactory.createDownstream(context.nodeOperation, phase.jobId(), pageSize);

if (ExecutionPhases.hasDirectResponseDownstream(context.nodeOperation.downstreamNodes())) {
-context.directResultFuture = downstream.result();
+context.directResultFuture = ((ResultProvider) downstream).result();
}
final JobCollectContext jobCollectContext = new JobCollectContext(
context.jobId,
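The two hunks above widen the declared type from ResultProvider to RowDownstream; the explicit cast in the direct-response branch indicates that result() is only available on ResultProvider. A rough reconstruction of that assumed relationship follows; the sketch names, the member comments and the future's element type are guesses inferred from this diff, not code from the commit.

import com.google.common.util.concurrent.ListenableFuture;
import io.crate.core.collections.Bucket;

// Assumed shape only: every ResultProvider can consume rows like any
// RowDownstream, and additionally exposes the collected result as a future
// for the direct-response case.
interface RowDownstreamSketch {
    // accepts rows from upstream operations (registration details omitted)
}

interface ResultProviderSketch extends RowDownstreamSketch {
    ListenableFuture<Bucket> result();
}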

This file was deleted.

@@ -0,0 +1,83 @@
/*
* Licensed to Crate.IO GmbH ("Crate") under one or more contributor
* license agreements. See the NOTICE file distributed with this work for
* additional information regarding copyright ownership. Crate licenses
* this file to you under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. You may
* obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* However, if you have executed another commercial license agreement
* with Crate these terms will supersede the license and you may use the
* software solely pursuant to the terms of the relevant commercial agreement.
*/

package io.crate.executor.transport.distributed;

import com.google.common.base.Throwables;
import io.crate.Streamer;
import io.crate.core.collections.Bucket;
import io.crate.core.collections.Row;
import io.crate.executor.transport.StreamBucket;

import java.io.IOException;
import java.util.ArrayList;

/**
 * PageBuilder that returns N buckets, where N is the number of buckets specified in the constructor.
 * Internally only one bucket is built; the same instance is returned N times.
 */
public class BroadcastingPageBuilder implements PageBuilder {

private final int numBuckets;
private final StreamBucket.Builder bucketBuilder;
private final ArrayList<Bucket> builtBuckets;
private volatile int size = 0;

public BroadcastingPageBuilder(Streamer<?>[] streamers, int numBuckets) {
this.numBuckets = numBuckets;
this.bucketBuilder = new StreamBucket.Builder(streamers);
this.builtBuckets = new ArrayList<>(numBuckets);
}

@Override
public void add(Row row) {
try {
synchronized (this) {
bucketBuilder.add(row);
size++;
}
} catch (IOException e) {
throw Throwables.propagate(e);
}
}

@Override
public int size() {
return size;
}

@Override
public synchronized Iterable<Bucket> buildBuckets() {
final Bucket bucket;
try {
bucket = bucketBuilder.build();
bucketBuilder.reset();
} catch (IOException e) {
throw Throwables.propagate(e);
}
builtBuckets.clear();
for (int i = 0; i < numBuckets; i++) {
builtBuckets.add(bucket);
}
size = 0;
return builtBuckets;
}
}
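The PageBuilder interface that BroadcastingPageBuilder implements is not part of this excerpt. Judging from the overridden methods, it presumably looks roughly like the following reconstruction; the method signatures are taken from the overrides above, while the package placement and the javadoc comments are assumptions.

package io.crate.executor.transport.distributed;

import io.crate.core.collections.Bucket;
import io.crate.core.collections.Row;

/** Assumed shape, inferred from the @Override methods of BroadcastingPageBuilder. */
public interface PageBuilder {

    /** Buffer one row for the page currently being built. */
    void add(Row row);

    /** Number of rows buffered since the last buildBuckets() call. */
    int size();

    /** Finish the current page and return one Bucket per downstream. */
    Iterable<Bucket> buildBuckets();
}

A modulo-distributing implementation (its exact name in this commit is not visible in this excerpt) would satisfy the same contract but place each row into exactly one of the N buckets, as sketched after the commit message above.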