Skip to content
Permalink
Browse files
Merge branch 'gerrit/neo'
Change-Id: Id22e3b96e31924a90c2cc7c7dee2aa828ba18ac6
  • Loading branch information
mblow committed Feb 15, 2022
2 parents b3fb199 + a6c858b commit 349915efbc05b318236dc0d18f132a0be3eee491
Showing 13 changed files with 42 additions and 23 deletions.
@@ -45,6 +45,7 @@
import org.apache.asterix.translator.ResultProperties;
import org.apache.asterix.translator.SessionOutput;
import org.apache.hyracks.api.application.INCServiceContext;
import org.apache.hyracks.api.exceptions.HyracksDataException;
import org.apache.hyracks.api.exceptions.Warning;
import org.apache.hyracks.api.util.ExceptionUtils;
import org.apache.hyracks.http.api.IChannelClosedHandler;
@@ -76,6 +77,7 @@ protected void executeStatement(IServletRequest request, IRequestReference reque
QueryServiceRequestParameters param, RequestExecutionState executionState,
Map<String, String> optionalParameters, Map<String, byte[]> statementParameters,
ResponsePrinter responsePrinter, List<Warning> warnings) throws Exception {
ensureOptionalParameters(optionalParameters);
// Running on NC -> send 'execute' message to CC
INCServiceContext ncCtx = (INCServiceContext) serviceCtx;
INCMessageBroker ncMb = (INCMessageBroker) ncCtx.getMessageBroker();
@@ -134,6 +136,10 @@ protected void executeStatement(IServletRequest request, IRequestReference reque
buildResponseResults(responsePrinter, sessionOutput, responseMsg.getExecutionPlans(), warnings);
}

/**
 * Extension hook invoked before the statement is executed, giving subclasses a chance to
 * validate or reject the optional request parameters. The default implementation accepts
 * everything and does nothing.
 *
 * @param optionalParameters the optional parameters supplied with the request
 * @throws HyracksDataException if a subclass implementation rejects the supplied parameters
 */
protected void ensureOptionalParameters(Map<String, String> optionalParameters) throws HyracksDataException {

}

protected ExecuteStatementRequestMessage createRequestMessage(IServletRequest request,
IRequestReference requestReference, String statementsText, SessionOutput sessionOutput,
ResultProperties resultProperties, QueryServiceRequestParameters param,
@@ -295,8 +295,7 @@ private void writeException(Exception e, IServletResponse response) {
responseWriter.flush();
}

protected boolean isRequestPermittedForWrite(IServletRequest request, IServletResponse response)
throws IOException {
protected boolean isRequestPermitted(IServletRequest request, IServletResponse response) throws IOException {
if (!isRequestOnLoopback(request)) {
rejectForbidden(response);
return false;
@@ -322,14 +321,14 @@ protected void rejectForbidden(IServletResponse response) throws IOException {

@Override
protected void post(IServletRequest request, IServletResponse response) throws IOException {
if (isRequestPermittedForWrite(request, response)) {
if (isRequestPermitted(request, response)) {
handleModification(request, response, LibraryOperation.UPSERT);
}
}

@Override
protected void delete(IServletRequest request, IServletResponse response) throws IOException {
if (isRequestPermittedForWrite(request, response)) {
if (isRequestPermitted(request, response)) {
handleModification(request, response, LibraryOperation.DELETE);
}
}
@@ -57,14 +57,14 @@ public class ReplicaManager implements IReplicaManager {
* current replicas
*/
private final Map<ReplicaIdentifier, PartitionReplica> replicas = new HashMap<>();
private final Set<Integer> nodeOwnedPartitions = new HashSet<>();
private final Set<Integer> nodeOriginatedPartitions = new HashSet<>();

/**
 * Creates a replica manager for the supplied partitions and records which
 * partitions originated on this node.
 * Defect fixed: the scraped diff retained both the pre-rename
 * ({@code setNodeOwnedPartitions}) and post-rename ({@code setNodeOriginatedPartitions})
 * calls, which would initialize twice / not compile; only the renamed call is kept.
 *
 * @param appCtx     the NC application context
 * @param partitions the partitions hosted by this node
 */
public ReplicaManager(INcApplicationContext appCtx, Set<Integer> partitions) {
    this.appCtx = appCtx;
    for (Integer partition : partitions) {
        // one lock object per partition, used to serialize per-partition operations
        this.partitions.put(partition, new Object());
    }
    setNodeOriginatedPartitions(appCtx);
}

@Override
@@ -163,8 +163,8 @@ public synchronized List<IPartitionReplica> getReplicas() {
}

@Override
public boolean isPartitionOwner(int partition) {
return nodeOwnedPartitions.contains(partition);
public boolean isPartitionOrigin(int partition) {
return nodeOriginatedPartitions.contains(partition);
}

public void closePartitionResources(int partition) throws HyracksDataException {
@@ -185,9 +185,9 @@ private boolean isSelf(ReplicaIdentifier id) {
return id.getNodeId().equals(nodeId);
}

private void setNodeOwnedPartitions(INcApplicationContext appCtx) {
private void setNodeOriginatedPartitions(INcApplicationContext appCtx) {
Set<Integer> nodePartitions =
appCtx.getMetadataProperties().getNodePartitions(appCtx.getServiceContext().getNodeId());
nodeOwnedPartitions.addAll(nodePartitions);
nodeOriginatedPartitions.addAll(nodePartitions);
}
}
@@ -195,12 +195,14 @@ public synchronized Map<Long, IndexInfo> getIndexes() {
/**
 * Registers {@code indexInfo} under {@code resourceID} and adds it to the set of
 * indexes for its partition, creating that set on first use.
 */
public synchronized void addIndex(long resourceID, IndexInfo indexInfo) {
    indexes.put(resourceID, indexInfo);
    // also track the index by partition so partition-wide operations can locate it
    partitionIndexes.computeIfAbsent(indexInfo.getPartition(), partition -> new HashSet<>()).add(indexInfo);
    LOGGER.debug("registered reference to index {}", indexInfo);
}

/**
 * Deregisters the index registered under {@code resourceID}, if any, and removes it
 * from its partition's index set. A no-op when no index is registered under the id.
 */
public synchronized void removeIndex(long resourceID) {
    final IndexInfo removed = indexes.remove(resourceID);
    if (removed == null) {
        return;
    }
    partitionIndexes.get(removed.getPartition()).remove(removed);
    LOGGER.debug("removed reference to index {}", removed);
}

@@ -55,4 +55,10 @@ public int getDatasetId() {
/**
 * @return the {@code LocalResource} backing this index
 */
public LocalResource getLocalResource() {
    return localResource;
}

/**
 * Renders all identifying fields of this index info for logging/diagnostics.
 */
@Override
public String toString() {
    final StringBuilder sb = new StringBuilder("IndexInfo{");
    sb.append("index=").append(index);
    sb.append(", datasetId=").append(datasetId);
    sb.append(", resourceId=").append(resourceId);
    sb.append(", partition=").append(partition);
    sb.append(", localResource=").append(localResource);
    return sb.append('}').toString();
}
}
@@ -104,10 +104,10 @@ public interface IReplicaManager {
List<IPartitionReplica> getReplicas();

/**
 * Returns true if {@code partition} originated on this node, otherwise false.
 * Defect fixed: the scraped diff kept both the pre-rename ({@code isPartitionOwner})
 * and post-rename ({@code isPartitionOrigin}) declarations and duplicated javadoc
 * lines; only the renamed declaration is kept.
 *
 * @param partition the partition to check
 * @return true if the partition originated on this node, otherwise false.
 */
boolean isPartitionOrigin(int partition);
}
@@ -90,7 +90,7 @@ public static PartitionResourcesListResponse create(DataInput input) throws IOEx
return new PartitionResourcesListResponse(partition, partitionReplicatedResources, resources, owner);
}

public boolean isOwner() {
public boolean isOrigin() {
return owner;
}

@@ -59,7 +59,7 @@ public void perform(INcApplicationContext appCtx, IReplicationWorker worker) thr
localResourceRepository.getPartitionReplicatedFiles(partition, replicationStrategy).stream()
.map(StoragePathUtil::getFileRelativePath).collect(Collectors.toList());
final PartitionResourcesListResponse response = new PartitionResourcesListResponse(partition,
partitionReplicatedResources, partitionFiles, appCtx.getReplicaManager().isPartitionOwner(partition));
partitionReplicatedResources, partitionFiles, appCtx.getReplicaManager().isPartitionOrigin(partition));
ReplicationProtocol.sendTo(worker.getChannel(), response, worker.getReusableBuffer());
}

@@ -55,7 +55,7 @@ public void replicate(String file, boolean metadata) {
final IIOManager ioManager = appCtx.getIoManager();
final ISocketChannel channel = replica.getChannel();
final FileReference filePath = ioManager.resolve(file);
String masterNode = appCtx.getReplicaManager().isPartitionOwner(replica.getIdentifier().getPartition())
String masterNode = appCtx.getReplicaManager().isPartitionOrigin(replica.getIdentifier().getPartition())
? appCtx.getServiceContext().getNodeId() : null;
ReplicateFileTask task = new ReplicateFileTask(file, filePath.getFile().length(), metadata, masterNode);
LOGGER.debug("attempting to replicate {} to replica {}", task, replica);
@@ -94,7 +94,7 @@ private void replicateComponent(PartitionReplica replica) throws IOException {
final FileSynchronizer fileSynchronizer = new FileSynchronizer(appCtx, replica);
job.getJobFiles().stream().map(StoragePathUtil::getFileRelativePath).forEach(fileSynchronizer::replicate);
// send mark component valid
String masterNode = appCtx.getReplicaManager().isPartitionOwner(replica.getIdentifier().getPartition())
String masterNode = appCtx.getReplicaManager().isPartitionOrigin(replica.getIdentifier().getPartition())
? appCtx.getServiceContext().getNodeId() : null;
MarkComponentValidTask markValidTask = new MarkComponentValidTask(indexFile, getReplicatedComponentLsn(),
getReplicatedComponentId(), masterNode);
@@ -72,7 +72,7 @@ public void sync() throws IOException {
}
PartitionResourcesListResponse replicaResourceResponse = getReplicaFiles(partition);
Map<ResourceReference, Long> resourceReferenceLongMap = getValidReplicaResources(
replicaResourceResponse.getPartitionReplicatedResources(), replicaResourceResponse.isOwner());
replicaResourceResponse.getPartitionReplicatedResources(), replicaResourceResponse.isOrigin());
// clean up files for invalid resources (deleted or recreated while the replica was down)
Set<String> deletedReplicaFiles =
cleanupReplicaInvalidResources(replicaResourceResponse, resourceReferenceLongMap);
@@ -155,7 +155,7 @@ private Set<String> cleanupReplicaInvalidResources(PartitionResourcesListRespons
if (!validReplicaResources.containsKey(replicaRes)) {
LOGGER.debug("replica invalid file {} to be deleted", replicaRes.getFileRelativePath());
invalidFiles.add(replicaResPath);
} else if (replicaResourceResponse.isOwner() && !replicaRes.isMetadataResource()) {
} else if (replicaResourceResponse.isOrigin() && !replicaRes.isMetadataResource()) {
// find files where the owner generated and failed before replicating
Long masterValidSeq = validReplicaResources.get(replicaRes);
IndexComponentFileReference componentFileReference =
@@ -184,7 +184,7 @@ private PartitionResourcesListResponse getReplicaFiles(int partition) throws IOE
}

private Map<ResourceReference, Long> getValidReplicaResources(Map<String, Long> partitionReplicatedResources,
boolean owner) throws HyracksDataException {
boolean origin) throws HyracksDataException {
Map<ResourceReference, Long> resource2ValidSeqMap = new HashMap<>();
for (Map.Entry<String, Long> resourceEntry : partitionReplicatedResources.entrySet()) {
ResourceReference rr = ResourceReference.of(resourceEntry.getKey());
@@ -196,7 +196,7 @@ private Map<ResourceReference, Long> getValidReplicaResources(Map<String, Long>
LOGGER.info("replica has resource {} but with different resource id; ours {}, theirs {}", rr,
localResource.getId(), resourceEntry.getValue());
} else {
long resourceMasterValidSeq = owner ? getResourceMasterValidSeq(rr) : Integer.MAX_VALUE;
long resourceMasterValidSeq = origin ? getResourceMasterValidSeq(rr) : Integer.MAX_VALUE;
resource2ValidSeqMap.put(rr, resourceMasterValidSeq);
}
}
@@ -73,7 +73,7 @@ private void syncFiles(boolean deltaRecovery) throws IOException {
private void checkpointReplicaIndexes() throws IOException {
final int partition = replica.getIdentifier().getPartition();
String masterNode =
appCtx.getReplicaManager().isPartitionOwner(partition) ? appCtx.getServiceContext().getNodeId() : null;
appCtx.getReplicaManager().isPartitionOrigin(partition) ? appCtx.getServiceContext().getNodeId() : null;
CheckpointPartitionIndexesTask task =
new CheckpointPartitionIndexesTask(partition, getPartitionMaxComponentId(partition), masterNode);
ReplicationProtocol.sendTo(replica, task);
@@ -236,7 +236,13 @@ public void close() throws IOException {
try {
return readFuture.get();
} catch (InterruptedException ex) { // NOSONAR -- interrupt or rethrow
response.close();
executor.submit(() -> {
try {
response.close();
} catch (IOException e) {
LOGGER.debug("{} ignoring exception thrown on stream close due to interrupt", description, e);
}
});
try {
readFuture.get(1, TimeUnit.SECONDS);
} catch (TimeoutException te) {

0 comments on commit 349915e

Please sign in to comment.