HDDS-3142. Create isolated environment for OM to test it without SCM.
elek committed Mar 19, 2020
1 parent 7a48111 commit 281faf3
Showing 7 changed files with 332 additions and 2 deletions.
34 changes: 34 additions & 0 deletions dev-support/byteman/mock-scm.btm
@@ -0,0 +1,34 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

RULE mock scm block client
CLASS org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolClientSideTranslatorPB
METHOD submitRequest
AT ENTRY
BIND client:org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolClientSideTranslatorPB = $0;
result:org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos$SCMBlockLocationResponse = org.apache.hadoop.hdds.freon.FakeScmBlockLocationProtocolClient.submitRequest($1);
IF true
DO return result;
ENDRULE

RULE mock scm container client
CLASS org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB
METHOD submitRpcRequest
AT ENTRY
BIND client:org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB = $0;
result:org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos$ScmContainerLocationResponse = org.apache.hadoop.hdds.freon.FakeScmContainerLocationProtocolClient.submitRequest($1);
IF true
DO return result;
ENDRULE
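
Both rules fire AT ENTRY of the SCM client-side translators and immediately return an answer built by the fake clients added below, so an OM started with this script never opens an RPC connection to a real SCM. The script is meant to be loaded through the Byteman JVM agent (for example -javaagent:byteman.jar=script:dev-support/byteman/mock-scm.btm; the exact wiring is not part of this commit). As a hypothetical plain-Java sketch, the first rule behaves roughly like this:

// Illustration only: what the first rule effectively injects. At entry of
// submitRequest the fake client answers and the real RPC proxy is skipped.
SCMBlockLocationResponse submitRequest(SCMBlockLocationRequest req)
    throws IOException {
  return FakeScmBlockLocationProtocolClient.submitRequest(req);
}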
FakeClusterTopology.java
@@ -0,0 +1,92 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdds.freon;

import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.UUID;

import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.Pipeline;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.Port;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
* Class to store pre-generated topology information for load-tests.
*/
public class FakeClusterTopology {

private static final Logger LOGGER =
LoggerFactory.getLogger(FakeClusterTopology.class);

public static final FakeClusterTopology INSTANCE = new FakeClusterTopology();

private List<DatanodeDetailsProto> datanodes = new ArrayList<>();

private List<Pipeline> pipelines = new ArrayList<>();

private Random random = new Random();

public FakeClusterTopology() {
try {
for (int i = 0; i < 9; i++) {
datanodes.add(createDatanode(i));
if ((i + 1) % 3 == 0) {
pipelines.add(Pipeline.newBuilder()
.setId(PipelineID.randomId().getProtobuf())
.setFactor(ReplicationFactor.THREE)
.setType(ReplicationType.RATIS)
.addMembers(getDatanode(i - 2))
.addMembers(getDatanode(i - 1))
.addMembers(getDatanode(i))
.build());
}
}
} catch (Exception ex) {
LOGGER.error("Can't initialize FakeClusterTopology", ex);
}
}

private DatanodeDetailsProto createDatanode(int index) {
return DatanodeDetailsProto.newBuilder()
.setUuid(UUID.randomUUID().toString())
.setHostName("localhost")
.setIpAddress("127.0.0.1")
.addPorts(
Port.newBuilder().setName("RATIS").setValue(1234))
.build();
}

public DatanodeDetailsProto getDatanode(int i) {
return datanodes.get(i);
}

public Pipeline getRandomPipeline() {
return pipelines.get(random.nextInt(pipelines.size()));
}

public List<DatanodeDetailsProto> getAllDatanodes() {
return datanodes;
}
}
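
FakeClusterTopology eagerly builds nine fake datanodes and groups each consecutive triple into a RATIS pipeline with replication factor THREE, so block allocations can be served entirely from memory. A hypothetical caller-side sketch:

// Hypothetical usage: the fake SCM clients below draw replication targets
// from this pre-generated topology instead of a live cluster.
Pipeline pipeline = FakeClusterTopology.INSTANCE.getRandomPipeline();
assert pipeline.getMembersCount() == 3; // each fake pipeline has three members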
FakeScmBlockLocationProtocolClient.java
@@ -0,0 +1,100 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdds.freon;

import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerBlockID;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.GetScmInfoResponseProto;
import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.AllocateBlockResponse;
import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.AllocateScmBlockResponseProto;
import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.AllocateScmBlockResponseProto.Builder;
import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SCMBlockLocationRequest;
import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SCMBlockLocationResponse;
import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.Status;
import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.Type;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
* Fake SCM client to return a simulated block location.
*/
public final class FakeScmBlockLocationProtocolClient {

private static final Logger LOGGER =
LoggerFactory.getLogger(FakeScmBlockLocationProtocolClient.class);

public static final int BLOCK_PER_CONTAINER = 1000;

private static AtomicLong counter = new AtomicLong();

private FakeScmBlockLocationProtocolClient() {
}

public static SCMBlockLocationResponse submitRequest(
SCMBlockLocationRequest req)
throws IOException {
try {
if (req.getCmdType() == Type.GetScmInfo) {
return SCMBlockLocationResponse.newBuilder()
.setCmdType(req.getCmdType())
.setStatus(Status.OK)
.setSuccess(true)
.setGetScmInfoResponse(
GetScmInfoResponseProto.newBuilder()
.setScmId("scm-id")
.setClusterId("cluster-id")
.build()
)
.build();
} else if (req.getCmdType() == Type.AllocateScmBlock) {
Builder allocateBlockResponse =
AllocateScmBlockResponseProto.newBuilder();
for (int i = 0;
i < req.getAllocateScmBlockRequest().getNumBlocks(); i++) {
long seq = counter.incrementAndGet();

allocateBlockResponse.addBlocks(AllocateBlockResponse.newBuilder()
.setPipeline(FakeClusterTopology.INSTANCE.getRandomPipeline())
.setContainerBlockID(ContainerBlockID.newBuilder()
.setContainerID(seq / BLOCK_PER_CONTAINER)
.setLocalID(seq))
);
}
return SCMBlockLocationResponse.newBuilder()
.setCmdType(req.getCmdType())
.setStatus(Status.OK)
.setSuccess(true)
.setAllocateScmBlockResponse(
allocateBlockResponse
)
.build();
} else {
throw new IllegalArgumentException(
"Unsupported request. Fake answer is not implemented for " + req
.getCmdType());
}
} catch (Exception ex) {
LOGGER.error("Error on creating fake SCM response", ex);
return null;
}
}

}
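
AllocateScmBlock answers are derived from a single process-wide counter: the local id is the raw sequence number, and the container id advances once every BLOCK_PER_CONTAINER allocations, so each simulated container holds 1000 consecutive blocks. A worked example of the scheme:

// Worked example of the block-id scheme above (values illustrative):
long seq = 2500;                              // the 2500th allocated block
long containerId = seq / BLOCK_PER_CONTAINER; // 2500 / 1000 -> container 2
long localId = seq;                           // local id is the sequence itself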
FakeScmContainerLocationProtocolClient.java
@@ -0,0 +1,76 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdds.freon;

import java.io.IOException;

import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.Node;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.NodeQueryResponseProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.NodeQueryResponseProto.Builder;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ScmContainerLocationRequest;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ScmContainerLocationResponse;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ScmContainerLocationResponse.Status;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.Type;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
* Fake SCM client to return simulated container location answers.
*/
public final class FakeScmContainerLocationProtocolClient {

private static final Logger LOGGER =
LoggerFactory.getLogger(FakeScmContainerLocationProtocolClient.class);

private FakeScmContainerLocationProtocolClient() {
}

public static ScmContainerLocationResponse submitRequest(
ScmContainerLocationRequest req)
throws IOException {
try {
if (req.getCmdType() == Type.QueryNode) {
Builder builder = NodeQueryResponseProto.newBuilder();
for (DatanodeDetailsProto datanode : FakeClusterTopology.INSTANCE
.getAllDatanodes()) {
builder.addDatanodes(Node.newBuilder()
.setNodeID(datanode)
.addNodeStates(NodeState.HEALTHY)
.build());
}

return ScmContainerLocationResponse.newBuilder()
.setCmdType(Type.QueryNode)
.setStatus(Status.OK)
.setNodeQueryResponse(builder.build())
.build();
} else {
throw new IllegalArgumentException(
"Unsupported request. Fake answer is not implemented for " + req
.getCmdType());
}
} catch (Exception ex) {
LOGGER.error("Error on creating fake SCM response", ex);
return null;
}
}

}
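
Only QueryNode is simulated here: every datanode from FakeClusterTopology is reported back as HEALTHY, which is all the OM needs for these load tests. For any other command type the IllegalArgumentException is caught by the surrounding catch block, logged, and null is returned, matching the behavior of the block-location fake above.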
package-info.java
@@ -0,0 +1,24 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* <p>
* Freon related helper classes used for load testing.
*/

/**
* Freon related helper classes used for load testing.
*/
package org.apache.hadoop.hdds.freon;
StorageContainerLocationProtocolClientSideTranslatorPB.java
@@ -111,13 +111,18 @@ private ScmContainerLocationResponse submitRequest(
builderConsumer.accept(builder);
ScmContainerLocationRequest wrapper = builder.build();

- response = rpcProxy.submitRequest(NULL_RPC_CONTROLLER, wrapper);
+ response = submitRpcRequest(wrapper);
} catch (ServiceException ex) {
throw ProtobufHelper.getRemoteException(ex);
}
return response;
}

+ private ScmContainerLocationResponse submitRpcRequest(
+     ScmContainerLocationRequest wrapper) throws ServiceException {
+   return rpcProxy.submitRequest(NULL_RPC_CONTROLLER, wrapper);
+ }

/**
* Asks SCM where a container should be allocated. SCM responds with the set
* of datanodes that should be used creating this container. Ozone/SCM only
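The only change to the translator itself is the extraction of submitRpcRequest: the one-line wrapper gives the second rule in mock-scm.btm (METHOD submitRpcRequest) a dedicated interception point, so byteman can substitute the fake answer without duplicating the request-building logic in submitRequest.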
@@ -65,7 +65,6 @@ public ColumnFamilyOptions getColumnFamilyOptions() {
new BlockBasedTableConfig()
.setBlockCache(new LRUCache(blockCacheSize))
.setBlockSize(blockSize)
- .setCacheIndexAndFilterBlocks(true)
.setPinL0FilterAndIndexBlocksInCache(true)
.setFilterPolicy(new BloomFilter()));
}
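
Note that with setCacheIndexAndFilterBlocks removed, RocksDB falls back to its default of holding index and filter blocks in table-reader memory rather than in the block cache, and since setPinL0FilterAndIndexBlocksInCache only takes effect while cache_index_and_filter_blocks is true, the remaining pin call becomes a no-op.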
