HDDS-2445. Replace ToStringBuilder in BlockData #132

Closed
wants to merge 1 commit into from
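
Description (summarizing the diff below): BlockData#toString previously went through commons-lang3 ToStringBuilder, which allocates a builder, a StringBuffer, and intermediate strings on every call. This change switches BlockData, BlockID, and ContainerBlockID to a pre-sized StringBuilder plus a push-down appendTo(StringBuilder) method, so nested IDs write straight into the caller's buffer. A new JMH benchmark (BenchmarkBlockDataToString) compares the alternatives, and the GC profiler is enabled in Genesis to observe allocation rates. As an illustration of the push-down pattern (the caller variable blockData below is hypothetical, not part of this patch):

// Hypothetical caller: build a larger message without creating
// intermediate BlockID/ContainerBlockID strings.
StringBuilder msg = new StringBuilder(256);
msg.append("processing block ");
blockData.appendTo(msg);
System.out.println(msg.toString());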
@@ -74,10 +74,14 @@ public void setContainerBlockID(ContainerBlockID containerBlockID) {

@Override
public String toString() {
return new StringBuilder().append(getContainerBlockID().toString())
.append(" bcsId: ")
.append(blockCommitSequenceId)
.toString();
StringBuilder sb = new StringBuilder(64);
appendTo(sb);
return sb.toString();
}

public void appendTo(StringBuilder sb) {
containerBlockID.appendTo(sb);
sb.append(" bcsId: ").append(blockCommitSequenceId);
}

public ContainerProtos.DatanodeBlockID getDatanodeBlockIDProtobuf() {
@@ -42,11 +42,14 @@ public long getLocalID() {

@Override
public String toString() {
return new StringBuffer()
.append("conID: ")
.append(containerID)
.append(" locID: ")
.append(localID).toString();
StringBuilder sb = new StringBuilder(48);
appendTo(sb);
return sb.toString();
}

public void appendTo(StringBuilder sb) {
sb.append("conID: ").append(containerID)
.append(" locID: ").append(localID);
}

public HddsProtos.ContainerBlockID getProtobuf() {
@@ -17,8 +17,6 @@
*/
package org.apache.hadoop.ozone.container.common.helpers;

import org.apache.commons.lang3.builder.ToStringBuilder;
import org.apache.commons.lang3.builder.ToStringStyle;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.client.BlockID;
import com.google.common.base.Preconditions;
@@ -254,9 +252,19 @@ public void setChunks(List<ContainerProtos.ChunkInfo> chunks) {
size = 0L;
} else {
final int n = chunks.size();
chunkList = n == 0? null: n == 1? chunks.get(0): chunks;
size = chunks.parallelStream().mapToLong(
ContainerProtos.ChunkInfo::getLen).sum();
if (n == 0) {
chunkList = null;
size = 0;
} else if (n == 1) {
ContainerProtos.ChunkInfo singleChunk = chunks.get(0);
chunkList = singleChunk;
size = singleChunk.getLen();
} else {
chunkList = chunks;
size = chunks.parallelStream()
.mapToLong(ContainerProtos.ChunkInfo::getLen)
.sum();
}
}
}

@@ -270,9 +278,15 @@ public long getSize() {

@Override
public String toString() {
return new ToStringBuilder(this, ToStringStyle.NO_CLASS_NAME_STYLE)
.append("blockId", blockID.toString())
.append("size", this.size)
.toString();
StringBuilder sb = new StringBuilder(112);
appendTo(sb);
return sb.toString();
}

public void appendTo(StringBuilder sb) {
sb.append("[blockId=");
blockID.appendTo(sb);
sb.append(",size=").append(size);
sb.append("]");
}
}
@@ -17,6 +17,7 @@

package org.apache.hadoop.ozone.container.common.helpers;

import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.ozone.common.Checksum;
import org.junit.Assert;
@@ -31,6 +32,8 @@
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;

import static org.junit.Assert.assertEquals;

/**
* Tests to test block deleting service.
*/
@@ -129,4 +132,13 @@ public void testSetChunks() {
assertChunks(expected, computed);
}
}

@Test
public void testToString() {
final BlockID blockID = new BlockID(5, 123);
blockID.setBlockCommitSequenceId(42);
final BlockData subject = new BlockData(blockID);
assertEquals("[blockId=conID: 5 locID: 123 bcsId: 42,size=0]",
subject.toString());
}
}
@@ -0,0 +1,166 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.genesis;

import com.google.common.base.Preconditions;
import org.apache.commons.lang3.builder.ToStringBuilder;
import org.apache.commons.lang3.builder.ToStringStyle;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.client.ContainerBlockID;
import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.infra.Blackhole;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;

/**
* Benchmarks various implementations of {@link BlockData#toString}.
*/
@State(Scope.Benchmark)
public class BenchmarkBlockDataToString {

@Param("1000")
private int count;

@Param({"112"})
private int capacity;

private List<BlockData> data;
private List<String> values;

@Setup
public void createData() {
ThreadLocalRandom rnd = ThreadLocalRandom.current();
data = new ArrayList<>(count);
values = new ArrayList<>(count);
for (int i = 0; i < count; i++) {
BlockID blockID = new BlockID(rnd.nextLong(), rnd.nextLong());
BlockData item = new BlockData(blockID);
item.setBlockCommitSequenceId(rnd.nextLong());
data.add(item);
values.add(item.toString());
}
}

@Benchmark
public void usingToStringBuilderDefaultCapacity(
BenchmarkBlockDataToString state, Blackhole sink) {
for (int i = 0; i < state.count; i++) {
BlockData item = state.data.get(i);
String str = new ToStringBuilder(item, ToStringStyle.NO_CLASS_NAME_STYLE)
.append("blockId", item.getBlockID().toString())
.append("size", item.getSize())
.toString();
sink.consume(str);
Preconditions.checkArgument(str.equals(state.values.get(i)));
}
}

@Benchmark
public void usingToStringBuilder(
BenchmarkBlockDataToString state, Blackhole sink) {
for (int i = 0; i < state.count; i++) {
BlockData item = state.data.get(i);
String str = new ToStringBuilder(item, ToStringStyle.NO_CLASS_NAME_STYLE,
new StringBuffer(capacity))
.append("blockId", item.getBlockID().toString())
.append("size", item.getSize())
.toString();
sink.consume(str);
Preconditions.checkArgument(str.equals(state.values.get(i)));
}
}

@Benchmark
public void usingSimpleStringBuilder(
BenchmarkBlockDataToString state, Blackhole sink) {
for (int i = 0; i < state.count; i++) {
BlockData item = state.data.get(i);
String str = new StringBuilder(capacity)
.append("[")
.append("blockId=")
.append(item.getBlockID())
.append(",size=")
.append(item.getSize())
.append("]")
.toString();
sink.consume(str);
Preconditions.checkArgument(str.equals(state.values.get(i)));
}
}

@Benchmark
public void usingPushDownStringBuilder(
BenchmarkBlockDataToString state, Blackhole sink) {
for (int i = 0; i < state.count; i++) {
BlockData item = state.data.get(i);
StringBuilder sb = new StringBuilder(capacity);
item.appendTo(sb);
String str = sb.toString();
sink.consume(str);
Preconditions.checkArgument(str.equals(state.values.get(i)));
}
}

@Benchmark
public void usingConcatenation(
BenchmarkBlockDataToString state, Blackhole sink) {
for (int i = 0; i < state.count; i++) {
BlockData item = state.data.get(i);
String str = "[blockId=" +
item.getBlockID() +
",size=" +
item.getSize() +
"]";
sink.consume(str);
Preconditions.checkArgument(str.equals(state.values.get(i)));
}
}

@Benchmark
public void usingInlineStringBuilder(
BenchmarkBlockDataToString state, Blackhole sink) {
for (int i = 0; i < state.count; i++) {
BlockData item = state.data.get(i);
BlockID blockID = item.getBlockID();
ContainerBlockID containerBlockID = blockID.getContainerBlockID();
String str = new StringBuilder(capacity)
.append("[")
.append("blockId=")
.append("conID: ")
.append(containerBlockID.getContainerID())
.append(" locID: ")
.append(containerBlockID.getLocalID())
.append(" bcsId: ")
.append(blockID.getBlockCommitSequenceId())
.append(",size=")
.append(item.getSize())
.append("]")
.toString();
sink.consume(str);
Preconditions.checkArgument(str.equals(state.values.get(i)));
}
}

}
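
For quick local runs of just this benchmark outside the Genesis suite, a plain JMH runner can be used (a minimal sketch; the runner class name is illustrative and it assumes JMH is already on the classpath, as it is for the genesis module):

import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;

/** Illustrative standalone runner, not part of this patch. */
public final class BlockDataToStringRunner {
  public static void main(String[] args) throws Exception {
    // Run only BenchmarkBlockDataToString with a short warmup/measurement cycle.
    Options opts = new OptionsBuilder()
        .include(BenchmarkBlockDataToString.class.getSimpleName())
        .warmupIterations(2)
        .measurementIterations(5)
        .forks(1)
        .build();
    new Runner(opts).run();
  }
}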
@@ -18,6 +18,7 @@

package org.apache.hadoop.ozone.genesis;

import org.openjdk.jmh.profile.GCProfiler;
import org.openjdk.jmh.profile.StackProfiler;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
@@ -83,6 +84,7 @@ public static void main(String[] args) throws RunnerException {
optionsBuilder.warmupIterations(2)
.measurementIterations(20)
.addProfiler(StackProfiler.class)
.addProfiler(GCProfiler.class)
.shouldDoGC(true)
.forks(1)
.threads(numThreads);