Commit
Make some ByteSizeValue instances always use the singleton (#91178)
This comes out of a user heap dump investigation. In some snapshot
corner cases we ran into about 100M of duplicate 0b instances.

-> even though it's a little heavy-handed, let's make it so the common
constants that we already have are used whenever possible.
original-brownbear committed Oct 28, 2022
1 parent 2421bf3 commit 362a7f0
Showing 192 changed files with 594 additions and 567 deletions.
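
The change is mechanical: call sites that built throwaway ByteSizeValue objects via new ByteSizeValue(...) now go through factory methods (ByteSizeValue.ofBytes, ByteSizeValue.readFrom) or reuse existing constants (ByteSizeValue.ONE, ByteSizeValue.MINUS_ONE), so common values such as 0 bytes can resolve to shared singletons instead of fresh allocations. Below is a minimal, illustrative sketch of that pattern; the ByteSize class, its ZERO constant, and the cached cut-offs are assumptions for illustration, not the actual ByteSizeValue implementation.

// Illustrative sketch only: a value type whose static factory hands back
// singleton instances for the most common values, so repeated logical
// duplicates (e.g. millions of "0 bytes" entries in snapshot metadata)
// collapse into a single object on the heap.
final class ByteSize {

    static final ByteSize ZERO = new ByteSize(0);        // assumed constant, mirrors the duplicated 0b values
    static final ByteSize ONE = new ByteSize(1);
    static final ByteSize MINUS_ONE = new ByteSize(-1);  // sentinel, as used for "no max headroom" below

    private final long bytes;

    private ByteSize(long bytes) {  // constructor kept private in this sketch: callers must use the factory
        this.bytes = bytes;
    }

    // Analogue of ByteSizeValue.ofBytes(long): return a shared constant when possible.
    static ByteSize ofBytes(long bytes) {
        if (bytes == 0) {
            return ZERO;
        }
        if (bytes == 1) {
            return ONE;
        }
        if (bytes == -1) {
            return MINUS_ONE;
        }
        return new ByteSize(bytes);
    }

    long getBytes() {
        return bytes;
    }
}

With that in place, ByteSize.ofBytes(storageBytes) is a drop-in replacement for new ByteSize(storageBytes) at every call site in the diff, and identical values become reference-equal, which is what shrinks the heap footprint seen in the dump.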
@@ -224,7 +224,7 @@ protected DataStreamsStatsAction.Response newResponse(
 entry -> new DataStreamsStatsAction.DataStreamStats(
 entry.getKey(),
 entry.getValue().backingIndices.size(),
-new ByteSizeValue(entry.getValue().storageBytes),
+ByteSizeValue.ofBytes(entry.getValue().storageBytes),
 entry.getValue().maxTimestamp
 )
 )
@@ -237,7 +237,7 @@ protected DataStreamsStatsAction.Response newResponse(
 shardFailures,
 aggregatedDataStreamsStats.size(),
 allBackingIndices.size(),
-new ByteSizeValue(totalStoreSizeBytes),
+ByteSizeValue.ofBytes(totalStoreSizeBytes),
 dataStreamStats
 );
 }
@@ -43,7 +43,12 @@ public static DataStreamsStatsAction.Response randomStatsResponse() {
 totalStoreSize += storeSize;
 long maximumTimestamp = randomRecentTimestamp();
 dataStreamStats.add(
-new DataStreamsStatsAction.DataStreamStats(dataStreamName, backingIndices, new ByteSizeValue(storeSize), maximumTimestamp)
+new DataStreamsStatsAction.DataStreamStats(
+dataStreamName,
+backingIndices,
+ByteSizeValue.ofBytes(storeSize),
+maximumTimestamp
+)
 );
 }
 int totalShards = randomIntBetween(backingIndicesTotal, backingIndicesTotal * 3);
@@ -66,7 +71,7 @@ public static DataStreamsStatsAction.Response randomStatsResponse() {
 exceptions,
 dataStreamCount,
 backingIndicesTotal,
-new ByteSizeValue(totalStoreSize),
+ByteSizeValue.ofBytes(totalStoreSize),
 dataStreamStats.toArray(DataStreamsStatsAction.DataStreamStats[]::new)
 );
 }
@@ -430,7 +430,7 @@ void sendBulkRequest(BulkRequest request, Runnable onSuccess) {
 "[{}]: sending [{}] entry, [{}] bulk request",
 task.getId(),
 requestSize,
-new ByteSizeValue(request.estimatedSizeInBytes())
+ByteSizeValue.ofBytes(request.estimatedSizeInBytes())
 );
 }
 if (task.isCancelled()) {
@@ -30,7 +30,7 @@
 import static java.util.Collections.emptyMap;
 
 public class AzureStorageService {
-public static final ByteSizeValue MIN_CHUNK_SIZE = new ByteSizeValue(1, ByteSizeUnit.BYTES);
+public static final ByteSizeValue MIN_CHUNK_SIZE = ByteSizeValue.ofBytes(1);
 
 /**
 * The maximum size of a BlockBlob block.
@@ -48,12 +48,11 @@ public class AzureStorageService {
 * Default block size for multi-block uploads. The Azure repository will use the Put block and Put block list APIs to split the
 * stream into several part, each of block_size length, and will upload each part in its own request.
 */
-private static final ByteSizeValue DEFAULT_BLOCK_SIZE = new ByteSizeValue(
+private static final ByteSizeValue DEFAULT_BLOCK_SIZE = ByteSizeValue.ofBytes(
 Math.max(
 ByteSizeUnit.MB.toBytes(5), // minimum value
 Math.min(MAX_BLOCK_SIZE.getBytes(), JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() / 20)
-),
-ByteSizeUnit.BYTES
+)
 );
 
 /**
@@ -65,7 +64,7 @@ public class AzureStorageService {
 /**
 * Maximum allowed blob size in Azure blob store.
 */
-public static final ByteSizeValue MAX_CHUNK_SIZE = new ByteSizeValue(MAX_BLOB_SIZE, ByteSizeUnit.BYTES);
+public static final ByteSizeValue MAX_CHUNK_SIZE = ByteSizeValue.ofBytes(MAX_BLOB_SIZE);
 
 private static final long DEFAULT_UPLOAD_BLOCK_SIZE = DEFAULT_BLOCK_SIZE.getBytes();
 
@@ -34,7 +34,7 @@ class GoogleCloudStorageRepository extends MeteredBlobStoreRepository {
 private static final Logger logger = LogManager.getLogger(GoogleCloudStorageRepository.class);
 
 // package private for testing
-static final ByteSizeValue MIN_CHUNK_SIZE = new ByteSizeValue(1, ByteSizeUnit.BYTES);
+static final ByteSizeValue MIN_CHUNK_SIZE = ByteSizeValue.ONE;
 
 /**
 * Maximum allowed object size in GCS.
@@ -76,12 +76,11 @@ class S3Repository extends MeteredBlobStoreRepository {
 * Default is to use 100MB (S3 defaults) for heaps above 2GB and 5% of
 * the available memory for smaller heaps.
 */
-private static final ByteSizeValue DEFAULT_BUFFER_SIZE = new ByteSizeValue(
+private static final ByteSizeValue DEFAULT_BUFFER_SIZE = ByteSizeValue.ofBytes(
 Math.max(
 ByteSizeUnit.MB.toBytes(5), // minimum value
 Math.min(ByteSizeUnit.MB.toBytes(100), JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() / 20)
-),
-ByteSizeUnit.BYTES
+)
 );
 
 static final Setting<String> BUCKET_SETTING = Setting.simpleString("bucket");
@@ -453,7 +453,7 @@ public void testReadRetriesAfterMeaningfulProgress() throws Exception {
 0,
 randomFrom(1000, Math.toIntExact(S3Repository.BUFFER_SIZE_SETTING.get(Settings.EMPTY).getBytes()))
 );
-final BlobContainer blobContainer = createBlobContainer(maxRetries, null, true, new ByteSizeValue(bufferSizeBytes));
+final BlobContainer blobContainer = createBlobContainer(maxRetries, null, true, ByteSizeValue.ofBytes(bufferSizeBytes));
 final int meaningfulProgressBytes = Math.max(1, bufferSizeBytes / 100);
 
 final byte[] bytes = randomBlobContent();
@@ -84,7 +84,7 @@ public class Netty4HttpServerTransport extends AbstractHttpServerTransport {
 *
 * By default we assume the Ethernet MTU (1500 bytes) but users can override it with a system property.
 */
-private static final ByteSizeValue MTU = new ByteSizeValue(Long.parseLong(System.getProperty("es.net.mtu", "1500")));
+private static final ByteSizeValue MTU = ByteSizeValue.ofBytes(Long.parseLong(System.getProperty("es.net.mtu", "1500")));
 
 private static final String SETTING_KEY_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS = "http.netty.max_composite_buffer_components";
 
@@ -47,15 +47,15 @@ public class NettyAllocator {
 ALLOCATOR = ByteBufAllocator.DEFAULT;
 SUGGESTED_MAX_ALLOCATION_SIZE = 1024 * 1024;
 DESCRIPTION = "[name=netty_default, suggested_max_allocation_size="
-+ new ByteSizeValue(SUGGESTED_MAX_ALLOCATION_SIZE)
++ ByteSizeValue.ofBytes(SUGGESTED_MAX_ALLOCATION_SIZE)
 + ", factors={es.unsafe.use_netty_default_allocator=true}]";
 } else {
 final long heapSizeInBytes = JvmInfo.jvmInfo().getMem().getHeapMax().getBytes();
 final boolean g1gcEnabled = Boolean.parseBoolean(JvmInfo.jvmInfo().useG1GC());
 final long g1gcRegionSizeInBytes = JvmInfo.jvmInfo().getG1RegionSize();
 final boolean g1gcRegionSizeIsKnown = g1gcRegionSizeInBytes != -1;
-ByteSizeValue heapSize = new ByteSizeValue(heapSizeInBytes);
-ByteSizeValue g1gcRegionSize = new ByteSizeValue(g1gcRegionSizeInBytes);
+ByteSizeValue heapSize = ByteSizeValue.ofBytes(heapSizeInBytes);
+ByteSizeValue g1gcRegionSize = ByteSizeValue.ofBytes(g1gcRegionSizeInBytes);
 
 ByteBufAllocator delegate;
 if (useUnpooled(heapSizeInBytes, g1gcEnabled, g1gcRegionSizeIsKnown, g1gcRegionSizeInBytes)) {
@@ -68,7 +68,7 @@ public class NettyAllocator {
 SUGGESTED_MAX_ALLOCATION_SIZE = 1024 * 1024;
 }
 DESCRIPTION = "[name=unpooled, suggested_max_allocation_size="
-+ new ByteSizeValue(SUGGESTED_MAX_ALLOCATION_SIZE)
++ ByteSizeValue.ofBytes(SUGGESTED_MAX_ALLOCATION_SIZE)
 + ", factors={es.unsafe.use_unpooled_allocator="
 + System.getProperty(USE_UNPOOLED)
 + ", g1gc_enabled="
@@ -114,12 +114,12 @@ public class NettyAllocator {
 useCacheForAllThreads
 );
 int chunkSizeInBytes = pageSize << maxOrder;
-ByteSizeValue chunkSize = new ByteSizeValue(chunkSizeInBytes);
+ByteSizeValue chunkSize = ByteSizeValue.ofBytes(chunkSizeInBytes);
 SUGGESTED_MAX_ALLOCATION_SIZE = chunkSizeInBytes;
 DESCRIPTION = "[name=elasticsearch_configured, chunk_size="
 + chunkSize
 + ", suggested_max_allocation_size="
-+ new ByteSizeValue(SUGGESTED_MAX_ALLOCATION_SIZE)
++ ByteSizeValue.ofBytes(SUGGESTED_MAX_ALLOCATION_SIZE)
 + ", factors={es.unsafe.use_netty_default_chunk_and_page_size="
 + useDefaultChunkAndPageSize()
 + ", g1gc_enabled="
@@ -41,7 +41,7 @@ public void testEachMasterPublishesTheirThresholds() throws Exception {
 Map<String, String> watermarkByNode = new HashMap<>();
 Map<String, ByteSizeValue> maxHeadroomByNode = new HashMap<>();
 for (int i = 0; i < numberOfNodes; i++) {
-ByteSizeValue randomBytes = new ByteSizeValue(randomLongBetween(6, 19));
+ByteSizeValue randomBytes = ByteSizeValue.ofBytes(randomLongBetween(6, 19));
 String customWatermark = percentageMode ? randomIntBetween(86, 94) + "%" : randomBytes.toString();
 ByteSizeValue customMaxHeadroom = percentageMode ? randomBytes : ByteSizeValue.MINUS_ONE;
 String nodeName = startNode(internalCluster, customWatermark, customMaxHeadroom.toString());
@@ -74,20 +74,20 @@ public void testEachMasterPublishesTheirThresholds() throws Exception {
 public void testWatermarkSettingUpdate() throws Exception {
 try (InternalTestCluster internalCluster = internalCluster()) {
 int numberOfNodes = 3;
-ByteSizeValue randomBytes = new ByteSizeValue(randomLongBetween(6, 19));
+ByteSizeValue randomBytes = ByteSizeValue.ofBytes(randomLongBetween(6, 19));
 String initialWatermark = percentageMode ? randomIntBetween(86, 94) + "%" : randomBytes.toString();
 ByteSizeValue initialMaxHeadroom = percentageMode ? randomBytes : ByteSizeValue.MINUS_ONE;
 for (int i = 0; i < numberOfNodes; i++) {
 startNode(internalCluster, initialWatermark, initialMaxHeadroom.toString());
 }
 
-randomBytes = new ByteSizeValue(randomLongBetween(101, 200));
+randomBytes = ByteSizeValue.ofBytes(randomLongBetween(101, 200));
 String updatedLowWatermark = percentageMode ? randomIntBetween(40, 59) + "%" : randomBytes.toString();
 ByteSizeValue updatedLowMaxHeadroom = percentageMode ? randomBytes : ByteSizeValue.MINUS_ONE;
-randomBytes = new ByteSizeValue(randomLongBetween(50, 100));
+randomBytes = ByteSizeValue.ofBytes(randomLongBetween(50, 100));
 String updatedHighWatermark = percentageMode ? randomIntBetween(60, 90) + "%" : randomBytes.toString();
 ByteSizeValue updatedHighMaxHeadroom = percentageMode ? randomBytes : ByteSizeValue.MINUS_ONE;
-randomBytes = new ByteSizeValue(randomLongBetween(5, 10));
+randomBytes = ByteSizeValue.ofBytes(randomLongBetween(5, 10));
 String updatedFloodStageWatermark = percentageMode ? randomIntBetween(91, 95) + "%" : randomBytes.toString();
 ByteSizeValue updatedFloodStageMaxHeadroom = percentageMode ? randomBytes : ByteSizeValue.MINUS_ONE;
 
@@ -57,7 +57,7 @@ public NodeInfo(StreamInput in) throws IOException {
 version = Version.readVersion(in);
 build = Build.readBuild(in);
 if (in.readBoolean()) {
-totalIndexingBuffer = new ByteSizeValue(in.readLong());
+totalIndexingBuffer = ByteSizeValue.ofBytes(in.readLong());
 } else {
 totalIndexingBuffer = null;
 }
@@ -164,23 +164,23 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par
 builder.startObject(Fields.INCREMENTAL);
 {
 builder.field(Fields.FILE_COUNT, getIncrementalFileCount());
-builder.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, new ByteSizeValue(getIncrementalSize()));
+builder.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, ByteSizeValue.ofBytes(getIncrementalSize()));
 }
 builder.endObject();
 
 if (getProcessedFileCount() != getIncrementalFileCount()) {
 builder.startObject(Fields.PROCESSED);
 {
 builder.field(Fields.FILE_COUNT, getProcessedFileCount());
-builder.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, new ByteSizeValue(getProcessedSize()));
+builder.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, ByteSizeValue.ofBytes(getProcessedSize()));
 }
 builder.endObject();
 }
 
 builder.startObject(Fields.TOTAL);
 {
 builder.field(Fields.FILE_COUNT, getTotalFileCount());
-builder.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, new ByteSizeValue(getTotalSize()));
+builder.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, ByteSizeValue.ofBytes(getTotalSize()));
 }
 builder.endObject();
 
@@ -515,14 +515,14 @@ public TimeValue getMaxUpTime() {
 * Total heap used in the cluster
 */
 public ByteSizeValue getHeapUsed() {
-return new ByteSizeValue(heapUsed);
+return ByteSizeValue.ofBytes(heapUsed);
 }
 
 /**
 * Maximum total heap available to the cluster
 */
 public ByteSizeValue getHeapMax() {
-return new ByteSizeValue(heapMax);
+return ByteSizeValue.ofBytes(heapMax);
 }
 
 static final class Fields {
@@ -189,7 +189,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
 builder.field("version", version.toString());
 builder.field("index_count", indexCount);
 builder.field("primary_shard_count", primaryShardCount);
-builder.humanReadableField("total_primary_bytes", "total_primary_size", new ByteSizeValue(totalPrimaryByteCount));
+builder.humanReadableField("total_primary_bytes", "total_primary_size", ByteSizeValue.ofBytes(totalPrimaryByteCount));
 builder.endObject();
 return builder;
 }
@@ -136,7 +136,7 @@ public IndexDiskUsageStats add(IndexDiskUsageStats other) {
 @Override
 public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
 final PerFieldDiskUsage total = total();
-builder.field(STORE_SIZE, new ByteSizeValue(indexSizeInBytes));
+builder.field(STORE_SIZE, ByteSizeValue.ofBytes(indexSizeInBytes));
 builder.field(STORE_SIZE_IN_BYTES, indexSizeInBytes);
 
 // all fields
@@ -252,30 +252,30 @@ long totalBytes() {
 @Override
 public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
 final long totalBytes = totalBytes();
-builder.field(TOTAL, new ByteSizeValue(totalBytes));
+builder.field(TOTAL, ByteSizeValue.ofBytes(totalBytes));
 builder.field(TOTAL_IN_BYTES, totalBytes);
 
 builder.startObject(INVERTED_INDEX);
-builder.field(TOTAL, new ByteSizeValue(invertedIndexBytes));
+builder.field(TOTAL, ByteSizeValue.ofBytes(invertedIndexBytes));
 builder.field(TOTAL_IN_BYTES, invertedIndexBytes);
 builder.endObject();
 
-builder.field(STORED_FIELDS, new ByteSizeValue(storedFieldBytes));
+builder.field(STORED_FIELDS, ByteSizeValue.ofBytes(storedFieldBytes));
 builder.field(STORED_FIELDS_IN_BYTES, storedFieldBytes);
 
-builder.field(DOC_VALUES, new ByteSizeValue(docValuesBytes));
+builder.field(DOC_VALUES, ByteSizeValue.ofBytes(docValuesBytes));
 builder.field(DOC_VALUES_IN_BYTES, docValuesBytes);
 
-builder.field(POINTS, new ByteSizeValue(pointsBytes));
+builder.field(POINTS, ByteSizeValue.ofBytes(pointsBytes));
 builder.field(POINTS_IN_BYTES, pointsBytes);
 
-builder.field(NORMS, new ByteSizeValue(normsBytes));
+builder.field(NORMS, ByteSizeValue.ofBytes(normsBytes));
 builder.field(NORMS_IN_BYTES, normsBytes);
 
-builder.field(TERM_VECTORS, new ByteSizeValue(termVectorsBytes));
+builder.field(TERM_VECTORS, ByteSizeValue.ofBytes(termVectorsBytes));
 builder.field(TERM_VECTORS_IN_BYTES, termVectorsBytes);
 
-builder.field(KNN_VECTORS, new ByteSizeValue(knnVectorsBytes));
+builder.field(KNN_VECTORS, ByteSizeValue.ofBytes(knnVectorsBytes));
 builder.field(KNN_VECTORS_IN_BYTES, knnVectorsBytes);
 return builder;
 }
@@ -10,7 +10,6 @@
 
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.unit.ByteSizeUnit;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.xcontent.XContentBuilder;
 import org.elasticsearch.xcontent.XContentParser;
@@ -31,7 +30,7 @@ public MaxPrimaryShardSizeCondition(ByteSizeValue value) {
 
 public MaxPrimaryShardSizeCondition(StreamInput in) throws IOException {
 super(NAME, Type.MAX);
-this.value = new ByteSizeValue(in.readVLong(), ByteSizeUnit.BYTES);
+this.value = ByteSizeValue.ofBytes(in.readVLong());
 }
 
 @Override
@@ -10,7 +10,6 @@
 
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.unit.ByteSizeUnit;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.xcontent.XContentBuilder;
 import org.elasticsearch.xcontent.XContentParser;
@@ -31,7 +30,7 @@ public MaxSizeCondition(ByteSizeValue value) {
 
 public MaxSizeCondition(StreamInput in) throws IOException {
 super(NAME, Type.MAX);
-this.value = new ByteSizeValue(in.readVLong(), ByteSizeUnit.BYTES);
+this.value = ByteSizeValue.ofBytes(in.readVLong());
 }
 
 @Override
@@ -31,7 +31,7 @@ public MinPrimaryShardSizeCondition(ByteSizeValue value) {
 
 public MinPrimaryShardSizeCondition(StreamInput in) throws IOException {
 super(NAME, Type.MIN);
-this.value = new ByteSizeValue(in);
+this.value = ByteSizeValue.readFrom(in);
 }
 
 @Override
@@ -31,7 +31,7 @@ public MinSizeCondition(ByteSizeValue value) {
 
 public MinSizeCondition(StreamInput in) throws IOException {
 super(NAME, Type.MIN);
-this.value = new ByteSizeValue(in);
+this.value = ByteSizeValue.readFrom(in);
 }
 
 @Override
@@ -228,8 +228,8 @@ static Condition.Stats buildStats(@Nullable final IndexMetadata metadata, @Nulla
 return new Condition.Stats(
 docsStats == null ? 0 : docsStats.getCount(),
 metadata.getCreationDate(),
-new ByteSizeValue(docsStats == null ? 0 : docsStats.getTotalSizeInBytes()),
-new ByteSizeValue(maxPrimaryShardSize),
+ByteSizeValue.ofBytes(docsStats == null ? 0 : docsStats.getTotalSizeInBytes()),
+ByteSizeValue.ofBytes(maxPrimaryShardSize),
 maxPrimaryShardDocs
 );
 }

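The MinPrimaryShardSizeCondition and MinSizeCondition hunks above route wire deserialization through ByteSizeValue.readFrom(in) instead of the ByteSizeValue(StreamInput) constructor, so values coming off the wire also pass through the caching factory. A rough round-trip sketch under the same assumptions as the illustrative ByteSize class above (not the real ByteSizeValue), using plain java.io streams as stand-ins for Elasticsearch's StreamInput/StreamOutput:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

final class ByteSizeWireDemo {

    // Analogue of ByteSizeValue.readFrom(StreamInput): deserialize through the
    // factory so a stream full of zero-length sizes yields the shared ZERO
    // instance rather than a fresh object per read.
    static ByteSize readFrom(DataInputStream in) throws IOException {
        return ByteSize.ofBytes(in.readLong());
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bos)) {
            out.writeLong(0L); // e.g. an empty shard: 0 bytes on disk
        }
        ByteSize restored = readFrom(new DataInputStream(new ByteArrayInputStream(bos.toByteArray())));
        System.out.println(restored == ByteSize.ZERO); // true: the singleton is reused after deserialization
    }
}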