Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@

package org.elasticsearch.action.admin.cluster.desirednodes;

import org.elasticsearch.TransportVersion;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ValidateActions;
Expand All @@ -24,7 +25,7 @@
import java.util.Objects;

public class UpdateDesiredNodesRequest extends AcknowledgedRequest<UpdateDesiredNodesRequest> {
private static final Version DRY_RUN_VERSION = Version.V_8_4_0;
private static final TransportVersion DRY_RUN_VERSION = TransportVersion.V_8_4_0;

private final String historyID;
private final long version;
Expand Down Expand Up @@ -58,7 +59,7 @@ public UpdateDesiredNodesRequest(StreamInput in) throws IOException {
this.historyID = in.readString();
this.version = in.readLong();
this.nodes = in.readList(DesiredNode::readFrom);
if (in.getVersion().onOrAfter(DRY_RUN_VERSION)) {
if (in.getTransportVersion().onOrAfter(DRY_RUN_VERSION)) {
this.dryRun = in.readBoolean();
} else {
this.dryRun = false;
Expand All @@ -71,7 +72,7 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeString(historyID);
out.writeLong(version);
out.writeList(nodes);
if (out.getVersion().onOrAfter(DRY_RUN_VERSION)) {
if (out.getTransportVersion().onOrAfter(DRY_RUN_VERSION)) {
out.writeBoolean(dryRun);
}
}
Expand Down
14 changes: 7 additions & 7 deletions server/src/main/java/org/elasticsearch/cluster/ClusterInfo.java
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@

package org.elasticsearch.cluster;

import org.elasticsearch.Version;
import org.elasticsearch.TransportVersion;
import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
Expand Down Expand Up @@ -42,8 +42,8 @@ public class ClusterInfo implements ToXContentFragment, Writeable {

public static final ClusterInfo EMPTY = new ClusterInfo();

public static final Version DATA_SET_SIZE_SIZE_VERSION = Version.V_7_13_0;
public static final Version DATA_PATH_NEW_KEY_VERSION = Version.V_8_6_0;
public static final TransportVersion DATA_SET_SIZE_SIZE_VERSION = TransportVersion.V_7_13_0;
public static final TransportVersion DATA_PATH_NEW_KEY_VERSION = TransportVersion.V_8_6_0;

private final Map<String, DiskUsage> leastAvailableSpaceUsage;
private final Map<String, DiskUsage> mostAvailableSpaceUsage;
Expand Down Expand Up @@ -87,12 +87,12 @@ public ClusterInfo(StreamInput in) throws IOException {
this.leastAvailableSpaceUsage = in.readImmutableMap(StreamInput::readString, DiskUsage::new);
this.mostAvailableSpaceUsage = in.readImmutableMap(StreamInput::readString, DiskUsage::new);
this.shardSizes = in.readImmutableMap(StreamInput::readString, StreamInput::readLong);
if (in.getVersion().onOrAfter(DATA_SET_SIZE_SIZE_VERSION)) {
if (in.getTransportVersion().onOrAfter(DATA_SET_SIZE_SIZE_VERSION)) {
this.shardDataSetSizes = in.readImmutableMap(ShardId::new, StreamInput::readLong);
} else {
this.shardDataSetSizes = Map.of();
}
if (in.getVersion().onOrAfter(DATA_PATH_NEW_KEY_VERSION)) {
if (in.getTransportVersion().onOrAfter(DATA_PATH_NEW_KEY_VERSION)) {
this.dataPath = in.readImmutableMap(NodeAndShard::new, StreamInput::readString);
} else {
this.dataPath = in.readImmutableMap(nested -> NodeAndShard.from(new ShardRouting(nested)), StreamInput::readString);
Expand All @@ -109,10 +109,10 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeMap(this.leastAvailableSpaceUsage, StreamOutput::writeString, (o, v) -> v.writeTo(o));
out.writeMap(this.mostAvailableSpaceUsage, StreamOutput::writeString, (o, v) -> v.writeTo(o));
out.writeMap(this.shardSizes, StreamOutput::writeString, (o, v) -> o.writeLong(v == null ? -1 : v));
if (out.getVersion().onOrAfter(DATA_SET_SIZE_SIZE_VERSION)) {
if (out.getTransportVersion().onOrAfter(DATA_SET_SIZE_SIZE_VERSION)) {
out.writeMap(this.shardDataSetSizes, (o, s) -> s.writeTo(o), StreamOutput::writeLong);
}
if (out.getVersion().onOrAfter(DATA_PATH_NEW_KEY_VERSION)) {
if (out.getTransportVersion().onOrAfter(DATA_PATH_NEW_KEY_VERSION)) {
out.writeMap(this.dataPath, (o, k) -> k.writeTo(o), StreamOutput::writeString);
} else {
out.writeMap(this.dataPath, (o, k) -> createFakeShardRoutingFromNodeAndShard(k).writeTo(o), StreamOutput::writeString);
Expand Down
10 changes: 5 additions & 5 deletions server/src/main/java/org/elasticsearch/cluster/ClusterState.java
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@

package org.elasticsearch.cluster;

import org.elasticsearch.Version;
import org.elasticsearch.TransportVersion;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlocks;
Expand Down Expand Up @@ -841,7 +841,7 @@ public static ClusterState readFrom(StreamInput in, DiscoveryNode localNode) thr
Custom customIndexMetadata = in.readNamedWriteable(Custom.class);
builder.putCustom(customIndexMetadata.getWriteableName(), customIndexMetadata);
}
if (in.getVersion().before(Version.V_8_0_0)) {
if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) {
in.readVInt(); // used to be minimumMasterNodesOnPublishingMaster, which was used in 7.x for BWC with 6.x
}
return builder.build();
Expand All @@ -857,7 +857,7 @@ public void writeTo(StreamOutput out) throws IOException {
nodes.writeTo(out);
blocks.writeTo(out);
VersionedNamedWriteable.writeVersionedWritables(out, customs);
if (out.getVersion().before(Version.V_8_0_0)) {
if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) {
out.writeVInt(-1); // used to be minimumMasterNodesOnPublishingMaster, which was used in 7.x for BWC with 6.x
}
}
Expand Down Expand Up @@ -904,7 +904,7 @@ private static class ClusterStateDiff implements Diff<ClusterState> {
metadata = Metadata.readDiffFrom(in);
blocks = ClusterBlocks.readDiffFrom(in);
customs = DiffableUtils.readJdkMapDiff(in, DiffableUtils.getStringKeySerializer(), CUSTOM_VALUE_SERIALIZER);
if (in.getVersion().before(Version.V_8_0_0)) {
if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) {
in.readVInt(); // used to be minimumMasterNodesOnPublishingMaster, which was used in 7.x for BWC with 6.x
}
}
Expand All @@ -920,7 +920,7 @@ public void writeTo(StreamOutput out) throws IOException {
metadata.writeTo(out);
blocks.writeTo(out);
customs.writeTo(out);
if (out.getVersion().before(Version.V_8_0_0)) {
if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) {
out.writeVInt(-1); // used to be minimumMasterNodesOnPublishingMaster, which was used in 7.x for BWC with 6.x
}
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -190,10 +190,10 @@ public TransportVersion getMinimalSupportedVersion() {
return TransportVersion.CURRENT.minimumCompatibilityVersion();
}

private static final Version DIFFABLE_VERSION = Version.V_8_5_0;
private static final TransportVersion DIFFABLE_VERSION = TransportVersion.V_8_5_0;

public static NamedDiff<Custom> readDiffFrom(StreamInput in) throws IOException {
if (in.getVersion().onOrAfter(DIFFABLE_VERSION)) {
if (in.getTransportVersion().onOrAfter(DIFFABLE_VERSION)) {
return new SnapshotInProgressDiff(in);
}
return readDiffFrom(Custom.class, TYPE, in);
Expand Down Expand Up @@ -1625,7 +1625,7 @@ public String getWriteableName() {
@Override
public void writeTo(StreamOutput out) throws IOException {
assert after != null : "should only write instances that were diffed from this node's state";
if (out.getVersion().onOrAfter(DIFFABLE_VERSION)) {
if (out.getTransportVersion().onOrAfter(DIFFABLE_VERSION)) {
mapDiff.writeTo(out);
} else {
new SimpleDiffable.CompleteDiff<>(after).writeTo(out);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
*/
package org.elasticsearch.cluster.coordination;

import org.elasticsearch.Version;
import org.elasticsearch.TransportVersion;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.common.CheckedSupplier;
import org.elasticsearch.common.bytes.BytesReference;
Expand All @@ -28,10 +28,10 @@ public class ValidateJoinRequest extends TransportRequest {

public ValidateJoinRequest(StreamInput in) throws IOException {
super(in);
if (in.getVersion().onOrAfter(Version.V_8_3_0)) {
if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0)) {
// recent versions send a BytesTransportRequest containing a compressed representation of the state
final var bytes = in.readReleasableBytesReference();
final var version = in.getVersion();
final var version = in.getTransportVersion();
final var namedWriteableRegistry = in.namedWriteableRegistry();
this.stateSupplier = () -> readCompressed(version, bytes, namedWriteableRegistry);
this.refCounted = bytes;
Expand All @@ -43,16 +43,19 @@ public ValidateJoinRequest(StreamInput in) throws IOException {
}
}

private static ClusterState readCompressed(Version version, BytesReference bytes, NamedWriteableRegistry namedWriteableRegistry)
throws IOException {
private static ClusterState readCompressed(
TransportVersion version,
BytesReference bytes,
NamedWriteableRegistry namedWriteableRegistry
) throws IOException {
try (
var bytesStreamInput = bytes.streamInput();
var in = new NamedWriteableAwareStreamInput(
new InputStreamStreamInput(CompressorFactory.COMPRESSOR.threadLocalInputStream(bytesStreamInput)),
namedWriteableRegistry
)
) {
in.setVersion(version);
in.setTransportVersion(version);
return ClusterState.readFrom(in, null);
}
}
Expand All @@ -64,7 +67,7 @@ public ValidateJoinRequest(ClusterState state) {

@Override
public void writeTo(StreamOutput out) throws IOException {
assert out.getVersion().before(Version.V_8_3_0);
assert out.getTransportVersion().before(TransportVersion.V_8_3_0);
super.writeTo(out);
stateSupplier.get().writeTo(out);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@

package org.elasticsearch.cluster.metadata;

import org.elasticsearch.Version;
import org.elasticsearch.TransportVersion;
import org.elasticsearch.cluster.Diff;
import org.elasticsearch.cluster.SimpleDiffable;
import org.elasticsearch.common.Strings;
Expand Down Expand Up @@ -331,12 +331,12 @@ public DataStreamTemplate(boolean hidden, boolean allowCustomRouting) {

DataStreamTemplate(StreamInput in) throws IOException {
hidden = in.readBoolean();
if (in.getVersion().onOrAfter(Version.V_8_0_0)) {
if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) {
allowCustomRouting = in.readBoolean();
} else {
allowCustomRouting = false;
}
if (in.getVersion().onOrAfter(Version.V_8_1_0) && in.getVersion().before(Version.V_8_3_0)) {
if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0) && in.getTransportVersion().before(TransportVersion.V_8_3_0)) {
// Accidentally included index_mode to binary node to node protocol in previous releases.
// (index_mode is removed and was part of the code base when tsdb was behind a feature flag)
// (index_mode was behind a feature in the xcontent parser, so it could never actually be used)
Expand Down Expand Up @@ -379,10 +379,11 @@ public boolean isAllowCustomRouting() {
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeBoolean(hidden);
if (out.getVersion().onOrAfter(Version.V_8_0_0)) {
if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) {
out.writeBoolean(allowCustomRouting);
}
if (out.getVersion().onOrAfter(Version.V_8_1_0) && out.getVersion().before(Version.V_8_3_0)) {
if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)
&& out.getTransportVersion().before(TransportVersion.V_8_3_0)) {
// See comment in constructor.
out.writeBoolean(false);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.PointValues;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.TransportVersion;
import org.elasticsearch.cluster.Diff;
import org.elasticsearch.cluster.SimpleDiffable;
import org.elasticsearch.common.Strings;
Expand Down Expand Up @@ -529,8 +529,8 @@ public DataStream(StreamInput in) throws IOException {
in.readBoolean(),
in.readBoolean(),
in.readBoolean(),
in.getVersion().onOrAfter(Version.V_8_0_0) ? in.readBoolean() : false,
in.getVersion().onOrAfter(Version.V_8_1_0) ? in.readOptionalEnum(IndexMode.class) : null
in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0) ? in.readBoolean() : false,
in.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0) ? in.readOptionalEnum(IndexMode.class) : null
);
}

Expand All @@ -553,10 +553,10 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeBoolean(hidden);
out.writeBoolean(replicated);
out.writeBoolean(system);
if (out.getVersion().onOrAfter(Version.V_8_0_0)) {
if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) {
out.writeBoolean(allowCustomRouting);
}
if (out.getVersion().onOrAfter(Version.V_8_1_0)) {
if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) {
out.writeOptionalEnum(indexMode);
}
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
*/
package org.elasticsearch.cluster.metadata;

import org.elasticsearch.Version;
import org.elasticsearch.TransportVersion;
import org.elasticsearch.cluster.Diff;
import org.elasticsearch.cluster.SimpleDiffable;
import org.elasticsearch.common.ParsingException;
Expand Down Expand Up @@ -167,7 +167,7 @@ public DataStreamAlias(StreamInput in) throws IOException {
this.name = in.readString();
this.dataStreams = in.readStringList();
this.writeDataStream = in.readOptionalString();
if (in.getVersion().onOrAfter(Version.V_8_7_0)) {
if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) {
this.dataStreamToFilterMap = in.readMap(StreamInput::readString, CompressedXContent::readCompressedString);
} else {
this.dataStreamToFilterMap = new HashMap<>();
Expand Down Expand Up @@ -398,16 +398,16 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeString(name);
out.writeStringCollection(dataStreams);
out.writeOptionalString(writeDataStream);
if (out.getVersion().onOrAfter(Version.V_8_7_0)) {
if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) {
out.writeMap(dataStreamToFilterMap, StreamOutput::writeString, (out1, filter) -> filter.writeTo(out1));
} else {
if (dataStreamToFilterMap.isEmpty()) {
out.writeBoolean(false);
} else {
/*
* Versions before 8.7 incorrectly only allowed a single filter for all datastreams, and randomly dropped all others. We
* replicate that buggy behavior here if we have to write to an older node because there is no way to send multiple
* filters to an older node.
* TransportVersions before 8.7 incorrectly only allowed a single filter for all datastreams,
* and randomly dropped all others. We replicate that buggy behavior here if we have to write
* to an older node because there is no way to send multiple filters to an older node.
*/
dataStreamToFilterMap.values().iterator().next().writeTo(out);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -173,7 +173,7 @@ public static DesiredNode readFrom(StreamInput in) throws IOException {
final var settings = Settings.readSettingsFromStream(in);
final Processors processors;
final ProcessorsRange processorsRange;
if (in.getVersion().onOrAfter(RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION)) {
if (in.getTransportVersion().onOrAfter(RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION.transportVersion)) {
processors = in.readOptionalWriteable(Processors::readFrom);
processorsRange = in.readOptionalWriteable(ProcessorsRange::readFrom);
} else {
Expand All @@ -189,7 +189,7 @@ public static DesiredNode readFrom(StreamInput in) throws IOException {
@Override
public void writeTo(StreamOutput out) throws IOException {
settings.writeTo(out);
if (out.getVersion().onOrAfter(RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION)) {
if (out.getTransportVersion().onOrAfter(RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION.transportVersion)) {
out.writeOptionalWriteable(processors);
out.writeOptionalWriteable(processorsRange);
} else {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@

package org.elasticsearch.cluster.metadata;

import org.elasticsearch.TransportVersion;
import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
Expand All @@ -30,7 +31,7 @@ public record DesiredNodeWithStatus(DesiredNode desiredNode, Status status)
ToXContentObject,
Comparable<DesiredNodeWithStatus> {

private static final Version STATUS_TRACKING_SUPPORT_VERSION = Version.V_8_4_0;
private static final TransportVersion STATUS_TRACKING_SUPPORT_VERSION = TransportVersion.V_8_4_0;
private static final ParseField STATUS_FIELD = new ParseField("status");

public static final ConstructingObjectParser<DesiredNodeWithStatus, Void> PARSER = new ConstructingObjectParser<>(
Expand Down Expand Up @@ -77,7 +78,7 @@ public String externalId() {
public static DesiredNodeWithStatus readFrom(StreamInput in) throws IOException {
final var desiredNode = DesiredNode.readFrom(in);
final Status status;
if (in.getVersion().onOrAfter(STATUS_TRACKING_SUPPORT_VERSION)) {
if (in.getTransportVersion().onOrAfter(STATUS_TRACKING_SUPPORT_VERSION)) {
status = Status.fromValue(in.readShort());
} else {
// During upgrades, we consider all desired nodes as PENDING
Expand All @@ -93,7 +94,7 @@ public static DesiredNodeWithStatus readFrom(StreamInput in) throws IOException
@Override
public void writeTo(StreamOutput out) throws IOException {
desiredNode.writeTo(out);
if (out.getVersion().onOrAfter(STATUS_TRACKING_SUPPORT_VERSION)) {
if (out.getTransportVersion().onOrAfter(STATUS_TRACKING_SUPPORT_VERSION)) {
out.writeShort(status.value);
}
}
Expand Down
Loading