Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -40,14 +40,14 @@ public class AffinityPlacementConfig implements PlacementPluginConfig {
*
* <p>Nodes on which this system property is not defined are considered being in the same
* Availability Zone {@link #UNDEFINED_AVAILABILITY_ZONE} (hopefully the value of this constant is
* not the name of a real Availability Zone :).
* not the name of a real Availability Zone).
*/
public static final String AVAILABILITY_ZONE_SYSPROP = "availability_zone";

/**
* Name of the system property on a node indicating the type of replicas allowed on that node. The
* value of that system property is a comma separated list or a single string of value names of
* {@link org.apache.solr.cluster.Replica.ReplicaType} (case insensitive). If that property is not
* {@link org.apache.solr.cluster.Replica.ReplicaType} (case-insensitive). If that property is not
* defined, that node is considered accepting all replica types (i.e. undefined is equivalent to
* {@code "NRT,Pull,tlog"}).
*/
Expand Down Expand Up @@ -136,8 +136,8 @@ public class AffinityPlacementConfig implements PlacementPluginConfig {

/**
* Determines the maximum number of replicas of a particular type of a particular shard that can
* be placed within a single domain (as defined by the @link #SPREAD_DOMAIN_SYSPROP} System
* property.
* be placed within a single domain (as defined by the {@link #SPREAD_DOMAIN_SYSPROP} System
* property).
*/
@JsonProperty public Integer maxReplicasPerShardInDomain = -1;

Expand All @@ -163,7 +163,7 @@ public AffinityPlacementConfig(long minimalFreeDiskGB, long prioritizedFreeDiskG
* @param prioritizedFreeDiskGB prioritized free disk GB.
* @param withCollection configuration of co-located collections: keys are primary collection
* names and values are secondary collection names.
* @param collectionNodeType configuration of reequired node types per collection. Keys are
* @param collectionNodeType configuration of required node types per collection. Keys are
* collection names and values are comma-separated lists of required node types.
*/
public AffinityPlacementConfig(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -77,8 +77,8 @@
* }' http://localhost:8983/api/cluster/plugin
* </pre>
*
* <p>In order to delete the placement-plugin section (and to fallback to either Legacy or rule
* based placement if configured for a collection), execute:
* <p>In order to delete the placement-plugin section (and fallback to either Legacy or rule based
* placement if configured for a collection), execute:
*
* <pre>
*
Expand Down Expand Up @@ -295,7 +295,7 @@ protected Map<Node, WeightedNode> getBaseWeightedNodes(
}
}

// If there are not multiple spreadDomains, then there is nothing to spread across
// only spread across if there are multiple spreadDomains
Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I hope this is a clearer comment...

if (affinityPlacementContext.allSpreadDomains.size() < 2) {
affinityPlacementContext.doSpreadAcrossDomains = false;
}
Expand All @@ -307,8 +307,7 @@ AffinityNode newNodeFromMetrics(
Node node,
AttributeValues attrValues,
AffinityPlacementContext affinityPlacementContext,
boolean skipNodesWithErrors)
throws PlacementException {
boolean skipNodesWithErrors) {
Set<Replica.ReplicaType> supportedReplicaTypes =
attrValues.getSystemProperty(node, AffinityPlacementConfig.REPLICA_TYPE_SYSPROP).stream()
.flatMap(s -> Arrays.stream(s.split(",")))
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,7 @@ public List<PlacementPlan> computePlacements(
}

List<WeightedNode> nodesForRequest =
weightedNodes.stream().filter(request::isTargetingNode).collect(Collectors.toList());
weightedNodes.stream().filter(request::isTargetingNode).toList();
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm not sure if the read-only contract is different between these two options, but good to check. Same down below.

Copy link
Copy Markdown
Contributor Author

@epugh epugh Mar 16, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hmm, I'm hoping there are no differences, because tests don't fail and IntelliJ recommended it! Let's see what Copilot says!

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Claude looked at it and explained about the mutability thing... It's interesting that nothing in the class speaks to mutability, which feels like a trap. I mean, do I really need to think about mutability? It's like asking me to think about memory management!!! What are we? C code?


SolrCollection solrCollection = request.getCollection();
// Now place all replicas of all shards on available nodes
Expand Down Expand Up @@ -241,7 +241,7 @@ public BalancePlan computeBalancing(
List<Replica> availableReplicasToMove =
highestWeight.getAllReplicasOnNode().stream()
.sorted(Comparator.comparing(Replica::getReplicaName))
.collect(Collectors.toList());
.toList();
int combinedNodeWeights = highestWeight.calcWeight() + lowestWeight.calcWeight();
for (Replica r : availableReplicasToMove) {
// Only continue if the replica can be removed from the old node and moved to the new node
Expand Down Expand Up @@ -284,7 +284,7 @@ public BalancePlan computeBalancing(
break;
}
}
// For now we do not have any way to see if there are out-of-date notes in the middle of the
// For now, we do not have any way to see if there are out-of-date nodes in the middle of the
// TreeSet. Therefore, we need to re-sort this list after every selection. In the future, we
// should find a way to re-sort the out-of-date nodes without having to sort all nodes.
traversedHighNodes.addAll(orderedNodes);
Expand Down Expand Up @@ -442,12 +442,6 @@ public Set<String> getShardsOnNode(String collection) {
return replicas.getOrDefault(collection, Collections.emptyMap()).keySet();
}

public boolean hasShardOnNode(Shard shard) {
return replicas
.getOrDefault(shard.getCollection().getName(), Collections.emptyMap())
.containsKey(shard.getShardName());
}

public Set<Replica> getReplicasForShardOnNode(Shard shard) {
return Optional.ofNullable(replicas.get(shard.getCollection().getName()))
.map(m -> m.get(shard.getShardName()))
Expand Down Expand Up @@ -775,15 +769,6 @@ public void add(WeightedNode node) {
}
}

/**
* Get the number of nodes in the heap.
*
* @return number of nodes
*/
public int size() {
return size;
}

/**
* Check if the heap is empty.
*
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@
/**
* Factory for creating {@link RandomPlacementPlugin}, a placement plugin implementing random
* placement for new collection creation while preventing two replicas of same shard from being
* placed on same node..
* placed on same node.
*
* <p>See {@link RandomNode} for information on how this PlacementFactory weights nodes.
*
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -748,7 +748,6 @@ public void testFreeDiskConstraintsWithNewReplicas() throws Exception {
int NUM_NODES = 3;
Builders.ClusterBuilder clusterBuilder =
Builders.newClusterBuilder().initializeLiveNodes(NUM_NODES);
Node smallNode = null;
for (int i = 0; i < NUM_NODES; i++) {
Builders.NodeBuilder nodeBuilder = clusterBuilder.getLiveNodeBuilders().get(i);
// Act as if the two replicas were placed on nodes 1 and 2
Expand Down
Loading