Skip to content

Commit

Permalink
Remove RU code and Versioned interfaces
Browse files Browse the repository at this point in the history
  • Loading branch information
Matko Medenjak committed Apr 25, 2019
1 parent fe1fd93 commit dd88114
Show file tree
Hide file tree
Showing 44 changed files with 86 additions and 216 deletions.
Expand Up @@ -22,7 +22,6 @@
import com.hazelcast.nio.ObjectDataInput;
import com.hazelcast.nio.ObjectDataOutput;
import com.hazelcast.nio.serialization.IdentifiedDataSerializable;
import com.hazelcast.nio.serialization.impl.Versioned;
import com.hazelcast.spi.serialization.SerializationService;
import com.hazelcast.spi.tenantcontrol.TenantControl;

Expand All @@ -41,15 +40,16 @@
* @param <V> the value type
* @since 3.9
*/
public class PreJoinCacheConfig<K, V> extends CacheConfig<K, V> implements Versioned, IdentifiedDataSerializable {
public class PreJoinCacheConfig<K, V> extends CacheConfig<K, V> implements IdentifiedDataSerializable {
public PreJoinCacheConfig() {
super();
}

/**
* Constructor that copies given {@code cacheConfig}'s properties to a new {@link PreJoinCacheConfig}. It is assumed that
* the given {@code cacheConfig}'s key-value types have already been resolved to loaded classes.
* @param cacheConfig the original {@link CacheConfig} to copy into a new {@link PreJoinCacheConfig}
*
* @param cacheConfig the original {@link CacheConfig} to copy into a new {@link PreJoinCacheConfig}
*/
public PreJoinCacheConfig(CacheConfig cacheConfig) {
this(cacheConfig, true);
Expand Down
Expand Up @@ -22,15 +22,14 @@
import com.hazelcast.nio.ObjectDataOutput;
import com.hazelcast.nio.serialization.Data;
import com.hazelcast.nio.serialization.IdentifiedDataSerializable;
import com.hazelcast.nio.serialization.impl.Versioned;

import java.io.IOException;

/**
* Default heap based implementation of {@link com.hazelcast.cache.CacheEntryView}.
*/
public class DefaultCacheEntryView
implements CacheEntryView<Data, Data>, IdentifiedDataSerializable, Versioned {
implements CacheEntryView<Data, Data>, IdentifiedDataSerializable {

private Data key;
private Data value;
Expand Down
Expand Up @@ -18,7 +18,6 @@

import com.hazelcast.internal.eviction.Evictable;
import com.hazelcast.internal.eviction.Expirable;
import com.hazelcast.nio.serialization.impl.Versioned;

/**
* <p>
Expand All @@ -28,7 +27,7 @@
*
* @param <V> the type of the value stored by this {@link CacheRecord}
*/
public interface CacheRecord<V, E> extends Expirable, Evictable<V>, Versioned {
public interface CacheRecord<V, E> extends Expirable, Evictable<V> {

/**
* Represents invalid (not set) time for creation time, expiration time, access time, etc...
Expand Down Expand Up @@ -75,12 +74,14 @@ public interface CacheRecord<V, E> extends Expirable, Evictable<V>, Versioned {

/**
* Sets the expiry policy for this record.
*
     * @param expiryPolicy the expiry policy to associate with this record
*/
void setExpiryPolicy(E expiryPolicy);

/**
* Gets the expiryPolicy associated with this record.
*
     * @return the expiry policy associated with this record
*/
E getExpiryPolicy();
Expand Down
Expand Up @@ -17,21 +17,19 @@
package com.hazelcast.cardinality.impl.hyperloglog;

import com.hazelcast.nio.serialization.IdentifiedDataSerializable;
import com.hazelcast.nio.serialization.impl.Versioned;

/**
 * HyperLogLog is a redundant and highly available distributed data-structure used to estimate the cardinality
 * of unique items in significantly sized data sets. HyperLogLog uses P^2 byte registers for storage
 * and computation.
*
* <p>
 * HyperLogLog is an implementation of the algorithms described in the two famous papers
* <ul>
* <li>http://algo.inria.fr/flajolet/Publications/FlFuGaMe07.pdf</li>
* <li>http://static.googleusercontent.com/media/research.google.com/en//pubs/archive/40671.pdf</li>
* <li>http://algo.inria.fr/flajolet/Publications/FlFuGaMe07.pdf</li>
* <li>http://static.googleusercontent.com/media/research.google.com/en//pubs/archive/40671.pdf</li>
* </ul>
*
*/
public interface HyperLogLog extends IdentifiedDataSerializable, Versioned {
public interface HyperLogLog extends IdentifiedDataSerializable {

/**
* Computes a new estimate for the current status of the registers.
Expand Down
Expand Up @@ -20,14 +20,13 @@
import com.hazelcast.cardinality.impl.hyperloglog.HyperLogLog;
import com.hazelcast.nio.ObjectDataInput;
import com.hazelcast.nio.ObjectDataOutput;
import com.hazelcast.nio.serialization.impl.Versioned;

import java.io.IOException;

import static com.hazelcast.cardinality.impl.hyperloglog.impl.HyperLogLogEncoding.SPARSE;

public class HyperLogLogImpl
implements HyperLogLog, Versioned {
implements HyperLogLog {

private static final int LOWER_P_BOUND = 4;
private static final int UPPER_P_BOUND = 16;
Expand Down
Expand Up @@ -20,7 +20,6 @@
import com.hazelcast.nio.Bits;
import com.hazelcast.nio.ObjectDataInput;
import com.hazelcast.nio.ObjectDataOutput;
import com.hazelcast.nio.serialization.impl.Versioned;

import java.io.IOException;
import java.util.Arrays;
Expand All @@ -32,7 +31,7 @@
*/
@SuppressWarnings("checkstyle:magicnumber")
public class SparseHyperLogLogEncoder
implements HyperLogLogEncoder, Versioned {
implements HyperLogLogEncoder {

private static final int P_PRIME = 25;
private static final int P_PRIME_MASK = 0x1ffffff;
Expand Down Expand Up @@ -230,7 +229,7 @@ private void mergeAndResetTmp() {

/**
* Variable length difference encoding for sorted integer lists.
*
* <p>
* Single byte, (7 bits) used to store the value if less or equal to 127,
* or more bytes for larger numbers, having the MSB bit set to 1 to signify
* the next_flag. Also, numbers are stored as a diff from the previous one
Expand Down
Expand Up @@ -21,7 +21,6 @@
import com.hazelcast.nio.ObjectDataOutput;
import com.hazelcast.nio.serialization.Data;
import com.hazelcast.nio.serialization.IdentifiedDataSerializable;
import com.hazelcast.nio.serialization.impl.Versioned;
import com.hazelcast.spi.ObjectNamespace;
import com.hazelcast.spi.WaitNotifyKey;
import com.hazelcast.util.ConcurrencyUtil;
Expand All @@ -43,7 +42,7 @@
import static com.hazelcast.concurrent.lock.ObjectNamespaceSerializationHelper.writeNamespaceCompatibly;
import static com.hazelcast.util.SetUtil.createHashSet;

public final class LockStoreImpl implements IdentifiedDataSerializable, LockStore, Versioned {
public final class LockStoreImpl implements IdentifiedDataSerializable, LockStore {

private final transient ConstructorFunction<Data, LockResourceImpl> lockConstructor =
new ConstructorFunction<Data, LockResourceImpl>() {
Expand Down
Expand Up @@ -23,7 +23,6 @@
import com.hazelcast.nio.ObjectDataOutput;
import com.hazelcast.nio.serialization.Data;
import com.hazelcast.nio.serialization.IdentifiedDataSerializable;
import com.hazelcast.nio.serialization.impl.Versioned;
import com.hazelcast.spi.LockInterceptorService;
import com.hazelcast.spi.NamedOperation;
import com.hazelcast.spi.ObjectNamespace;
Expand All @@ -40,7 +39,7 @@

public abstract class AbstractLockOperation extends Operation
implements PartitionAwareOperation, IdentifiedDataSerializable, NamedOperation,
ServiceNamespaceAware, Versioned {
ServiceNamespaceAware {

public static final int ANY_THREAD = 0;

Expand Down
Expand Up @@ -23,7 +23,6 @@
import com.hazelcast.nio.ObjectDataInput;
import com.hazelcast.nio.ObjectDataOutput;
import com.hazelcast.nio.serialization.IdentifiedDataSerializable;
import com.hazelcast.nio.serialization.impl.Versioned;
import com.hazelcast.spi.ObjectNamespace;
import com.hazelcast.spi.Operation;
import com.hazelcast.spi.ServiceNamespace;
Expand All @@ -33,7 +32,7 @@
import java.util.LinkedList;

public class LockReplicationOperation extends Operation
implements IdentifiedDataSerializable, Versioned {
implements IdentifiedDataSerializable {

private final Collection<LockStoreImpl> locks = new LinkedList<LockStoreImpl>();

Expand All @@ -45,7 +44,7 @@ public LockReplicationOperation(LockStoreContainer container, int partitionId, i
}

public LockReplicationOperation(LockStoreContainer container, int partitionId, int replicaIndex,
Collection<ServiceNamespace> namespaces) {
Collection<ServiceNamespace> namespaces) {

setPartitionId(partitionId).setReplicaIndex(replicaIndex);

Expand Down
Expand Up @@ -19,7 +19,6 @@
import com.hazelcast.nio.ObjectDataInput;
import com.hazelcast.nio.ObjectDataOutput;
import com.hazelcast.nio.serialization.IdentifiedDataSerializable;
import com.hazelcast.nio.serialization.impl.Versioned;
import com.hazelcast.spi.merge.HyperLogLogMergePolicy;

import java.io.IOException;
Expand All @@ -33,7 +32,7 @@
/**
* Configuration options for the {@link com.hazelcast.cardinality.CardinalityEstimator}
*/
public class CardinalityEstimatorConfig implements IdentifiedDataSerializable, Versioned, NamedConfig {
public class CardinalityEstimatorConfig implements IdentifiedDataSerializable, NamedConfig {

/**
* The number of sync backups per estimator
Expand Down
Expand Up @@ -19,7 +19,6 @@
import com.hazelcast.nio.ObjectDataInput;
import com.hazelcast.nio.ObjectDataOutput;
import com.hazelcast.nio.serialization.IdentifiedDataSerializable;
import com.hazelcast.nio.serialization.impl.Versioned;
import com.hazelcast.spi.merge.SplitBrainMergeTypeProvider;
import com.hazelcast.spi.merge.SplitBrainMergeTypes;

Expand All @@ -39,7 +38,7 @@
* @param <T> Type of Collection such as List, Set
*/
public abstract class CollectionConfig<T extends CollectionConfig>
implements SplitBrainMergeTypeProvider, IdentifiedDataSerializable, Versioned, NamedConfig {
implements SplitBrainMergeTypeProvider, IdentifiedDataSerializable, NamedConfig {

/**
* Default maximum size for the Configuration.
Expand Down
Expand Up @@ -19,7 +19,6 @@
import com.hazelcast.nio.ObjectDataInput;
import com.hazelcast.nio.ObjectDataOutput;
import com.hazelcast.nio.serialization.IdentifiedDataSerializable;
import com.hazelcast.nio.serialization.impl.Versioned;

import java.io.IOException;

Expand All @@ -31,7 +30,7 @@
*
* @since 3.10
*/
public class CountDownLatchConfig implements IdentifiedDataSerializable, Versioned, NamedConfig {
public class CountDownLatchConfig implements IdentifiedDataSerializable, NamedConfig {

private transient CountDownLatchConfigReadOnly readOnly;

Expand Down
Expand Up @@ -20,7 +20,6 @@
import com.hazelcast.nio.ObjectDataInput;
import com.hazelcast.nio.ObjectDataOutput;
import com.hazelcast.nio.serialization.IdentifiedDataSerializable;
import com.hazelcast.nio.serialization.impl.Versioned;

import java.io.IOException;

Expand All @@ -30,7 +29,7 @@
/**
* Contains the configuration for an {@link DurableExecutorService}.
*/
public class DurableExecutorConfig implements IdentifiedDataSerializable, Versioned, NamedConfig {
public class DurableExecutorConfig implements IdentifiedDataSerializable, NamedConfig {

/**
* The number of executor threads per Member for the Executor based on this configuration.
Expand Down
Expand Up @@ -19,14 +19,13 @@
import com.hazelcast.nio.ObjectDataInput;
import com.hazelcast.nio.ObjectDataOutput;
import com.hazelcast.nio.serialization.IdentifiedDataSerializable;
import com.hazelcast.nio.serialization.impl.Versioned;

import java.io.IOException;

/**
* Contains the configuration for an {@link com.hazelcast.core.IExecutorService}.
*/
public class ExecutorConfig implements IdentifiedDataSerializable, Versioned, NamedConfig {
public class ExecutorConfig implements IdentifiedDataSerializable, NamedConfig {

/**
* The number of executor threads per Member for the Executor based on this configuration.
Expand Down
34 changes: 13 additions & 21 deletions hazelcast/src/main/java/com/hazelcast/config/MapConfig.java
Expand Up @@ -16,13 +16,11 @@

package com.hazelcast.config;

import com.hazelcast.internal.cluster.Versions;
import com.hazelcast.map.eviction.MapEvictionPolicy;
import com.hazelcast.map.merge.PutIfAbsentMapMergePolicy;
import com.hazelcast.nio.ObjectDataInput;
import com.hazelcast.nio.ObjectDataOutput;
import com.hazelcast.nio.serialization.IdentifiedDataSerializable;
import com.hazelcast.nio.serialization.impl.Versioned;
import com.hazelcast.spi.merge.SplitBrainMergePolicy;
import com.hazelcast.spi.merge.SplitBrainMergeTypeProvider;
import com.hazelcast.spi.merge.SplitBrainMergeTypes;
Expand All @@ -43,7 +41,7 @@
/**
* Contains the configuration for an {@link com.hazelcast.core.IMap}.
*/
public class MapConfig implements SplitBrainMergeTypeProvider, IdentifiedDataSerializable, Versioned, NamedConfig {
public class MapConfig implements SplitBrainMergeTypeProvider, IdentifiedDataSerializable, NamedConfig {

/**
* The minimum number of backups
Expand Down Expand Up @@ -199,12 +197,12 @@ public MapConfig(MapConfig config) {
this.statisticsEnabled = config.statisticsEnabled;
this.mergePolicyConfig = config.mergePolicyConfig;
this.wanReplicationRef = config.wanReplicationRef != null ? new WanReplicationRef(config.wanReplicationRef) : null;
this.entryListenerConfigs = new ArrayList<EntryListenerConfig>(config.getEntryListenerConfigs());
this.entryListenerConfigs = new ArrayList<>(config.getEntryListenerConfigs());
this.partitionLostListenerConfigs =
new ArrayList<MapPartitionLostListenerConfig>(config.getPartitionLostListenerConfigs());
this.mapIndexConfigs = new ArrayList<MapIndexConfig>(config.getMapIndexConfigs());
this.mapAttributeConfigs = new ArrayList<MapAttributeConfig>(config.getMapAttributeConfigs());
this.queryCacheConfigs = new ArrayList<QueryCacheConfig>(config.getQueryCacheConfigs());
new ArrayList<>(config.getPartitionLostListenerConfigs());
this.mapIndexConfigs = new ArrayList<>(config.getMapIndexConfigs());
this.mapAttributeConfigs = new ArrayList<>(config.getMapAttributeConfigs());
this.queryCacheConfigs = new ArrayList<>(config.getQueryCacheConfigs());
this.partitioningStrategyConfig = config.partitioningStrategyConfig != null
? new PartitioningStrategyConfig(config.getPartitioningStrategyConfig()) : null;
this.quorumName = config.quorumName;
Expand Down Expand Up @@ -650,7 +648,7 @@ public MapConfig addEntryListenerConfig(EntryListenerConfig listenerConfig) {

public List<EntryListenerConfig> getEntryListenerConfigs() {
if (entryListenerConfigs == null) {
entryListenerConfigs = new ArrayList<EntryListenerConfig>();
entryListenerConfigs = new ArrayList<>();
}
return entryListenerConfigs;
}
Expand All @@ -667,7 +665,7 @@ public MapConfig addMapPartitionLostListenerConfig(MapPartitionLostListenerConfi

public List<MapPartitionLostListenerConfig> getPartitionLostListenerConfigs() {
if (partitionLostListenerConfigs == null) {
partitionLostListenerConfigs = new ArrayList<MapPartitionLostListenerConfig>();
partitionLostListenerConfigs = new ArrayList<>();
}

return partitionLostListenerConfigs;
Expand All @@ -686,7 +684,7 @@ public MapConfig addMapIndexConfig(MapIndexConfig mapIndexConfig) {

public List<MapIndexConfig> getMapIndexConfigs() {
if (mapIndexConfigs == null) {
mapIndexConfigs = new ArrayList<MapIndexConfig>();
mapIndexConfigs = new ArrayList<>();
}
return mapIndexConfigs;
}
Expand All @@ -703,7 +701,7 @@ public MapConfig addMapAttributeConfig(MapAttributeConfig mapAttributeConfig) {

public List<MapAttributeConfig> getMapAttributeConfigs() {
if (mapAttributeConfigs == null) {
mapAttributeConfigs = new ArrayList<MapAttributeConfig>();
mapAttributeConfigs = new ArrayList<>();
}
return mapAttributeConfigs;
}
Expand Down Expand Up @@ -759,7 +757,7 @@ public MapConfig addQueryCacheConfig(QueryCacheConfig queryCacheConfig) {
*/
public List<QueryCacheConfig> getQueryCacheConfigs() {
if (queryCacheConfigs == null) {
queryCacheConfigs = new ArrayList<QueryCacheConfig>();
queryCacheConfigs = new ArrayList<>();
}
return queryCacheConfigs;
}
Expand Down Expand Up @@ -1094,10 +1092,7 @@ public void writeData(ObjectDataOutput out) throws IOException {
out.writeObject(partitioningStrategyConfig);
out.writeUTF(quorumName);
out.writeObject(hotRestartConfig);
// RU_COMPAT_3_11
if (out.getVersion().isGreaterOrEqual(Versions.V3_12)) {
out.writeShort(metadataPolicy.getId());
}
out.writeShort(metadataPolicy.getId());
}

@Override
Expand Down Expand Up @@ -1126,9 +1121,6 @@ public void readData(ObjectDataInput in) throws IOException {
partitioningStrategyConfig = in.readObject();
quorumName = in.readUTF();
hotRestartConfig = in.readObject();
// RU_COMPAT_3_11
if (in.getVersion().isGreaterOrEqual(Versions.V3_12)) {
metadataPolicy = MetadataPolicy.getById(in.readShort());
}
metadataPolicy = MetadataPolicy.getById(in.readShort());
}
}

0 comments on commit dd88114

Please sign in to comment.