Fix WAN events fired after split-brain merges where values don't change [HZ-2620] #24928

Merged · 14 commits · Aug 10, 2023
Changes from 12 commits
hazelcast/src/main/java/com/hazelcast/cache/impl/AbstractCacheRecordStore.java
@@ -1782,7 +1782,7 @@ public Set<Data> loadAll(Set<Data> keys, boolean replaceExistingValues) {
}

@Override
- public CacheRecord merge(CacheMergeTypes<Object, Object> mergingEntry,
+ public CacheMergeResponse merge(CacheMergeTypes<Object, Object> mergingEntry,
SplitBrainMergePolicy<Object, CacheMergeTypes<Object, Object>, Object> mergePolicy,
CallerProvenance callerProvenance) {
final long now = Clock.currentTimeMillis();
@@ -1791,7 +1791,7 @@ public CacheRecord merge(CacheMergeTypes<Object, Object> mergingEntry,
mergingEntry = injectDependencies(mergingEntry);
mergePolicy = injectDependencies(mergePolicy);

- boolean merged = false;
+ CacheMergeResponse.MergeResult result = CacheMergeResponse.MergeResult.NO_MERGE_APPLIED;
Data key = (Data) mergingEntry.getRawKey();
long expiryTime = mergingEntry.getExpirationTime();
R record = records.get(key);
@@ -1802,35 +1802,38 @@ public CacheRecord merge(CacheMergeTypes<Object, Object> mergingEntry,
Object newValue = mergePolicy.merge(mergingEntry, null);
if (newValue != null) {
record = createRecordWithExpiry(key, newValue, expiryTime, now, disableWriteThrough, IGNORE_COMPLETION);
- merged = record != null;
+ if (record != null) {
+     result = CacheMergeResponse.MergeResult.RECORD_CREATED;
+ }
}
} else {
Data oldValue = ss.toData(record.getValue());
CacheMergeTypes<Object, Object> existingEntry = createMergingEntry(ss, key, oldValue, record);
Object newValue = mergePolicy.merge(mergingEntry, existingEntry);

- merged = updateWithMergingValue(key, oldValue, newValue, record, expiryTime, now, disableWriteThrough);
+ result = updateWithMergingValue(key, oldValue, newValue, record, expiryTime, now, disableWriteThrough);
}

- if (merged && isStatisticsEnabled()) {
+ if (result.isMergeApplied() && isStatisticsEnabled()) {
statistics.increaseCachePuts(1);
statistics.addPutTimeNanos(Timer.nanosElapsed(startNanos));
}

- return merged ? record : null;
+ return result.isMergeApplied() ? new CacheMergeResponse(record, result) : new CacheMergeResponse(null, result);
}

- private boolean updateWithMergingValue(Data key, Object existingValue, Object mergingValue,
+ private CacheMergeResponse.MergeResult updateWithMergingValue(Data key, Object existingValue, Object mergingValue,
R record, long expiryTime, long now, boolean disableWriteThrough) {

if (valueComparator.isEqual(existingValue, mergingValue, ss)) {
updateExpiryTime(record, expiryTime);
processExpiredEntry(key, record, now);
- return true;
+ return CacheMergeResponse.MergeResult.VALUES_ARE_EQUAL;
}

- return updateRecordWithExpiry(key, mergingValue, record, TIME_NOT_AVAILABLE,
- now, disableWriteThrough, IGNORE_COMPLETION);
+ boolean updateResult = updateRecordWithExpiry(key, mergingValue, record, TIME_NOT_AVAILABLE, now, disableWriteThrough,
+ IGNORE_COMPLETION);
+ return updateResult ? CacheMergeResponse.MergeResult.RECORD_UPDATED : CacheMergeResponse.MergeResult.NO_MERGE_APPLIED;
}

private Object getExpiryPolicyOrNull(R record) {
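The heart of the fix is this contract change: the old API returned the merged CacheRecord or null, which conflated "merge applied but value unchanged" with a real create or update. A condensed before/after sketch (caller code taken from the CacheMergeOperation diff further down; comments are editorial):

    // Before: any applied merge produced a non-null record, so a WAN update
    // event fired even when the merge policy kept a value equal to the old one.
    CacheRecord backupRecord = recordStore.merge(mergingEntry, mergePolicy, NOT_WAN);
    if (backupRecord != null) {
        publishWanUpdate(dataKey, backupRecord);
    }

    // After: VALUES_ARE_EQUAL still counts as an applied merge (expiry is
    // refreshed, backups are sent), but the caller can now suppress the WAN event.
    CacheMergeResponse response = recordStore.merge(mergingEntry, mergePolicy, NOT_WAN);
    if (response.getResult().isMergeApplied()
            && response.getResult() != CacheMergeResponse.MergeResult.VALUES_ARE_EQUAL) {
        publishWanUpdate(dataKey, response.getRecord());
    }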
hazelcast/src/main/java/com/hazelcast/cache/impl/CacheMergeResponse.java (new file)
@@ -0,0 +1,70 @@
/*
* Copyright (c) 2008-2023, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package com.hazelcast.cache.impl;

import com.hazelcast.cache.impl.record.CacheRecord;
import com.hazelcast.spi.merge.SplitBrainMergePolicy;
import com.hazelcast.spi.merge.SplitBrainMergeTypes;
import com.hazelcast.wan.impl.CallerProvenance;

import javax.annotation.Nonnull;
import javax.annotation.Nullable;

/**
* Special response class used to provide verbose results for {@link com.hazelcast.cache.ICache}
* merge operations, specifically from {@link ICacheRecordStore#merge(SplitBrainMergeTypes.CacheMergeTypes,
* SplitBrainMergePolicy, CallerProvenance)}
*/
public class CacheMergeResponse {
@Nullable
private final CacheRecord record;
@Nonnull
private final MergeResult result;

public CacheMergeResponse(@Nullable CacheRecord record, @Nonnull MergeResult result) {
this.record = record;
this.result = result;
}

@Nullable
public CacheRecord getRecord() {
return record;
}

@Nonnull
public MergeResult getResult() {
return result;
}

public enum MergeResult {
NO_MERGE_APPLIED(false),
VALUES_ARE_EQUAL(true),
RECORD_CREATED(true),
RECORD_UPDATED(true),
;

private final boolean mergeApplied;

MergeResult(boolean mergeApplied) {
this.mergeApplied = mergeApplied;
}

public boolean isMergeApplied() {
return mergeApplied;
}
}
}
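To make the enum's two axes explicit, here is an illustrative reading of a response (not part of the PR; existingRecord stands for a hypothetical CacheRecord): isMergeApplied() answers "did the merge policy accept the entry?", while the specific constant answers "did the stored value change?".

    CacheMergeResponse response = new CacheMergeResponse(existingRecord,
            CacheMergeResponse.MergeResult.VALUES_ARE_EQUAL);
    // The merge was applied...
    assert response.getResult().isMergeApplied();
    // ...but the value did not change, so no WAN update should be published.
    boolean valueChanged = response.getResult() == CacheMergeResponse.MergeResult.RECORD_CREATED
            || response.getResult() == CacheMergeResponse.MergeResult.RECORD_UPDATED;
    assert !valueChanged;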
hazelcast/src/main/java/com/hazelcast/cache/impl/ICacheRecordStore.java
@@ -539,9 +539,9 @@ public interface ICacheRecordStore {
* @param mergingEntry the {@link CacheMergeTypes} instance to merge
* @param mergePolicy the {@link SplitBrainMergePolicy} instance to apply
* @param callerProvenance
- * @return the used {@link CacheRecord} if merge is applied, otherwise {@code null}
+ * @return {@link CacheMergeResponse} indicating the result of the merge
*/
- CacheRecord merge(CacheMergeTypes<Object, Object> mergingEntry,
+ CacheMergeResponse merge(CacheMergeTypes<Object, Object> mergingEntry,
SplitBrainMergePolicy<Object, CacheMergeTypes<Object, Object>, Object> mergePolicy,
CallerProvenance callerProvenance);

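Note that, going by the record store implementation above, merge never returns null under the new contract: a rejected merge is signalled by a response wrapping a null record. A sketch of that reading (an assumption drawn from the diff, not documented javadoc):

    // NO_MERGE_APPLIED: the response itself is non-null, its record is null,
    // so callers null-check getRecord() rather than the response.
    CacheMergeResponse rejected = new CacheMergeResponse(null,
            CacheMergeResponse.MergeResult.NO_MERGE_APPLIED);
    assert !rejected.getResult().isMergeApplied() && rejected.getRecord() == null;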
hazelcast/src/main/java/com/hazelcast/cache/impl/operation/CacheLoadAllOperation.java
@@ -22,26 +22,26 @@
import com.hazelcast.cache.impl.ICacheService;
import com.hazelcast.cache.impl.record.CacheRecord;
import com.hazelcast.internal.nio.IOUtil;
+ import com.hazelcast.internal.partition.IPartitionService;
+ import com.hazelcast.internal.serialization.Data;
+ import com.hazelcast.internal.services.ObjectNamespace;
+ import com.hazelcast.internal.services.ServiceNamespaceAware;
import com.hazelcast.nio.ObjectDataInput;
import com.hazelcast.nio.ObjectDataOutput;
- import com.hazelcast.internal.serialization.Data;
import com.hazelcast.nio.serialization.IdentifiedDataSerializable;
+ import com.hazelcast.spi.impl.operationservice.AbstractNamedOperation;
import com.hazelcast.spi.impl.operationservice.BackupAwareOperation;
- import com.hazelcast.internal.services.ObjectNamespace;
+ import com.hazelcast.spi.impl.operationservice.MutatingOperation;
import com.hazelcast.spi.impl.operationservice.Operation;
import com.hazelcast.spi.impl.operationservice.PartitionAwareOperation;
- import com.hazelcast.internal.services.ServiceNamespaceAware;
- import com.hazelcast.spi.impl.operationservice.AbstractNamedOperation;
- import com.hazelcast.spi.impl.operationservice.MutatingOperation;
- import com.hazelcast.internal.partition.IPartitionService;

import javax.cache.CacheException;
import java.io.IOException;
+ import java.util.ArrayList;
import java.util.HashSet;
- import java.util.Map;
+ import java.util.List;
import java.util.Set;

- import static com.hazelcast.internal.util.MapUtil.createHashMap;
import static com.hazelcast.internal.util.SetUtil.createHashSet;

/**
@@ -59,7 +59,7 @@ public class CacheLoadAllOperation
private boolean replaceExistingValues;
private boolean shouldBackup;

- private transient Map<Data, CacheRecord> backupRecords;
+ private transient List backupPairs;
private transient ICacheRecordStore cache;

private Object response;
@@ -99,16 +99,17 @@ public void run()
Set<Data> keysLoaded = cache.loadAll(filteredKeys, replaceExistingValues);
int loadedKeyCount = keysLoaded.size();
if (loadedKeyCount > 0) {
- backupRecords = createHashMap(loadedKeyCount);
+ backupPairs = new ArrayList<>(loadedKeyCount * 2);
for (Data key : keysLoaded) {
CacheRecord record = cache.getRecord(key);
// Loaded keys may have been evicted, then record will be null.
// So if the loaded key is evicted, don't send it to backup.
if (record != null) {
- backupRecords.put(key, record);
+ backupPairs.add(key);
+ backupPairs.add(record);
}
}
- shouldBackup = !backupRecords.isEmpty();
+ shouldBackup = !backupPairs.isEmpty();
}
} catch (CacheException e) {
response = new CacheClearResponse(e);
@@ -127,7 +128,7 @@ public boolean shouldBackup() {

@Override
public Operation getBackupOperation() {
- return new CachePutAllBackupOperation(name, backupRecords);
+ return new CachePutAllBackupOperation(name, backupPairs);
}

@Override
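CacheLoadAllOperation now hands CachePutAllBackupOperation a flat list instead of a Map. The sizing (loadedKeyCount * 2) and the paired add calls imply an interleaved [key0, record0, key1, record1, ...] layout; a sketch of how a consumer would walk it (illustrative only, the backup operation's code is not in this excerpt):

    for (int i = 0; i < backupPairs.size(); i += 2) {
        Data key = (Data) backupPairs.get(i);
        CacheRecord record = (CacheRecord) backupPairs.get(i + 1);
        // the logical entry index is i / 2, which is what CacheMergeOperation's
        // backupNonReplicatedKeys BitSet indexes (see below)
    }

Unlike a HashMap, the list preserves insertion order, which is what allows a BitSet of entry indexes to stay aligned with the pairs.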
hazelcast/src/main/java/com/hazelcast/cache/impl/operation/CacheMergeOperation.java
@@ -17,7 +17,7 @@
package com.hazelcast.cache.impl.operation;

import com.hazelcast.cache.impl.CacheDataSerializerHook;
- import com.hazelcast.cache.impl.record.CacheRecord;
+ import com.hazelcast.cache.impl.CacheMergeResponse;
import com.hazelcast.nio.ObjectDataInput;
import com.hazelcast.nio.ObjectDataOutput;
import com.hazelcast.internal.serialization.Data;
@@ -28,10 +28,9 @@

import java.io.IOException;
import java.util.ArrayList;
+ import java.util.BitSet;
import java.util.List;
- import java.util.Map;

- import static com.hazelcast.internal.util.MapUtil.createHashMap;
import static com.hazelcast.wan.impl.CallerProvenance.NOT_WAN;

/**
@@ -45,7 +44,8 @@ public class CacheMergeOperation extends CacheOperation implements BackupAwareOp
private SplitBrainMergePolicy<Object, CacheMergeTypes<Object, Object>, Object> mergePolicy;

private transient boolean hasBackups;
- private transient Map<Data, CacheRecord> backupRecords;
+ private transient List backupPairs;
+ private transient BitSet backupNonReplicatedKeys;

public CacheMergeOperation() {
}
@@ -61,7 +61,8 @@ public CacheMergeOperation(String name, List<CacheMergeTypes<Object, Object>> me
protected void beforeRunInternal() {
hasBackups = getSyncBackupCount() + getAsyncBackupCount() > 0;
if (hasBackups) {
- backupRecords = createHashMap(mergingEntries.size());
+ backupPairs = new ArrayList(mergingEntries.size());
+ backupNonReplicatedKeys = new BitSet(mergingEntries.size());
}
}

@@ -75,13 +76,20 @@ public void run() {
private void merge(CacheMergeTypes<Object, Object> mergingEntry) {
Data dataKey = (Data) mergingEntry.getRawKey();

- CacheRecord backupRecord = recordStore.merge(mergingEntry, mergePolicy, NOT_WAN);
- if (backupRecords != null && backupRecord != null) {
- backupRecords.put(dataKey, backupRecord);
+ CacheMergeResponse response = recordStore.merge(mergingEntry, mergePolicy, NOT_WAN);
+ if (backupPairs != null && response.getResult().isMergeApplied()) {
+ if (response.getResult() == CacheMergeResponse.MergeResult.VALUES_ARE_EQUAL) {
+ backupNonReplicatedKeys.set(backupPairs.size() / 2);
+ }
+ backupPairs.add(dataKey);
+ backupPairs.add(response.getRecord());
}
if (recordStore.isWanReplicationEnabled()) {
- if (backupRecord != null) {
- publishWanUpdate(dataKey, backupRecord);
+ if (response.getResult().isMergeApplied()) {
+ // Don't WAN replicate merge events where values don't change
+ if (response.getResult() != CacheMergeResponse.MergeResult.VALUES_ARE_EQUAL) {
+ publishWanUpdate(dataKey, response.getRecord());
+ }
} else {
publishWanRemove(dataKey);
}
@@ -90,17 +98,17 @@ private void merge(CacheMergeTypes<Object, Object> mergingEntry) {

@Override
public Object getResponse() {
- return hasBackups && !backupRecords.isEmpty();
+ return hasBackups && !backupPairs.isEmpty();
}

@Override
public boolean shouldBackup() {
- return hasBackups && !backupRecords.isEmpty();
+ return hasBackups && !backupPairs.isEmpty();
}

@Override
public Operation getBackupOperation() {
- return new CachePutAllBackupOperation(name, backupRecords);
+ return new CachePutAllBackupOperation(name, backupPairs, backupNonReplicatedKeys);
}

@Override
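The BitSet rides along to the backup so that backup replicas can likewise suppress WAN events for merges whose values did not change: bit i is set when the i-th backed-up entry merged as VALUES_ARE_EQUAL. A hedged sketch of the consuming side (CachePutAllBackupOperation's new constructor and logic are outside this excerpt, so the loop below is an assumption about its behavior):

    for (int i = 0; i < backupPairs.size(); i += 2) {
        Data key = (Data) backupPairs.get(i);
        CacheRecord record = (CacheRecord) backupPairs.get(i + 1);
        // bit i / 2 set means the value was unchanged by the merge:
        // apply the backup record either way, but publish a WAN event
        // only when wanReplicated is true
        boolean wanReplicated = !backupNonReplicatedKeys.get(i / 2);
    }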