Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Handle empty partition list correctly on OperationService.invokeOnPartitions #14266

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@
import com.hazelcast.spi.impl.operationservice.impl.operations.PartitionIteratingOperation;
import com.hazelcast.spi.impl.operationservice.impl.operations.PartitionIteratingOperation.PartitionResponse;

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
Expand Down Expand Up @@ -103,6 +104,13 @@ private void ensureNotCallingFromPartitionOperationThread() {
}

private void invokeOnAllPartitions() {
if (memberPartitions.isEmpty()) {
future.setResult(Collections.EMPTY_MAP);
if (callback != null) {
callback.onResponse(Collections.EMPTY_MAP);
}
return;
}
for (final Map.Entry<Address, List<Integer>> mp : memberPartitions.entrySet()) {
final Address address = mp.getKey();
List<Integer> partitions = mp.getValue();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
package com.hazelcast.spi.impl.operationservice.impl;

import com.hazelcast.config.Config;
import com.hazelcast.core.ExecutionCallback;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.nio.ObjectDataInput;
import com.hazelcast.nio.ObjectDataOutput;
Expand All @@ -35,7 +36,13 @@
import org.junit.runner.RunWith;

import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.LinkedList;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicReference;

import static com.hazelcast.spi.properties.GroupProperty.OPERATION_CALL_TIMEOUT_MILLIS;
import static com.hazelcast.spi.properties.GroupProperty.PARTITION_COUNT;
Expand All @@ -46,10 +53,8 @@
public class OperationServiceImpl_invokeOnPartitionsTest extends HazelcastTestSupport {

@Test
public void test() throws Exception {
Config config = new Config()
.setProperty(PARTITION_COUNT.getName(), "" + 100);
config.getSerializationConfig().addDataSerializableFactory(123, new SlowOperationSerializationFactory());
public void test_onAllPartitions() throws Exception {
Config config = new Config().setProperty(PARTITION_COUNT.getName(), "100");
HazelcastInstance hz = createHazelcastInstance(config);
OperationServiceImpl opService = getOperationServiceImpl(hz);

Expand All @@ -62,11 +67,177 @@ public void test() throws Exception {
}
}

@Test
public void test_onSelectedPartitions() throws Exception {
    // Single member owning all 100 partitions.
    Config config = new Config().setProperty(PARTITION_COUNT.getName(), "100");
    HazelcastInstance instance = createHazelcastInstance(config);
    OperationServiceImpl operationService = getOperationServiceImpl(instance);

    // Invoke synchronously on a three-partition subset only.
    Collection<Integer> selectedPartitions = new LinkedList<Integer>();
    selectedPartitions.add(1);
    selectedPartitions.add(2);
    selectedPartitions.add(3);
    Map<Integer, Object> responses = operationService.invokeOnPartitions(null, new OperationFactoryImpl(), selectedPartitions);

    // Exactly the selected partitions answer; each response is twice its partition id.
    assertEquals(3, responses.size());
    for (Map.Entry<Integer, Object> response : responses.entrySet()) {
        assertEquals(response.getKey() * 2, response.getValue());
    }
}

@Test
// NOTE(review): method name has a typo ("LIst"); kept as-is to avoid renaming a published test.
public void test_onEmptyPartitionLIst() throws Exception {
    Config config = new Config().setProperty(PARTITION_COUNT.getName(), "100");
    HazelcastInstance hz = createHazelcastInstance(config);
    OperationServiceImpl opService = getOperationServiceImpl(hz);

    // Typed Collections.<Integer>emptyList() instead of the raw EMPTY_LIST constant,
    // which triggers an unchecked-conversion warning.
    Map<Integer, Object> result = opService.invokeOnPartitions(null, new OperationFactoryImpl(), Collections.<Integer>emptyList());

    // An empty partition list must yield an empty result map (the bug this PR fixes).
    assertEquals(0, result.size());
}

@Test
public void testAsync_onAllPartitions_getResponeViaFuture() throws Exception {
    // Single member owning all 100 partitions.
    Config config = new Config().setProperty(PARTITION_COUNT.getName(), "100");
    HazelcastInstance instance = createHazelcastInstance(config);
    OperationServiceImpl operationService = getOperationServiceImpl(instance);

    // Async invocation on every partition; no callback, response read from the future.
    Future<Map<Integer, Object>> responseFuture =
            operationService.invokeOnAllPartitionsAsync(null, new OperationFactoryImpl(), null);
    Map<Integer, Object> responses = responseFuture.get();

    // Every partition answers; each response is twice its partition id.
    assertEquals(100, responses.size());
    for (Map.Entry<Integer, Object> response : responses.entrySet()) {
        assertEquals(response.getKey() * 2, response.getValue());
    }
}

@Test
public void testAsync_onSelectedPartitions_getResponeViaFuture() throws Exception {
    // Single member owning all 100 partitions.
    Config config = new Config().setProperty(PARTITION_COUNT.getName(), "100");
    HazelcastInstance instance = createHazelcastInstance(config);
    OperationServiceImpl operationService = getOperationServiceImpl(instance);

    // Async invocation restricted to partitions 1, 2 and 3; response read from the future.
    Collection<Integer> selectedPartitions = new LinkedList<Integer>();
    selectedPartitions.add(1);
    selectedPartitions.add(2);
    selectedPartitions.add(3);
    Future<Map<Integer, Object>> responseFuture =
            operationService.invokeOnPartitionsAsync(null, new OperationFactoryImpl(), selectedPartitions, null);
    Map<Integer, Object> responses = responseFuture.get();

    // Only the selected partitions answer; each response is twice its partition id.
    assertEquals(3, responses.size());
    for (Map.Entry<Integer, Object> response : responses.entrySet()) {
        assertEquals(response.getKey() * 2, response.getValue());
    }
}

@Test
public void testAsync_onEmptyPartitionList_getResponeViaFuture() throws Exception {
    Config config = new Config().setProperty(PARTITION_COUNT.getName(), "100");
    HazelcastInstance hz = createHazelcastInstance(config);
    OperationServiceImpl opService = getOperationServiceImpl(hz);

    // Typed Collections.<Integer>emptyList() instead of the raw EMPTY_LIST constant,
    // which triggers an unchecked-conversion warning.
    Future<Map<Integer, Object>> future =
            opService.invokeOnPartitionsAsync(null, new OperationFactoryImpl(), Collections.<Integer>emptyList(), null);

    // An empty partition list must complete the future with an empty result map.
    Map<Integer, Object> result = future.get();
    assertEquals(0, result.size());
}

@Test
public void testAsync_onAllPartitions_getResponseViaCallback() {
    // Single member owning all 100 partitions.
    Config config = new Config().setProperty(PARTITION_COUNT.getName(), "100");
    HazelcastInstance instance = createHazelcastInstance(config);
    OperationServiceImpl operationService = getOperationServiceImpl(instance);

    final CountDownLatch done = new CountDownLatch(1);
    final AtomicReference<Map<Integer, Object>> responseRef = new AtomicReference<Map<Integer, Object>>();
    // Capture the response map and open the latch when the callback fires.
    ExecutionCallback<Map<Integer, Object>> callback = new ExecutionCallback<Map<Integer, Object>>() {
        @Override
        public void onResponse(Map<Integer, Object> response) {
            responseRef.set(response);
            done.countDown();
        }

        @Override
        public void onFailure(Throwable t) {
            // intentionally empty: a failure leaves the latch closed and assertOpenEventually times out
        }
    };
    operationService.invokeOnAllPartitionsAsync(null, new OperationFactoryImpl(), callback);

    assertOpenEventually(done);
    // Every partition answers; each response is twice its partition id.
    Map<Integer, Object> responses = responseRef.get();
    assertEquals(100, responses.size());
    for (Map.Entry<Integer, Object> response : responses.entrySet()) {
        assertEquals(response.getKey() * 2, response.getValue());
    }
}

@Test
public void testAsync_onSelectedPartitions_getResponseViaCallback() {
    // Single member owning all 100 partitions.
    Config config = new Config().setProperty(PARTITION_COUNT.getName(), "100");
    HazelcastInstance instance = createHazelcastInstance(config);
    OperationServiceImpl operationService = getOperationServiceImpl(instance);

    // Restrict the invocation to partitions 1, 2 and 3.
    Collection<Integer> selectedPartitions = new LinkedList<Integer>();
    selectedPartitions.add(1);
    selectedPartitions.add(2);
    selectedPartitions.add(3);

    final CountDownLatch done = new CountDownLatch(1);
    final AtomicReference<Map<Integer, Object>> responseRef = new AtomicReference<Map<Integer, Object>>();
    // Capture the response map and open the latch when the callback fires.
    ExecutionCallback<Map<Integer, Object>> callback = new ExecutionCallback<Map<Integer, Object>>() {
        @Override
        public void onResponse(Map<Integer, Object> response) {
            responseRef.set(response);
            done.countDown();
        }

        @Override
        public void onFailure(Throwable t) {
            // intentionally empty: a failure leaves the latch closed and assertOpenEventually times out
        }
    };
    operationService.invokeOnPartitionsAsync(null, new OperationFactoryImpl(), selectedPartitions, callback);

    assertOpenEventually(done);
    // Only the selected partitions answer; each response is twice its partition id.
    Map<Integer, Object> responses = responseRef.get();
    assertEquals(3, responses.size());
    for (Map.Entry<Integer, Object> response : responses.entrySet()) {
        assertEquals(response.getKey() * 2, response.getValue());
    }
}

@Test
public void testAsync_onEmptyPartitionList_getResponseViaCallback() {
    Config config = new Config().setProperty(PARTITION_COUNT.getName(), "100");
    HazelcastInstance hz = createHazelcastInstance(config);
    OperationServiceImpl opService = getOperationServiceImpl(hz);

    final AtomicReference<Map<Integer, Object>> resultReference = new AtomicReference<Map<Integer, Object>>();
    final CountDownLatch responseLatch = new CountDownLatch(1);
    // Capture the response map and open the latch when the callback fires.
    ExecutionCallback<Map<Integer, Object>> executionCallback = new ExecutionCallback<Map<Integer, Object>>() {
        @Override
        public void onResponse(Map<Integer, Object> response) {
            resultReference.set(response);
            responseLatch.countDown();
        }

        @Override
        public void onFailure(Throwable t) {
            // intentionally empty: a failure leaves the latch closed and assertOpenEventually times out
        }
    };
    // Typed Collections.<Integer>emptyList() instead of the raw EMPTY_LIST constant,
    // which triggers an unchecked-conversion warning.
    opService.invokeOnPartitionsAsync(null, new OperationFactoryImpl(), Collections.<Integer>emptyList(), executionCallback);

    assertOpenEventually(responseLatch);
    // An empty partition list must still invoke the callback, with an empty result map.
    Map<Integer, Object> result = resultReference.get();
    assertEquals(0, result.size());
}

@Test
public void testLongRunning() throws Exception {
Config config = new Config()
.setProperty(OPERATION_CALL_TIMEOUT_MILLIS.getName(), "2000")
.setProperty(PARTITION_COUNT.getName(), "" + 100);
.setProperty(PARTITION_COUNT.getName(), "10");
config.getSerializationConfig().addDataSerializableFactory(123, new SlowOperationSerializationFactory());
TestHazelcastInstanceFactory hzFactory = createHazelcastInstanceFactory(2);
HazelcastInstance hz1 = hzFactory.newHazelcastInstance(config);
Expand All @@ -76,7 +247,7 @@ public void testLongRunning() throws Exception {

Map<Integer, Object> result = opService.invokeOnAllPartitions(null, new SlowOperationFactoryImpl());

assertEquals(100, result.size());
assertEquals(10, result.size());
for (Map.Entry<Integer, Object> entry : result.entrySet()) {
int partitionId = entry.getKey();
assertEquals(partitionId * 2, entry.getValue());
Expand All @@ -85,9 +256,9 @@ public void testLongRunning() throws Exception {

@Test
public void testPartitionScopeIsRespectedForPartitionAwareFactories() throws Exception {
Config config = new Config().setProperty(PARTITION_COUNT.getName(), "" + 100);
Config config = new Config().setProperty(PARTITION_COUNT.getName(), "100");
config.getSerializationConfig()
.addDataSerializableFactory(321, new PartitionAwareOperationFactoryDataSerializableFactory());
.addDataSerializableFactory(321, new PartitionAwareOperationFactoryDataSerializableFactory());
HazelcastInstance hz = createHazelcastInstance(config);
OperationServiceImpl opService = getOperationServiceImpl(hz);

Expand Down