HBASE-19633 Clean up the replication queues in the postPeerModification stage when removing a peer
Apache9 committed Jan 9, 2018
1 parent f89920a commit 19707a8
Showing 20 changed files with 134 additions and 130 deletions.
@@ -27,8 +27,8 @@
 import java.util.TreeMap;

 import org.apache.hadoop.hbase.TableName;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;

 /**
  * A configuration for the replication peer cluster.
@@ -21,7 +21,6 @@
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.UUID;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
@@ -45,13 +44,14 @@
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
 import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
-import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat;
 import org.apache.hadoop.hbase.mapreduce.TableMapper;
+import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat;
 import org.apache.hadoop.hbase.mapreduce.TableSplit;
 import org.apache.hadoop.hbase.replication.ReplicationException;
-import org.apache.hadoop.hbase.replication.ReplicationFactory;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.replication.ReplicationPeerStorage;
 import org.apache.hadoop.hbase.replication.ReplicationPeers;
+import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -66,6 +66,7 @@
 import org.apache.hadoop.util.ToolRunner;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;

 /**
@@ -333,19 +334,24 @@ private static Pair<ReplicationPeerConfig, Configuration> getPeerQuorumConfig(
       final Configuration conf, String peerId) throws IOException {
     ZKWatcher localZKW = null;
     try {
-      localZKW = new ZKWatcher(conf, "VerifyReplication",
-        new Abortable() {
-          @Override public void abort(String why, Throwable e) {}
-          @Override public boolean isAborted() {return false;}
-        });
-
-      ReplicationPeers rp = ReplicationFactory.getReplicationPeers(localZKW, conf);
-      rp.init();
-
-      return Pair.newPair(rp.getPeerConfig(peerId), rp.getPeerClusterConfiguration(peerId));
+      localZKW = new ZKWatcher(conf, "VerifyReplication", new Abortable() {
+        @Override
+        public void abort(String why, Throwable e) {
+        }
+
+        @Override
+        public boolean isAborted() {
+          return false;
+        }
+      });
+      ReplicationPeerStorage storage =
+          ReplicationStorageFactory.getReplicationPeerStorage(localZKW, conf);
+      ReplicationPeerConfig peerConfig = storage.getPeerConfig(peerId);
+      return Pair.newPair(peerConfig,
+        ReplicationPeers.getPeerClusterConfiguration(peerConfig, conf));
     } catch (ReplicationException e) {
-      throw new IOException(
-          "An error occurred while trying to connect to the remove peer cluster", e);
+      throw new IOException("An error occurred while trying to connect to the remove peer cluster",
+          e);
     } finally {
       if (localZKW != null) {
         localZKW.close();
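For orientation, the VerifyReplication hunk above replaces the old ReplicationFactory/ReplicationPeers lookup with a direct read from ReplicationPeerStorage. Below is a minimal standalone sketch of the new read path; it is not part of the commit, and it assumes a reachable cluster whose peer storage holds a peer registered under id "1".

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.Abortable;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
    import org.apache.hadoop.hbase.replication.ReplicationPeerStorage;
    import org.apache.hadoop.hbase.replication.ReplicationPeers;
    import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
    import org.apache.hadoop.hbase.zookeeper.ZKWatcher;

    public class PeerConfigReadSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Same no-op Abortable idiom the patch uses for its short-lived watcher.
        Abortable noop = new Abortable() {
          @Override
          public void abort(String why, Throwable e) {
          }

          @Override
          public boolean isAborted() {
            return false;
          }
        };
        try (ZKWatcher zk = new ZKWatcher(conf, "peer-config-sketch", noop)) {
          // Read the stored peer config straight from the peer storage...
          ReplicationPeerStorage storage =
              ReplicationStorageFactory.getReplicationPeerStorage(zk, conf);
          ReplicationPeerConfig peerConfig = storage.getPeerConfig("1");
          // ...then derive the remote cluster's Configuration with the
          // now-static helper, a pure function of (peerConfig, baseConf).
          Configuration peerConf =
              ReplicationPeers.getPeerClusterConfiguration(peerConfig, conf);
          System.out.println(peerConf.get("hbase.zookeeper.quorum"));
        }
      }
    }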
@@ -27,8 +27,6 @@
 import org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;

 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;

@@ -39,20 +37,22 @@
 @InterfaceAudience.Private
 public class ReplicationPeers {

-  private static final Logger LOG = LoggerFactory.getLogger(ReplicationPeers.class);
-
   private final Configuration conf;

   // Map of peer clusters keyed by their id
   private final ConcurrentMap<String, ReplicationPeerImpl> peerCache;
   private final ReplicationPeerStorage peerStorage;

-  protected ReplicationPeers(ZKWatcher zookeeper, Configuration conf) {
+  ReplicationPeers(ZKWatcher zookeeper, Configuration conf) {
     this.conf = conf;
     this.peerCache = new ConcurrentHashMap<>();
     this.peerStorage = ReplicationStorageFactory.getReplicationPeerStorage(zookeeper, conf);
   }

+  public Configuration getConf() {
+    return conf;
+  }
+
   public void init() throws ReplicationException {
     // Loading all existing peerIds into peer cache.
     for (String peerId : this.peerStorage.listPeerIds()) {
@@ -120,22 +120,13 @@ public Set<String> getAllPeerIds() {
     return peerCache.keySet();
   }

-  public ReplicationPeerConfig getPeerConfig(String peerId) {
-    ReplicationPeer replicationPeer = this.peerCache.get(peerId);
-    if (replicationPeer == null) {
-      throw new IllegalArgumentException("Peer with id= " + peerId + " is not cached");
-    }
-    return replicationPeer.getPeerConfig();
-  }
-
-  public Configuration getPeerClusterConfiguration(String peerId) throws ReplicationException {
-    ReplicationPeerConfig peerConfig = peerStorage.getPeerConfig(peerId);
-
+  public static Configuration getPeerClusterConfiguration(ReplicationPeerConfig peerConfig,
+      Configuration baseConf) throws ReplicationException {
     Configuration otherConf;
     try {
-      otherConf = HBaseConfiguration.createClusterConf(this.conf, peerConfig.getClusterKey());
+      otherConf = HBaseConfiguration.createClusterConf(baseConf, peerConfig.getClusterKey());
     } catch (IOException e) {
-      throw new ReplicationException("Can't get peer configuration for peerId=" + peerId, e);
+      throw new ReplicationException("Can't get peer configuration for peer " + peerConfig, e);
     }

     if (!peerConfig.getConfiguration().isEmpty()) {
@@ -172,8 +163,9 @@ public ReplicationPeerConfig refreshPeerConfig(String peerId) throws Replication
    * @return object representing the peer
    */
   private ReplicationPeerImpl createPeer(String peerId) throws ReplicationException {
-    ReplicationPeerConfig peerConf = peerStorage.getPeerConfig(peerId);
+    ReplicationPeerConfig peerConfig = peerStorage.getPeerConfig(peerId);
     boolean enabled = peerStorage.isPeerEnabled(peerId);
-    return new ReplicationPeerImpl(getPeerClusterConfiguration(peerId), peerId, enabled, peerConf);
+    return new ReplicationPeerImpl(getPeerClusterConfiguration(peerConfig, conf), peerId, enabled,
+      peerConfig);
   }
 }
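The net effect of the ReplicationPeers hunk: the instance methods getPeerConfig(String) and getPeerClusterConfiguration(String) are gone, the latter is now a static pure function of (peerConfig, baseConf), and a getConf() accessor lets callers supply the base configuration themselves. A hedged migration sketch for callers — the wrapper class and method name here are ours, not the commit's:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.replication.ReplicationException;
    import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
    import org.apache.hadoop.hbase.replication.ReplicationPeers;

    public class PeerConfMigrationSketch {
      // Replaces the removed instance calls rp.getPeerConfig(peerId) and
      // rp.getPeerClusterConfiguration(peerId): read the config from peer
      // storage, then derive the remote cluster Configuration statically.
      static Configuration resolvePeerConf(ReplicationPeers rp, String peerId)
          throws ReplicationException {
        ReplicationPeerConfig peerConfig = rp.getPeerStorage().getPeerConfig(peerId);
        return ReplicationPeers.getPeerClusterConfiguration(peerConfig, rp.getConf());
      }
    }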
@@ -27,7 +27,6 @@
 import java.util.Set;
 import java.util.SortedSet;
 import java.util.TreeSet;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
@@ -50,7 +49,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

-import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;

 /**
  * ZK based replication queue storage.
@@ -19,13 +19,13 @@

 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
 import org.apache.yetus.audience.InterfaceAudience;

-import org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream;
+import org.apache.hbase.thirdparty.com.google.protobuf.CodedOutputStream;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;

@@ -19,7 +19,6 @@

 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;

@@ -238,12 +237,6 @@ public void testReplicationPeers() throws Exception {
     } catch (ReplicationException e) {
     }

-    try {
-      assertNull(rp.getPeerClusterConfiguration("bogus"));
-      fail("Should have thrown an ReplicationException when passed a bogus peerId");
-    } catch (ReplicationException e) {
-    }
-
     assertNumberOfPeers(0);

     // Add some peers
@@ -258,7 +251,8 @@
       fail("There are no connected peers, should have thrown an IllegalArgumentException");
     } catch (IllegalArgumentException e) {
     }
-    assertEquals(KEY_ONE, ZKConfig.getZooKeeperClusterKey(rp.getPeerClusterConfiguration(ID_ONE)));
+    assertEquals(KEY_ONE, ZKConfig.getZooKeeperClusterKey(ReplicationPeers
+        .getPeerClusterConfiguration(rp.getPeerStorage().getPeerConfig(ID_ONE), rp.getConf())));
     rp.getPeerStorage().removePeer(ID_ONE);
     rp.removePeer(ID_ONE);
     assertNumberOfPeers(1);
@@ -19,7 +19,6 @@

 import com.google.protobuf.Descriptors;
 import com.google.protobuf.Service;
-
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.lang.reflect.Constructor;
@@ -51,12 +50,10 @@
 import java.util.function.Function;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
-
 import javax.servlet.ServletException;
 import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.ClusterMetrics;
@@ -207,6 +204,7 @@
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo;
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hbase.master.replication;

 import java.io.IOException;
-
 import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
@@ -74,8 +73,8 @@ protected void updatePeerStorage(MasterProcedureEnv env) throws ReplicationExcep

   @Override
   protected void postPeerModification(MasterProcedureEnv env) throws IOException {
-    LOG.info("Successfully added " + (enabled ? "ENABLED" : "DISABLED") + " peer " + peerId +
-      ", config " + peerConfig);
+    LOG.info("Successfully added {} peer {}, config {}", enabled ? "ENABLED" : "DISABLED", peerId,
+      peerConfig);
     MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
     if (cpHost != null) {
       env.getMasterCoprocessorHost().postAddReplicationPeer(peerId, peerConfig);
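The logging edits in this and the following procedure classes switch from string concatenation to SLF4J parameterized messages. Two properties make that worthwhile, shown in a small self-contained sketch (the class name is ours): the message is only rendered when the level is enabled, and a trailing Throwable beyond the {} placeholders still gets its stack trace printed.

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class Slf4jSketch {
      private static final Logger LOG = LoggerFactory.getLogger(Slf4jSketch.class);

      public static void main(String[] args) {
        String peerId = "1";
        // No string building happens unless INFO is enabled.
        LOG.info("Successfully added {} peer {}", "ENABLED", peerId);
        // The exception is not consumed by a placeholder; SLF4J appends its
        // stack trace after the rendered message.
        LOG.warn("update peer storage for peer {} failed, retry", peerId,
            new RuntimeException("zk hiccup"));
      }
    }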
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hbase.master.replication;

 import java.io.IOException;
-
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.replication.ReplicationException;
@@ -62,7 +61,7 @@ protected void updatePeerStorage(MasterProcedureEnv env) throws ReplicationExcep

   @Override
   protected void postPeerModification(MasterProcedureEnv env) throws IOException {
-    LOG.info("Successfully disabled peer " + peerId);
+    LOG.info("Successfully disabled peer {}", peerId);
     MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
     if (cpHost != null) {
       cpHost.postDisableReplicationPeer(peerId);
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hbase.master.replication;

 import java.io.IOException;
-
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.replication.ReplicationException;
@@ -62,7 +61,7 @@ protected void updatePeerStorage(MasterProcedureEnv env) throws ReplicationExcep

   @Override
   protected void postPeerModification(MasterProcedureEnv env) throws IOException {
-    LOG.info("Successfully enabled peer " + peerId);
+    LOG.info("Successfully enabled peer {}", peerId);
     MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
     if (cpHost != null) {
       cpHost.postEnableReplicationPeer(peerId);
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hbase.master.replication;

 import java.io.IOException;
-
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.master.procedure.PeerProcedureInterface;
 import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
@@ -84,10 +83,13 @@ protected abstract void prePeerModification(MasterProcedureEnv env)
    * Called before we finish the procedure. The implementation can do some logging work, and also
    * call the coprocessor hook if any.
    * <p>
-   * Notice that, since we have already done the actual work, throwing exception here will not fail
-   * this procedure, we will just ignore it and finish the procedure as suceeded.
+   * Notice that, since we have already done the actual work, throwing {@code IOException} here will
+   * not fail this procedure, we will just ignore it and finish the procedure as suceeded. If
+   * {@code ReplicationException} is thrown we will retry since this usually means we fails to
+   * update the peer storage.
    */
-  protected abstract void postPeerModification(MasterProcedureEnv env) throws IOException;
+  protected abstract void postPeerModification(MasterProcedureEnv env)
+      throws IOException, ReplicationException;

   private void releaseLatch() {
     ProcedurePrepareLatch.releaseLatch(latch, this);
@@ -101,16 +103,14 @@ protected Flow executeFromState(MasterProcedureEnv env, PeerModificationState st
       try {
         prePeerModification(env);
       } catch (IOException e) {
-        LOG.warn(
-          getClass().getName() + " failed to call CP hook or the pre check is failed for peer " +
-            peerId + ", mark the procedure as failure and give up",
-          e);
+        LOG.warn("{} failed to call pre CP hook or the pre check is failed for peer {}, " +
+          "mark the procedure as failure and give up", getClass().getName(), peerId, e);
         setFailure("master-" + getPeerOperationType().name().toLowerCase() + "-peer", e);
         releaseLatch();
         return Flow.NO_MORE_STATE;
       } catch (ReplicationException e) {
-        LOG.warn(getClass().getName() + " failed to call prePeerModification for peer " + peerId +
-          ", retry", e);
+        LOG.warn("{} failed to call prePeerModification for peer {}, retry", getClass().getName(),
+          peerId, e);
         throw new ProcedureYieldException();
       }
       setNextState(PeerModificationState.UPDATE_PEER_STORAGE);
@@ -119,8 +119,8 @@
       try {
         updatePeerStorage(env);
       } catch (ReplicationException e) {
-        LOG.warn(
-          getClass().getName() + " update peer storage for peer " + peerId + " failed, retry", e);
+        LOG.warn("{} update peer storage for peer {} failed, retry", getClass().getName(), peerId,
+          e);
         throw new ProcedureYieldException();
       }
       setNextState(PeerModificationState.REFRESH_PEER_ON_RS);
@@ -134,9 +134,13 @@
       case POST_PEER_MODIFICATION:
         try {
           postPeerModification(env);
+        } catch (ReplicationException e) {
+          LOG.warn("{} failed to call postPeerModification for peer {}, retry",
+            getClass().getName(), peerId, e);
+          throw new ProcedureYieldException();
         } catch (IOException e) {
-          LOG.warn(getClass().getName() + " failed to call prePeerModification for peer " + peerId +
-            ", ignore since the procedure has already done", e);
+          LOG.warn("{} failed to call post CP hook for peer {}, " +
+            "ignore since the procedure has already done", getClass().getName(), peerId, e);
         }
         releaseLatch();
         return Flow.NO_MORE_STATE;
@@ -175,7 +179,7 @@ protected void rollbackState(MasterProcedureEnv env, PeerModificationState state
       throws IOException, InterruptedException {
     if (state == PeerModificationState.PRE_PEER_MODIFICATION) {
       // actually the peer related operations has no rollback, but if we haven't done any
-      // modifications on the peer storage, we can just return.
+      // modifications on the peer storage yet, we can just return.
       return;
     }
     throw new UnsupportedOperationException();
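This contract change is what enables the cleanup named in the commit title: RemovePeerProcedure (among the 20 changed files, though its hunk is not captured above) can now delete the peer's replication queues inside postPeerModification and surface failures as ReplicationException, so the POST_PEER_MODIFICATION state is retried instead of finishing with stale queues left behind. A hypothetical shape of such an override — not the commit's actual code, and removeAllQueues is an assumed helper:

    import java.io.IOException;
    import org.apache.hadoop.hbase.replication.ReplicationException;

    public abstract class RemovePeerCleanupSketch {
      protected final String peerId;

      protected RemovePeerCleanupSketch(String peerId) {
        this.peerId = peerId;
      }

      // Assumed helper: deletes every replication queue recorded for the peer;
      // a ZooKeeper failure surfaces as ReplicationException.
      protected abstract void removeAllQueues(String peerId) throws ReplicationException;

      protected void postPeerModification() throws IOException, ReplicationException {
        // Under the new contract a ReplicationException here makes the
        // procedure yield and re-run POST_PEER_MODIFICATION, so the queues
        // cannot be silently dropped; an IOException from a CP hook is still
        // just logged and ignored by the caller.
        removeAllQueues(peerId);
      }
    }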
