Merge pull request #452 from davidvossel/master

enable baremetal connection resource migration support by default + remote cts improvements
2 parents ca8234d + dd7599e · commit 2d005973d4831649596a63c745c1ea50fc0ba094 · davidvossel committed Mar 3, 2014
Showing with 155 additions and 107 deletions.
  1. +8 −8 crmd/lrm.c
  2. +3 −3 cts/CM_ais.py
  3. +5 −5 cts/CTS.py
  4. +6 −6 cts/CTSscenarios.py
  5. +88 −31 cts/CTStests.py
  6. +18 −1 lib/pengine/complex.c
  7. +6 −10 pengine/test10/remote-move.dot
  8. +17 −37 pengine/test10/remote-move.exp
  9. +4 −6 pengine/test10/remote-move.summary
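
For context, the "baremetal connection resource" named in the commit message is an ocf:pacemaker:remote primitive that represents the cluster's connection to a remote (pacemaker_remote) node. A minimal sketch, with illustrative ids and server value mirroring the remote1 resource the CTS test below creates:

<!-- illustrative baremetal remote-node connection resource; the ids and
     server value are placeholders, not part of this commit -->
<primitive class="ocf" id="remote1" provider="pacemaker" type="remote">
  <instance_attributes id="remote1-instance_attributes">
    <nvpair id="remote1-instance_attributes-server" name="server" value="remote-host.example.com"/>
  </instance_attributes>
  <operations>
    <op id="remote1-monitor-interval-60s" interval="60s" name="monitor" timeout="30"/>
  </operations>
</primitive>
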
16 crmd/lrm.c
@@ -2070,25 +2070,25 @@ process_lrm_event(lrm_state_t * lrm_state, lrmd_event_data_t * op)
switch (op->op_status) {
case PCMK_LRM_OP_CANCELLED:
- crm_info("Operation %s: %s (call=%d, confirmed=%s)",
- op_key, services_lrm_status_str(op->op_status),
+ crm_info("Operation %s: %s (node=%s, call=%d, confirmed=%s)",
+ op_key, lrm_state->node_name, services_lrm_status_str(op->op_status),
op->call_id, removed ? "true" : "false");
break;
case PCMK_LRM_OP_DONE:
- crm_notice("Operation %s: %s (call=%d, rc=%d, cib-update=%d, confirmed=%s)",
- op_key, services_ocf_exitcode_str(op->rc),
+ crm_notice("Operation %s: %s (node=%s, call=%d, rc=%d, cib-update=%d, confirmed=%s)",
+ lrm_state->node_name, op_key, services_ocf_exitcode_str(op->rc),
op->call_id, op->rc, update_id, removed ? "true" : "false");
break;
case PCMK_LRM_OP_TIMEOUT:
- crm_err("Operation %s: %s (call=%d, timeout=%dms)",
- op_key, services_lrm_status_str(op->op_status), op->call_id, op->timeout);
+ crm_err("Operation %s: %s (node=%s, call=%d, timeout=%dms)",
+ op_key, lrm_state->node_name, services_lrm_status_str(op->op_status), op->call_id, op->timeout);
break;
default:
- crm_err("Operation %s (call=%d, status=%d, cib-update=%d, confirmed=%s) %s",
- op_key, op->call_id, op->op_status, update_id, removed ? "true" : "false",
+ crm_err("Operation %s (node=%s, call=%d, status=%d, cib-update=%d, confirmed=%s) %s",
+ op_key, lrm_state->node_name, op->call_id, op->op_status, update_id, removed ? "true" : "false",
services_lrm_status_str(op->op_status));
}
6 cts/CM_ais.py
@@ -370,7 +370,7 @@ def __init__(self, Environment, randseed=None):
self.update({
"Name" : "crm-plugin-v1",
"StartCmd" : "service corosync start && service pacemaker start",
- "StopCmd" : "service pacemaker stop; service corosync stop",
+ "StopCmd" : "service pacemaker stop; service pacemaker_remote stop; service corosync stop",
"EpocheCmd" : "crm_node -e",
"QuorumCmd" : "crm_node -q",
@@ -396,7 +396,7 @@ def __init__(self, Environment, randseed=None):
self.update({
"Name" : "crm-mcp",
"StartCmd" : "service corosync start && service pacemaker start",
- "StopCmd" : "service pacemaker stop; service corosync stop",
+ "StopCmd" : "service pacemaker stop; service pacemaker_remote stop; service corosync stop",
"EpocheCmd" : "crm_node -e",
"QuorumCmd" : "crm_node -q",
@@ -433,7 +433,7 @@ def __init__(self, Environment, randseed=None):
self.update({
"Name" : "crm-cman",
"StartCmd" : "service pacemaker start",
- "StopCmd" : "service pacemaker stop",
+ "StopCmd" : "service pacemaker stop; service pacemaker_remote stop",
"EpocheCmd" : "crm_node -e --cman",
"QuorumCmd" : "crm_node -q --cman",
10 cts/CTS.py
@@ -1434,14 +1434,14 @@ def StartaCMnoBlock(self, node, verbose=False):
self.ShouldBeStatus[node]="up"
return 1
- def StopaCM(self, node, verbose=False):
+ def StopaCM(self, node, verbose=False, force=False):
'''Stop the cluster manager on a given node'''
if verbose: self.log("Stopping %s on node %s" %(self["Name"], node))
else: self.debug("Stopping %s on node %s" %(self["Name"], node))
- if self.ShouldBeStatus[node] != "up":
+ if self.ShouldBeStatus[node] != "up" and force == False:
return 1
if self.rsh(node, self["StopCmd"]) == 0:
@@ -1557,7 +1557,7 @@ def startall(self, nodelist=None, verbose=False, quick=False):
return 1
- def stopall(self, nodelist=None, verbose=False):
+ def stopall(self, nodelist=None, verbose=False, force=False):
'''Stop the cluster managers on every node in the cluster.
We can do it on a subset of the cluster if nodelist is not None.
@@ -1568,8 +1568,8 @@ def stopall(self, nodelist=None, verbose=False):
if not nodelist:
nodelist=self.Env["nodes"]
for node in self.Env["nodes"]:
- if self.ShouldBeStatus[node] == "up":
- if not self.StopaCM(node, verbose=verbose):
+ if self.ShouldBeStatus[node] == "up" or force == True:
+ if not self.StopaCM(node, verbose=verbose, force=force):
ret = 0
return ret
12 cts/CTSscenarios.py
@@ -316,19 +316,19 @@ def SetUp(self, CM):
CM.prepare()
# Clear out the cobwebs ;-)
- self.TearDown(CM)
+ self.TearDown(CM, force=True)
# Now start the Cluster Manager on all the nodes.
CM.log("Starting Cluster Manager on all nodes.")
return CM.startall(verbose=True, quick=True)
- def TearDown(self, CM):
+ def TearDown(self, CM, force=False):
'''Set up the given ScenarioComponent'''
# Stop the cluster manager everywhere
CM.log("Stopping Cluster Manager on all nodes")
- return CM.stopall(verbose=True)
+ return CM.stopall(verbose=True, force=force)
class LeaveBooted(BootCluster):
def TearDown(self, CM):
@@ -463,7 +463,7 @@ def SetUp(self, CM):
CM.prepare()
# Clear out the cobwebs
- self.TearDown(CM)
+ self.TearDown(CM, force=True)
# Now start the Cluster Manager on all the nodes.
CM.log("Starting Cluster Manager on BSC node(s).")
@@ -486,7 +486,7 @@ def SetUp(self, CM):
CM.prepare()
# Clear out the cobwebs
- self.TearDown(CM)
+ self.TearDown(CM, force=True)
# Now start the Cluster Manager on all the nodes.
CM.log("Starting Cluster Manager on all node(s).")
@@ -536,7 +536,7 @@ def SetUp(self, CM):
CM.prepare()
# Clear out the cobwebs
- CM.stopall()
+ CM.stopall(force=True)
CM.log("Downgrading all nodes to %s." % self.Env["previous-version"])
119 cts/CTStests.py
@@ -2626,34 +2626,67 @@ def __init__(self, cm):
self.startall = SimulStartLite(cm)
self.stop = StopTest(cm)
self.pcmk_started=0
- self.rsc_added=0
self.failed = 0
self.fail_string = ""
+ self.remote_node_added = 0
+ self.remote_node="remote1"
+ self.remote_rsc_added = 0
+ self.remote_rsc="remote1-rsc"
self.cib_cmd="""cibadmin -C -o %s -X '%s' """
- def del_connection_rsc(self, node):
-
- if self.rsc_added == 0:
- return
+ def del_rsc(self, node, rsc):
for othernode in self.CM.Env["nodes"]:
if othernode == node:
# we don't want to try and use the cib that we just shutdown.
# find a cluster node that is not our soon to be remote-node.
continue
- rc = self.CM.rsh(othernode, "crm_resource -D -r remote1 -t primitive")
+ rc = self.CM.rsh(othernode, "crm_resource -D -r %s -t primitive" % (rsc))
if rc != 0:
- self.fail_string = ("Connection resource removal failed")
+ self.fail_string = ("Removal of resource '%s' failed" % (rsc))
self.failed = 1
else:
self.fail_string = ""
self.failed = 0
break
+ def add_rsc(self, node, rsc_xml):
+ failed=0
+ fail_string=""
+ for othernode in self.CM.Env["nodes"]:
+ if othernode == node:
+ # we don't want to try and use the cib that we just shutdown.
+ # find a cluster node that is not our soon to be remote-node.
+ continue
+
+ rc = self.CM.rsh(othernode, self.cib_cmd % ("resources", rsc_xml))
+ if rc != 0:
+ fail_string = "resource creation failed"
+ failed = 1
+ else:
+ fail_string = ""
+ failed = 0
+ break
+
+ if failed == 1:
+ self.failed=failed
+ self.fail_string=fail_string
+
+ def add_primitive_rsc(self, node):
+ rsc_xml="""
+<primitive class="ocf" id="%s" provider="pacemaker" type="Dummy">
+ <operations>
+ <op id="remote1-rsc-monitor-interval-10s" interval="10s" name="monitor"/>
+ </operations>
+</primitive>""" % (self.remote_rsc)
+ self.add_rsc(node, rsc_xml)
+ if self.failed == 0:
+ self.remote_rsc_added=1
+
def add_connection_rsc(self, node):
rsc_xml="""
-<primitive class="ocf" id="remote1" provider="pacemaker" type="remote">
+<primitive class="ocf" id="%s" provider="pacemaker" type="remote">
<instance_attributes id="remote1-instance_attributes"/>
<instance_attributes id="remote1-instance_attributes">
<nvpair id="remote1-instance_attributes-server" name="server" value="%s"/>
@@ -2663,29 +2696,17 @@ def add_connection_rsc(self, node):
<op id="remote1-name-start-interval-0-timeout-60" interval="0" name="start" timeout="60"/>
</operations>
<meta_attributes id="remote1-meta_attributes"/>
-</primitive>""" % node
-
- for othernode in self.CM.Env["nodes"]:
- if othernode == node:
- # we don't want to try and use the cib that we just shutdown.
- # find a cluster node that is not our soon to be remote-node.
- continue
+</primitive>""" % (self.remote_node, node)
+ self.add_rsc(node, rsc_xml)
+ if self.failed == 0:
+ self.remote_node_added=1
- rc = self.CM.rsh(othernode, self.cib_cmd % ("resources", rsc_xml))
- if rc != 0:
- self.fail_string = "Connection resource creation failed"
- self.failed = 1
- else:
- self.fail_string = ""
- self.failed = 0
- self.rsc_added = 1
- break
-
- def start_metal(self, node):
+ def step1_start_metal(self, node):
pcmk_started=0
# make sure the resource doesn't already exist for some reason
- self.CM.rsh(node, "crm_resource -D -r remote1 -t primitive")
+ self.CM.rsh(node, "crm_resource -D -r %s -t primitive" % (self.remote_rsc))
+ self.CM.rsh(node, "crm_resource -D -r %s -t primitive" % (self.remote_node))
if not self.stop(node):
self.failed = 1
@@ -2709,7 +2730,7 @@ def start_metal(self, node):
pats = [ ]
watch = self.create_watch(pats, 120)
watch.setwatch()
- pats.append(self.CM["Pat:RscOpOK"] % ("remote1", "start_0"))
+ pats.append("process_lrm_event: LRM operation %s_start_0.*confirmed.*ok" % (self.remote_node))
self.add_connection_rsc(node)
@@ -2720,6 +2741,33 @@ def start_metal(self, node):
self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched))
self.failed = 1
+ def step2_add_rsc(self, node):
+ if self.failed == 1:
+ return
+
+
+ # verify we can put a resource on the remote node
+ pats = [ ]
+ watch = self.create_watch(pats, 120)
+ watch.setwatch()
+ pats.append("process_lrm_event: LRM operation %s_start_0.*node=%s, .*confirmed.*ok" % (self.remote_rsc, self.remote_node))
+
+ # Add a resource that must live on remote-node
+ self.add_primitive_rsc(node)
+ # this crm_resource command actually occurs on the remote node
+ # which verifies that the ipc proxy works
+ rc = self.CM.rsh(node, "crm_resource -M -r remote1-rsc -N %s" % (self.remote_node))
+ if rc != 0:
+ self.fail_string = "Failed to place primitive on remote-node"
+ self.failed = 1
+ return
+
+ self.set_timer("remoteMetalRsc")
+ watch.lookforall()
+ self.log_timer("remoteMetalRsc")
+ if watch.unmatched:
+ self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched))
+ self.failed = 1
def cleanup_metal(self, node):
if self.pcmk_started == 0:
@@ -2729,10 +2777,18 @@ def cleanup_metal(self, node):
watch = self.create_watch(pats, 120)
watch.setwatch()
- pats.append(self.CM["Pat:RscOpOK"] % ("remote1", "stop_0"))
- self.del_connection_rsc(node)
+ if self.remote_rsc_added == 1:
+ pats.append("process_lrm_event: LRM operation %s_stop_0.*confirmed.*ok" % (self.remote_rsc))
+ if self.remote_node_added == 1:
+ pats.append("process_lrm_event: LRM operation %s_stop_0.*confirmed.*ok" % (self.remote_node))
+
self.set_timer("remoteMetalCleanup")
+ if self.remote_rsc_added == 1:
+ self.CM.rsh(node, "crm_resource -U -r remote1-rsc -N %s" % (self.remote_node))
+ self.del_rsc(node, self.remote_rsc)
+ if self.remote_node_added == 1:
+ self.del_rsc(node, self.remote_node)
watch.lookforall()
self.log_timer("remoteMetalCleanup")
@@ -2780,7 +2836,8 @@ def __call__(self, node):
return self.failure("Setup failed, start all nodes failed.")
self.setup_env()
- self.start_metal(node)
+ self.step1_start_metal(node)
+ self.step2_add_rsc(node)
self.cleanup_metal(node)
self.CM.debug("Waiting for the cluster to recover")
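
The crm_resource -M call in step2_add_rsc pins remote1-rsc to the remote node by injecting a location constraint, which the crm_resource -U call in cleanup_metal later clears. A rough sketch of the resulting CIB constraint; the id is an assumed placeholder and may differ from what crm_resource actually generates:

<!-- assumed sketch of the constraint created by
     "crm_resource -M -r remote1-rsc -N remote1"; the id is a placeholder -->
<rsc_location id="cli-prefer-remote1-rsc" rsc="remote1-rsc" node="remote1" score="INFINITY"/>
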
19 lib/pengine/complex.c
@@ -353,6 +353,8 @@ common_unpack(xmlNode * xml_obj, resource_t ** rsc,
const char *value = NULL;
const char *class = NULL; /* Look for this after any templates have been expanded */
const char *id = crm_element_value(xml_obj, XML_ATTR_ID);
+ int container_remote_node = 0;
+ int baremetal_remote_node = 0;
crm_log_xml_trace(xml_obj, "Processing resource input...");
@@ -449,10 +451,25 @@ common_unpack(xmlNode * xml_obj, resource_t ** rsc,
set_bit((*rsc)->flags, pe_rsc_notify);
}
+ if (xml_contains_remote_node((*rsc)->xml)) {
+ if (g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_CONTAINER)) {
+ container_remote_node = 1;
+ } else {
+ baremetal_remote_node = 1;
+ }
+ }
value = g_hash_table_lookup((*rsc)->meta, XML_OP_ATTR_ALLOW_MIGRATE);
if (crm_is_true(value)) {
set_bit((*rsc)->flags, pe_rsc_allow_migrate);
+ } else if (value == NULL && baremetal_remote_node) {
+ /* by default, we want baremetal remote-nodes to be able
+ * to float around the cluster without having to stop all the
+ * resources within the remote-node before moving. Allowing
+ * migration support enables this feature. If this ever causes
+ * problems, migration support can be explicitly turned off with
+ * allow-migrate=false. */
+ set_bit((*rsc)->flags, pe_rsc_allow_migrate);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MANAGED);
@@ -622,7 +639,7 @@ common_unpack(xmlNode * xml_obj, resource_t ** rsc,
if (is_set(data_set->flags, pe_flag_symmetric_cluster)) {
resource_location(*rsc, NULL, 0, "symmetric_default", data_set);
- } else if (xml_contains_remote_node((*rsc)->xml) && g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_CONTAINER)) {
+ } else if (container_remote_node) {
/* remote resources tied to a container resource must always be allowed
* to opt-in to the cluster. Whether the connection resource is actually
* allowed to be placed on a node is dependent on the container resource */
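
As the new comment in common_unpack() explains, baremetal remote-node connection resources now default to allow-migrate, so the connection can move between cluster nodes without first stopping everything running on the remote node. If that ever causes problems, migration can still be disabled per resource with an explicit meta attribute; a minimal sketch with placeholder ids:

<!-- illustrative opt-out: explicitly disable migration for a baremetal
     remote-node connection resource (ids are placeholders) -->
<primitive class="ocf" id="remote1" provider="pacemaker" type="remote">
  <meta_attributes id="remote1-meta_attributes">
    <nvpair id="remote1-meta_attributes-allow-migrate" name="allow-migrate" value="false"/>
  </meta_attributes>
</primitive>
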
16 pengine/test10/remote-move.dot
@@ -1,17 +1,13 @@
digraph "g" {
-"FAKE2_monitor_60000 remote1" [ style=bold color="green" fontcolor="black"]
-"FAKE2_start_0 remote1" -> "FAKE2_monitor_60000 remote1" [ style = bold]
-"FAKE2_start_0 remote1" [ style=bold color="green" fontcolor="black"]
-"FAKE2_stop_0 remote1" -> "FAKE2_start_0 remote1" [ style = bold]
-"FAKE2_stop_0 remote1" -> "all_stopped" [ style = bold]
-"FAKE2_stop_0 remote1" -> "remote1_stop_0 18builder" [ style = bold]
-"FAKE2_stop_0 remote1" [ style=bold color="green" fontcolor="black"]
"all_stopped" [ style=bold color="green" fontcolor="orange"]
+"remote1_migrate_from_0 18node1" -> "remote1_start_0 18node1" [ style = bold]
+"remote1_migrate_from_0 18node1" -> "remote1_stop_0 18builder" [ style = bold]
+"remote1_migrate_from_0 18node1" [ style=bold color="green" fontcolor="black"]
+"remote1_migrate_to_0 18builder" -> "remote1_migrate_from_0 18node1" [ style = bold]
+"remote1_migrate_to_0 18builder" [ style=bold color="green" fontcolor="black"]
"remote1_monitor_60000 18node1" [ style=bold color="green" fontcolor="black"]
-"remote1_start_0 18node1" -> "FAKE2_monitor_60000 remote1" [ style = bold]
-"remote1_start_0 18node1" -> "FAKE2_start_0 remote1" [ style = bold]
"remote1_start_0 18node1" -> "remote1_monitor_60000 18node1" [ style = bold]
-"remote1_start_0 18node1" [ style=bold color="green" fontcolor="black"]
+"remote1_start_0 18node1" [ style=bold color="green" fontcolor="orange"]
"remote1_stop_0 18builder" -> "all_stopped" [ style = bold]
"remote1_stop_0 18builder" -> "remote1_start_0 18node1" [ style = bold]
"remote1_stop_0 18builder" [ style=bold color="green" fontcolor="black"]
54 pengine/test10/remote-move.exp
@@ -36,85 +36,68 @@
</synapse>
<synapse id="3">
<action_set>
- <rsc_op id="19" operation="monitor" operation_key="remote1_monitor_60000" on_node="18node1" on_node_uuid="1">
+ <rsc_op id="21" operation="migrate_from" operation_key="remote1_migrate_from_0" on_node="18node1" on_node_uuid="1">
<primitive id="remote1" class="ocf" provider="pacemaker" type="remote"/>
- <attributes CRM_meta_interval="60000" CRM_meta_name="monitor" CRM_meta_timeout="20000" />
+ <attributes CRM_meta_migrate_source="18builder" CRM_meta_migrate_target="18node1" CRM_meta_timeout="20000" />
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="18" operation="start" operation_key="remote1_start_0" on_node="18node1" on_node_uuid="1"/>
+ <rsc_op id="20" operation="migrate_to" operation_key="remote1_migrate_to_0" on_node="18builder" on_node_uuid="5"/>
</trigger>
</inputs>
</synapse>
<synapse id="4">
<action_set>
- <rsc_op id="18" operation="start" operation_key="remote1_start_0" on_node="18node1" on_node_uuid="1">
+ <rsc_op id="20" operation="migrate_to" operation_key="remote1_migrate_to_0" on_node="18builder" on_node_uuid="5">
<primitive id="remote1" class="ocf" provider="pacemaker" type="remote"/>
- <attributes CRM_meta_timeout="20000" />
+ <attributes CRM_meta_migrate_source="18builder" CRM_meta_migrate_target="18node1" CRM_meta_record_pending="true" CRM_meta_timeout="20000" />
</rsc_op>
</action_set>
- <inputs>
- <trigger>
- <rsc_op id="17" operation="stop" operation_key="remote1_stop_0" on_node="18builder" on_node_uuid="5"/>
- </trigger>
- </inputs>
+ <inputs/>
</synapse>
<synapse id="5">
<action_set>
- <rsc_op id="17" operation="stop" operation_key="remote1_stop_0" on_node="18builder" on_node_uuid="5">
+ <rsc_op id="19" operation="monitor" operation_key="remote1_monitor_60000" on_node="18node1" on_node_uuid="1">
<primitive id="remote1" class="ocf" provider="pacemaker" type="remote"/>
- <attributes CRM_meta_timeout="20000" />
+ <attributes CRM_meta_interval="60000" CRM_meta_name="monitor" CRM_meta_timeout="20000" />
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="22" operation="stop" operation_key="FAKE2_stop_0" on_node="remote1" on_node_uuid="remote1" router_node="18builder"/>
+ <pseudo_event id="18" operation="start" operation_key="remote1_start_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="6">
<action_set>
- <rsc_op id="23" operation="start" operation_key="FAKE2_start_0" on_node="remote1" on_node_uuid="remote1" router_node="18node1">
- <primitive id="FAKE2" class="ocf" provider="heartbeat" type="Dummy"/>
+ <pseudo_event id="18" operation="start" operation_key="remote1_start_0">
<attributes CRM_meta_timeout="20000" />
- </rsc_op>
+ </pseudo_event>
</action_set>
<inputs>
<trigger>
- <rsc_op id="18" operation="start" operation_key="remote1_start_0" on_node="18node1" on_node_uuid="1"/>
+ <rsc_op id="17" operation="stop" operation_key="remote1_stop_0" on_node="18builder" on_node_uuid="5"/>
</trigger>
<trigger>
- <rsc_op id="22" operation="stop" operation_key="FAKE2_stop_0" on_node="remote1" on_node_uuid="remote1" router_node="18builder"/>
+ <rsc_op id="21" operation="migrate_from" operation_key="remote1_migrate_from_0" on_node="18node1" on_node_uuid="1"/>
</trigger>
</inputs>
</synapse>
<synapse id="7">
<action_set>
- <rsc_op id="22" operation="stop" operation_key="FAKE2_stop_0" on_node="remote1" on_node_uuid="remote1" router_node="18builder">
- <primitive id="FAKE2" class="ocf" provider="heartbeat" type="Dummy"/>
+ <rsc_op id="17" operation="stop" operation_key="remote1_stop_0" on_node="18builder" on_node_uuid="5">
+ <primitive id="remote1" class="ocf" provider="pacemaker" type="remote"/>
<attributes CRM_meta_timeout="20000" />
</rsc_op>
</action_set>
- <inputs/>
- </synapse>
- <synapse id="8">
- <action_set>
- <rsc_op id="6" operation="monitor" operation_key="FAKE2_monitor_60000" on_node="remote1" on_node_uuid="remote1" router_node="18node1">
- <primitive id="FAKE2" class="ocf" provider="heartbeat" type="Dummy"/>
- <attributes CRM_meta_interval="60000" CRM_meta_name="monitor" CRM_meta_timeout="20000" />
- </rsc_op>
- </action_set>
<inputs>
<trigger>
- <rsc_op id="18" operation="start" operation_key="remote1_start_0" on_node="18node1" on_node_uuid="1"/>
- </trigger>
- <trigger>
- <rsc_op id="23" operation="start" operation_key="FAKE2_start_0" on_node="remote1" on_node_uuid="remote1" router_node="18node1"/>
+ <rsc_op id="21" operation="migrate_from" operation_key="remote1_migrate_from_0" on_node="18node1" on_node_uuid="1"/>
</trigger>
</inputs>
</synapse>
- <synapse id="9">
+ <synapse id="8">
<action_set>
<pseudo_event id="7" operation="all_stopped" operation_key="all_stopped">
<attributes />
@@ -127,9 +110,6 @@
<trigger>
<rsc_op id="17" operation="stop" operation_key="remote1_stop_0" on_node="18builder" on_node_uuid="5"/>
</trigger>
- <trigger>
- <rsc_op id="22" operation="stop" operation_key="FAKE2_stop_0" on_node="remote1" on_node_uuid="remote1" router_node="18builder"/>
- </trigger>
</inputs>
</synapse>
</transition_graph>
10 pengine/test10/remote-move.summary
@@ -12,19 +12,17 @@ RemoteOnline: [ remote1 ]
Transition Summary:
* Move shooter (Started 18node1 -> 18builder)
- * Move remote1 (Started 18builder -> 18node1)
- * Restart FAKE2 (Started remote1)
+ * Migrate remote1 (Started 18builder -> 18node1)
Executing cluster transition:
* Resource action: shooter stop on 18node1
- * Resource action: FAKE2 stop on remote1
+ * Resource action: remote1 migrate_to on 18builder
* Resource action: shooter start on 18builder
+ * Resource action: remote1 migrate_from on 18node1
* Resource action: remote1 stop on 18builder
* Pseudo action: all_stopped
* Resource action: shooter monitor=60000 on 18builder
- * Resource action: remote1 start on 18node1
- * Resource action: FAKE2 start on remote1
- * Resource action: FAKE2 monitor=60000 on remote1
+ * Pseudo action: remote1_start_0
* Resource action: remote1 monitor=60000 on 18node1
Revised cluster status:
