diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java deleted file mode 100644 index dcbdc586909..00000000000 --- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java +++ /dev/null @@ -1,2085 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.ambari.server.controller.internal; - -import static org.easymock.EasyMock.expect; -import static org.easymock.EasyMock.replay; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; - -import java.io.File; -import java.io.FileInputStream; -import java.lang.reflect.Field; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import org.apache.ambari.server.AmbariException; -import org.apache.ambari.server.H2DatabaseCleaner; -import org.apache.ambari.server.Role; -import org.apache.ambari.server.RoleCommand; -import org.apache.ambari.server.actionmanager.ActionManager; -import org.apache.ambari.server.actionmanager.ExecutionCommandWrapper; -import org.apache.ambari.server.actionmanager.ExecutionCommandWrapperFactory; -import org.apache.ambari.server.actionmanager.HostRoleCommand; -import org.apache.ambari.server.actionmanager.HostRoleStatus; -import org.apache.ambari.server.actionmanager.Stage; -import org.apache.ambari.server.agent.ExecutionCommand; -import org.apache.ambari.server.agent.ExecutionCommand.KeyNames; -import org.apache.ambari.server.audit.AuditLogger; -import org.apache.ambari.server.configuration.Configuration; -import org.apache.ambari.server.controller.AmbariManagementController; -import org.apache.ambari.server.controller.AmbariServer; -import org.apache.ambari.server.controller.ResourceProviderFactory; -import org.apache.ambari.server.controller.spi.Predicate; -import org.apache.ambari.server.controller.spi.Request; -import org.apache.ambari.server.controller.spi.RequestStatus; -import org.apache.ambari.server.controller.spi.Resource; -import org.apache.ambari.server.controller.spi.Resource.Type; -import org.apache.ambari.server.controller.spi.ResourceProvider; -import org.apache.ambari.server.controller.spi.SystemException; -import org.apache.ambari.server.controller.utilities.PredicateBuilder; -import 
org.apache.ambari.server.controller.utilities.PropertyHelper; -import org.apache.ambari.server.events.publishers.AmbariEventPublisher; -import org.apache.ambari.server.orm.GuiceJpaInitializer; -import org.apache.ambari.server.orm.InMemoryDefaultTestModule; -import org.apache.ambari.server.orm.dao.ExecutionCommandDAO; -import org.apache.ambari.server.orm.dao.HostRoleCommandDAO; -import org.apache.ambari.server.orm.dao.RepositoryVersionDAO; -import org.apache.ambari.server.orm.dao.RequestDAO; -import org.apache.ambari.server.orm.dao.StackDAO; -import org.apache.ambari.server.orm.dao.StageDAO; -import org.apache.ambari.server.orm.dao.UpgradeDAO; -import org.apache.ambari.server.orm.entities.ExecutionCommandEntity; -import org.apache.ambari.server.orm.entities.HostRoleCommandEntity; -import org.apache.ambari.server.orm.entities.RepoDefinitionEntity; -import org.apache.ambari.server.orm.entities.RepoOsEntity; -import org.apache.ambari.server.orm.entities.RepositoryVersionEntity; -import org.apache.ambari.server.orm.entities.RequestEntity; -import org.apache.ambari.server.orm.entities.StackEntity; -import org.apache.ambari.server.orm.entities.StageEntity; -import org.apache.ambari.server.orm.entities.UpgradeEntity; -import org.apache.ambari.server.orm.entities.UpgradeGroupEntity; -import org.apache.ambari.server.orm.entities.UpgradeHistoryEntity; -import org.apache.ambari.server.orm.entities.UpgradeItemEntity; -import org.apache.ambari.server.security.TestAuthenticationFactory; -import org.apache.ambari.server.serveraction.upgrades.AutoSkipFailedSummaryAction; -import org.apache.ambari.server.serveraction.upgrades.ConfigureAction; -import org.apache.ambari.server.state.Cluster; -import org.apache.ambari.server.state.Clusters; -import org.apache.ambari.server.state.Config; -import org.apache.ambari.server.state.ConfigFactory; -import org.apache.ambari.server.state.ConfigHelper; -import org.apache.ambari.server.state.Host; -import org.apache.ambari.server.state.HostState; -import org.apache.ambari.server.state.RepositoryType; -import org.apache.ambari.server.state.Service; -import org.apache.ambari.server.state.ServiceComponent; -import org.apache.ambari.server.state.ServiceComponentHost; -import org.apache.ambari.server.state.ServiceGroup; -import org.apache.ambari.server.state.StackId; -import org.apache.ambari.server.state.UpgradeContext; -import org.apache.ambari.server.state.UpgradeHelper; -import org.apache.ambari.server.state.UpgradeState; -import org.apache.ambari.server.state.stack.upgrade.ConfigureTask; -import org.apache.ambari.server.state.stack.upgrade.Direction; -import org.apache.ambari.server.state.stack.upgrade.UpgradeType; -import org.apache.ambari.server.topology.TopologyManager; -import org.apache.ambari.server.utils.StageUtils; -import org.apache.ambari.server.view.ViewRegistry; -import org.apache.commons.collections.CollectionUtils; -import org.apache.commons.io.IOUtils; -import org.apache.commons.lang3.StringUtils; -import org.easymock.EasyMock; -import org.easymock.EasyMockSupport; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.springframework.security.core.context.SecurityContextHolder; - -import com.google.common.base.Function; -import com.google.common.collect.Collections2; -import com.google.common.collect.Lists; -import com.google.gson.Gson; -import com.google.gson.JsonArray; -import com.google.gson.JsonObject; -import com.google.gson.JsonParser; -import com.google.inject.Binder; -import 
com.google.inject.Guice; -import com.google.inject.Injector; -import com.google.inject.Module; -import com.google.inject.util.Modules; - -/** - * UpgradeResourceProvider tests. - */ -public class UpgradeResourceProviderTest extends EasyMockSupport { - - private UpgradeDAO upgradeDao = null; - private RequestDAO requestDao = null; - private RepositoryVersionDAO repoVersionDao = null; - private Injector injector; - private Clusters clusters; - private AmbariManagementController amc; - private ConfigHelper configHelper; - private StackDAO stackDAO; - private TopologyManager topologyManager; - private ConfigFactory configFactory; - private HostRoleCommandDAO hrcDAO; - - RepositoryVersionEntity repoVersionEntity2110; - RepositoryVersionEntity repoVersionEntity2111; - RepositoryVersionEntity repoVersionEntity2112; - RepositoryVersionEntity repoVersionEntity2200; - - /** - * Creates a single host cluster with ZOOKEEPER_SERVER and ZOOKEEPER_CLIENT on - * {@link #repoVersionEntity2110}. - * - * @throws Exception - */ - @Before - public void before() throws Exception { - SecurityContextHolder.getContext().setAuthentication( - TestAuthenticationFactory.createAdministrator()); - - // setup the config helper for placeholder resolution - configHelper = EasyMock.createNiceMock(ConfigHelper.class); - - expect( - configHelper.getPlaceholderValueFromDesiredConfigurations( - EasyMock.anyObject(Cluster.class), EasyMock.eq("{{foo/bar}}"))).andReturn( - "placeholder-rendered-properly").anyTimes(); - - expect( - configHelper.getDefaultProperties(EasyMock.anyObject(StackId.class), - EasyMock.anyString())).andReturn( - new HashMap<>()).anyTimes(); - - replay(configHelper); - - InMemoryDefaultTestModule module = new InMemoryDefaultTestModule(); - - // create an injector which will inject the mocks - injector = Guice.createInjector( - Modules.override(module).with(new MockModule())); - - H2DatabaseCleaner.resetSequences(injector); - injector.getInstance(GuiceJpaInitializer.class); - - amc = injector.getInstance(AmbariManagementController.class); - configFactory = injector.getInstance(ConfigFactory.class); - - Field field = AmbariServer.class.getDeclaredField("clusterController"); - field.setAccessible(true); - field.set(null, amc); - - stackDAO = injector.getInstance(StackDAO.class); - upgradeDao = injector.getInstance(UpgradeDAO.class); - requestDao = injector.getInstance(RequestDAO.class); - repoVersionDao = injector.getInstance(RepositoryVersionDAO.class); - hrcDAO = injector.getInstance(HostRoleCommandDAO.class); - - AmbariEventPublisher publisher = EasyMock.createNiceMock(AmbariEventPublisher.class); - replay(publisher); - ViewRegistry.initInstance(new ViewRegistry(publisher)); - - // TODO AMBARI-12698, this file is attempting to check RU on version 2.1.1, which doesn't support it - // because it has no upgrade packs. We should use correct versions that have stacks. - // For now, ignore the tests that fail.
- StackEntity stackEntity211 = stackDAO.find("HDP", "2.1.1"); - StackEntity stackEntity220 = stackDAO.find("HDP", "2.2.0"); - StackId stack211 = new StackId(stackEntity211); - - repoVersionEntity2110 = new RepositoryVersionEntity(); - repoVersionEntity2110.setDisplayName("My New Version 1"); - repoVersionEntity2110.addRepoOsEntities(createTestOperatingSystems()); - repoVersionEntity2110.setStack(stackEntity211); - repoVersionEntity2110.setVersion("2.1.1.0"); - repoVersionDao.create(repoVersionEntity2110); - - repoVersionEntity2111 = new RepositoryVersionEntity(); - repoVersionEntity2111.setDisplayName("My New Version 2 for minor upgrade"); - repoVersionEntity2111.addRepoOsEntities(createTestOperatingSystems()); - repoVersionEntity2111.setStack(stackEntity211); - repoVersionEntity2111.setVersion("2.1.1.1"); - repoVersionDao.create(repoVersionEntity2111); - - repoVersionEntity2112 = new RepositoryVersionEntity(); - repoVersionEntity2112.setDisplayName("My New Version 3 for patch upgrade"); - repoVersionEntity2112.addRepoOsEntities(createTestOperatingSystems()); - repoVersionEntity2112.setStack(stackEntity211); - repoVersionEntity2112.setVersion("2.1.1.2"); - repoVersionEntity2112.setType(RepositoryType.PATCH); - repoVersionEntity2112.setVersionXml(""); - repoVersionDao.create(repoVersionEntity2112); - - repoVersionEntity2200 = new RepositoryVersionEntity(); - repoVersionEntity2200.setDisplayName("My New Version 4 for major upgrade"); - repoVersionEntity2200.addRepoOsEntities(createTestOperatingSystems()); - repoVersionEntity2200.setStack(stackEntity220); - repoVersionEntity2200.setVersion("2.2.0.0"); - repoVersionDao.create(repoVersionEntity2200); - - clusters = injector.getInstance(Clusters.class); - - clusters.addCluster("c1", stack211); - Cluster cluster = clusters.getCluster("c1"); - - clusters.addHost("h1"); - Host host = clusters.getHost("h1"); - Map hostAttributes = new HashMap<>(); - hostAttributes.put("os_family", "redhat"); - hostAttributes.put("os_release_version", "6.3"); - host.setHostAttributes(hostAttributes); - host.setState(HostState.HEALTHY); - - clusters.mapHostToCluster("h1", "c1"); - - // add a single ZK server and client on 2.1.1.0 - ServiceGroup serviceGroup = cluster.addServiceGroup(UpgradeResourceProvider.DUMMY_SERVICE_GROUP, stack211.getStackId()); - Service service = cluster.addService(serviceGroup, "ZOOKEEPER", "ZOOKEEPER", repoVersionEntity2110); - ServiceComponent component = service.addServiceComponent("ZOOKEEPER_SERVER"); - ServiceComponentHost sch = component.addServiceComponentHost("h1"); - sch.setVersion("2.1.1.0"); - - component = service.addServiceComponent("ZOOKEEPER_CLIENT"); - sch = component.addServiceComponentHost("h1"); - sch.setVersion("2.1.1.0"); - - Configuration configuration = injector.getInstance(Configuration.class); - configuration.setProperty("upgrade.parameter.zk-server.timeout", "824"); - - topologyManager = injector.getInstance(TopologyManager.class); - StageUtils.setTopologyManager(topologyManager); - StageUtils.setConfiguration(configuration); - ActionManager.setTopologyManager(topologyManager); - EasyMock.replay(injector.getInstance(AuditLogger.class)); - } - - private List createTestOperatingSystems() { - List operatingSystems = new ArrayList<>(); - RepoDefinitionEntity repoDefinitionEntity1 = new RepoDefinitionEntity(); - repoDefinitionEntity1.setRepoID("HDP-UTILS"); - repoDefinitionEntity1.setBaseUrl(""); - repoDefinitionEntity1.setRepoName("HDP-UTILS"); - RepoDefinitionEntity repoDefinitionEntity2 = new RepoDefinitionEntity(); - 
repoDefinitionEntity2.setRepoID("HDP"); - repoDefinitionEntity2.setBaseUrl(""); - repoDefinitionEntity2.setRepoName("HDP"); - RepoOsEntity repoOsEntity = new RepoOsEntity(); - repoOsEntity.setFamily("redhat6"); - repoOsEntity.setAmbariManaged(true); - repoOsEntity.addRepoDefinition(repoDefinitionEntity1); - repoOsEntity.addRepoDefinition(repoDefinitionEntity2); - operatingSystems.add(repoOsEntity); - return operatingSystems; - } - - @After - public void after() throws AmbariException, SQLException { - H2DatabaseCleaner.clearDatabaseAndStopPersistenceService(injector); - EasyMock.reset(injector.getInstance(AuditLogger.class)); - injector = null; - } - - /** - * Obtain request id from the {@code RequestStatus} - * @param requestStatus reqult of the {@code createResources} - * @return id of the request - */ - private long getRequestId(RequestStatus requestStatus){ - assertEquals(1, requestStatus.getAssociatedResources().size()); - Resource r = requestStatus.getAssociatedResources().iterator().next(); - String id = r.getPropertyValue("Upgrade/request_id").toString(); - return Long.parseLong(id); - } - - @Test - public void testCreateResourcesWithAutoSkipFailures() throws Exception { - Cluster cluster = clusters.getCluster("c1"); - - Map requestProps = new HashMap<>(); - requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1"); - requestProps.put(UpgradeResourceProvider.UPGRADE_REPO_VERSION_ID, String.valueOf(repoVersionEntity2200.getId())); - requestProps.put(UpgradeResourceProvider.UPGRADE_PACK, "upgrade_test"); - requestProps.put(UpgradeResourceProvider.UPGRADE_TYPE, UpgradeType.ROLLING.toString()); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_FAILURES, Boolean.TRUE.toString()); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_SC_FAILURES, Boolean.TRUE.toString()); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_MANUAL_VERIFICATION, Boolean.FALSE.toString()); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_PREREQUISITE_CHECKS, Boolean.TRUE.toString()); - requestProps.put(UpgradeResourceProvider.UPGRADE_DIRECTION, Direction.UPGRADE.name()); - - ResourceProvider upgradeResourceProvider = createProvider(amc); - Request request = PropertyHelper.getCreateRequest(Collections.singleton(requestProps), null); - upgradeResourceProvider.createResources(request); - - List upgrades = upgradeDao.findUpgrades(cluster.getClusterId()); - assertEquals(1, upgrades.size()); - - UpgradeEntity entity = upgrades.get(0); - assertTrue(cluster.getClusterId().longValue() == entity.getClusterId().longValue()); - - List upgradeGroups = entity.getUpgradeGroups(); - assertEquals(3, upgradeGroups.size()); - - UpgradeGroupEntity preClusterGroup = upgradeGroups.get(0); - assertEquals("PRE_CLUSTER", preClusterGroup.getName()); - - List preClusterUpgradeItems = preClusterGroup.getItems(); - assertEquals(2, preClusterUpgradeItems.size()); - assertEquals("Foo", parseSingleMessage(preClusterUpgradeItems.get(0).getText())); - assertEquals("Foo", parseSingleMessage(preClusterUpgradeItems.get(1).getText())); - - UpgradeGroupEntity zookeeperGroup = upgradeGroups.get(1); - assertEquals("ZOOKEEPER", zookeeperGroup.getName()); - - List zookeeperUpgradeItems = zookeeperGroup.getItems(); - assertEquals(5, zookeeperUpgradeItems.size()); - - assertEquals("This is a manual task with a placeholder of placeholder-rendered-properly", - parseSingleMessage(zookeeperUpgradeItems.get(0).getText())); - assertEquals("Restarting ZooKeeper Server on h1", zookeeperUpgradeItems.get(1).getText()); - 
assertEquals("Updating configuration zookeeper-newconfig", - zookeeperUpgradeItems.get(2).getText()); - assertEquals("Service Check ZooKeeper", zookeeperUpgradeItems.get(3).getText()); - assertTrue(zookeeperUpgradeItems.get(4).getText().contains("There are failures that were automatically skipped")); - - // the last upgrade item is the skipped failure check - UpgradeItemEntity skippedFailureCheck = zookeeperUpgradeItems.get(zookeeperUpgradeItems.size() - 1); - skippedFailureCheck.getTasks().contains(AutoSkipFailedSummaryAction.class.getName()); - - UpgradeGroupEntity postClusterGroup = upgradeGroups.get(2); - assertEquals("POST_CLUSTER", postClusterGroup.getName()); - - List postClusterUpgradeItems = postClusterGroup.getItems(); - assertEquals(2, postClusterUpgradeItems.size()); - assertEquals("Please confirm you are ready to finalize", parseSingleMessage(postClusterUpgradeItems.get(0).getText())); - assertEquals("Save Cluster State", postClusterUpgradeItems.get(1).getText()); - } - - @Test - public void testCreateResourcesWithAutoSkipManualVerification() throws Exception { - Cluster cluster = clusters.getCluster("c1"); - - Map requestProps = new HashMap<>(); - requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1"); - requestProps.put(UpgradeResourceProvider.UPGRADE_REPO_VERSION_ID, String.valueOf(repoVersionEntity2200.getId())); - requestProps.put(UpgradeResourceProvider.UPGRADE_PACK, "upgrade_test"); - requestProps.put(UpgradeResourceProvider.UPGRADE_TYPE, UpgradeType.ROLLING.toString()); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_MANUAL_VERIFICATION, Boolean.TRUE.toString()); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_PREREQUISITE_CHECKS, Boolean.TRUE.toString()); - requestProps.put(UpgradeResourceProvider.UPGRADE_DIRECTION, Direction.UPGRADE.name()); - - ResourceProvider upgradeResourceProvider = createProvider(amc); - Request request = PropertyHelper.getCreateRequest(Collections.singleton(requestProps), null); - upgradeResourceProvider.createResources(request); - - List upgrades = upgradeDao.findUpgrades(cluster.getClusterId()); - assertEquals(1, upgrades.size()); - - UpgradeEntity entity = upgrades.get(0); - assertTrue(cluster.getClusterId().longValue() == entity.getClusterId().longValue()); - - List upgradeGroups = entity.getUpgradeGroups(); - assertEquals(2, upgradeGroups.size()); - - UpgradeGroupEntity zookeeperGroup = upgradeGroups.get(0); - assertEquals("ZOOKEEPER", zookeeperGroup.getName()); - - List zookeeperUpgradeItems = zookeeperGroup.getItems(); - assertEquals(3, zookeeperUpgradeItems.size()); - assertEquals("Restarting ZooKeeper Server on h1", zookeeperUpgradeItems.get(0).getText()); - assertEquals("Updating configuration zookeeper-newconfig", - zookeeperUpgradeItems.get(1).getText()); - assertEquals("Service Check ZooKeeper", zookeeperUpgradeItems.get(2).getText()); - - UpgradeGroupEntity postClusterGroup = upgradeGroups.get(1); - assertEquals("POST_CLUSTER", postClusterGroup.getName()); - - List postClusterUpgradeItems = postClusterGroup.getItems(); - assertEquals(1, postClusterUpgradeItems.size()); - assertEquals("Save Cluster State", postClusterUpgradeItems.get(0).getText()); - } - - @Test - public void testCreateResourcesWithAutoSkipAll() throws Exception { - Cluster cluster = clusters.getCluster("c1"); - - Map requestProps = new HashMap<>(); - requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1"); - requestProps.put(UpgradeResourceProvider.UPGRADE_REPO_VERSION_ID, String.valueOf(repoVersionEntity2200.getId())); 
- requestProps.put(UpgradeResourceProvider.UPGRADE_PACK, "upgrade_test"); - requestProps.put(UpgradeResourceProvider.UPGRADE_TYPE, UpgradeType.ROLLING.toString()); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_FAILURES, Boolean.TRUE.toString()); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_SC_FAILURES, Boolean.TRUE.toString()); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_MANUAL_VERIFICATION, Boolean.TRUE.toString()); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_PREREQUISITE_CHECKS, Boolean.TRUE.toString()); - requestProps.put(UpgradeResourceProvider.UPGRADE_DIRECTION, Direction.UPGRADE.name()); - - ResourceProvider upgradeResourceProvider = createProvider(amc); - Request request = PropertyHelper.getCreateRequest(Collections.singleton(requestProps), null); - upgradeResourceProvider.createResources(request); - - List upgrades = upgradeDao.findUpgrades(cluster.getClusterId()); - assertEquals(1, upgrades.size()); - - UpgradeEntity entity = upgrades.get(0); - assertTrue(cluster.getClusterId().longValue() == entity.getClusterId().longValue()); - - List upgradeGroups = entity.getUpgradeGroups(); - assertEquals(2, upgradeGroups.size()); - - UpgradeGroupEntity zookeeperGroup = upgradeGroups.get(0); - assertEquals("ZOOKEEPER", zookeeperGroup.getName()); - - List zookeeperUpgradeItems = zookeeperGroup.getItems(); - assertEquals(4, zookeeperUpgradeItems.size()); - - assertEquals("Restarting ZooKeeper Server on h1", zookeeperUpgradeItems.get(0).getText()); - assertEquals("Updating configuration zookeeper-newconfig", - zookeeperUpgradeItems.get(1).getText()); - assertEquals("Service Check ZooKeeper", zookeeperUpgradeItems.get(2).getText()); - assertTrue(zookeeperUpgradeItems.get(3).getText().contains("There are failures that were automatically skipped")); - - // the last upgrade item is the skipped failure check - UpgradeItemEntity skippedFailureCheck = zookeeperUpgradeItems.get(zookeeperUpgradeItems.size() - 1); - skippedFailureCheck.getTasks().contains(AutoSkipFailedSummaryAction.class.getName()); - - UpgradeGroupEntity postClusterGroup = upgradeGroups.get(1); - assertEquals("POST_CLUSTER", postClusterGroup.getName()); - - List postClusterUpgradeItems = postClusterGroup.getItems(); - assertEquals(1, postClusterUpgradeItems.size()); - assertEquals("Save Cluster State", postClusterUpgradeItems.get(0).getText()); - } - - @Test - public void testGetResources() throws Exception { - RequestStatus status = testCreateResources(); - - Set createdResources = status.getAssociatedResources(); - assertEquals(1, createdResources.size()); - Resource res = createdResources.iterator().next(); - Long id = (Long) res.getPropertyValue("Upgrade/request_id"); - assertNotNull(id); - assertEquals(Long.valueOf(1), id); - - // upgrade - Set propertyIds = new HashSet<>(); - propertyIds.add("Upgrade"); - - Predicate predicate = new PredicateBuilder() - .property(UpgradeResourceProvider.UPGRADE_REQUEST_ID).equals("1").and() - .property(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME).equals("c1") - .toPredicate(); - Request request = PropertyHelper.getReadRequest(propertyIds); - - ResourceProvider upgradeResourceProvider = createProvider(amc); - Set resources = upgradeResourceProvider.getResources(request, predicate); - - assertEquals(1, resources.size()); - res = resources.iterator().next(); - assertNotNull(res.getPropertyValue("Upgrade/progress_percent")); - assertNotNull(res.getPropertyValue(UpgradeResourceProvider.UPGRADE_DIRECTION)); - assertEquals(Direction.UPGRADE, 
res.getPropertyValue(UpgradeResourceProvider.UPGRADE_DIRECTION)); - assertEquals(false, res.getPropertyValue(UpgradeResourceProvider.UPGRADE_SKIP_FAILURES)); - assertEquals(false, res.getPropertyValue(UpgradeResourceProvider.UPGRADE_SKIP_SC_FAILURES)); - assertEquals(UpgradeType.ROLLING, res.getPropertyValue(UpgradeResourceProvider.UPGRADE_TYPE)); - - // upgrade groups - propertyIds.clear(); - propertyIds.add("UpgradeGroup"); - - predicate = new PredicateBuilder() - .property(UpgradeGroupResourceProvider.UPGRADE_REQUEST_ID).equals("1").and() - .property(UpgradeGroupResourceProvider.UPGRADE_CLUSTER_NAME).equals("c1") - .toPredicate(); - request = PropertyHelper.getReadRequest(propertyIds); - - ResourceProvider upgradeGroupResourceProvider = new UpgradeGroupResourceProvider(amc); - resources = upgradeGroupResourceProvider.getResources(request, predicate); - - assertEquals(3, resources.size()); - res = resources.iterator().next(); - assertNotNull(res.getPropertyValue("UpgradeGroup/status")); - assertNotNull(res.getPropertyValue("UpgradeGroup/group_id")); - assertNotNull(res.getPropertyValue("UpgradeGroup/total_task_count")); - assertNotNull(res.getPropertyValue("UpgradeGroup/in_progress_task_count")); - assertNotNull(res.getPropertyValue("UpgradeGroup/completed_task_count")); - - // upgrade items - propertyIds.clear(); - propertyIds.add("UpgradeItem"); - - predicate = new PredicateBuilder() - .property(UpgradeItemResourceProvider.UPGRADE_GROUP_ID).equals("1").and() - .property(UpgradeItemResourceProvider.UPGRADE_REQUEST_ID).equals("1").and() - .property(UpgradeItemResourceProvider.UPGRADE_CLUSTER_NAME).equals("c1") - .toPredicate(); - request = PropertyHelper.getReadRequest(propertyIds); - - ResourceProvider upgradeItemResourceProvider = new UpgradeItemResourceProvider(amc); - resources = upgradeItemResourceProvider.getResources(request, predicate); - - assertEquals(2, resources.size()); - - res = resources.iterator().next(); - assertNotNull(res.getPropertyValue("UpgradeItem/status")); - - // !!! check for manual stage vs item text - propertyIds.clear(); - propertyIds.add("UpgradeItem"); - - predicate = new PredicateBuilder() - .property(UpgradeItemResourceProvider.UPGRADE_GROUP_ID).equals("3").and() - .property(UpgradeItemResourceProvider.UPGRADE_REQUEST_ID).equals("1").and() - .property(UpgradeItemResourceProvider.UPGRADE_CLUSTER_NAME).equals("c1") - .toPredicate(); - request = PropertyHelper.getReadRequest(propertyIds); - - upgradeItemResourceProvider = new UpgradeItemResourceProvider(amc); - resources = upgradeItemResourceProvider.getResources(request, predicate); - assertEquals(2, resources.size()); - res = resources.iterator().next(); - - assertEquals("Confirm Finalize", res.getPropertyValue("UpgradeItem/context")); - String msgStr = res.getPropertyValue("UpgradeItem/text").toString(); - JsonParser parser = new JsonParser(); - JsonArray msgArray = (JsonArray) parser.parse(msgStr); - JsonObject msg = (JsonObject) msgArray.get(0); - - assertTrue(msg.get("message").getAsString().startsWith("Please confirm")); - } - - /** - * Tests that retrieving an upgrade correctly populates less common upgrade - * options.
- */ - @Test - public void testGetResourcesWithSpecialOptions() throws Exception { - Cluster cluster = clusters.getCluster("c1"); - - List upgrades = upgradeDao.findUpgrades(cluster.getClusterId()); - assertEquals(0, upgrades.size()); - - Map requestProps = new HashMap<>(); - requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1"); - requestProps.put(UpgradeResourceProvider.UPGRADE_REPO_VERSION_ID, String.valueOf(repoVersionEntity2111.getId())); - requestProps.put(UpgradeResourceProvider.UPGRADE_PACK, "upgrade_test"); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_PREREQUISITE_CHECKS, "true"); - requestProps.put(UpgradeResourceProvider.UPGRADE_DIRECTION, Direction.UPGRADE.name()); - - // tests skipping SC failure options - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_FAILURES, "true"); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_SC_FAILURES, "true"); - - ResourceProvider upgradeResourceProvider = createProvider(amc); - - Request request = PropertyHelper.getCreateRequest(Collections.singleton(requestProps), null); - RequestStatus status = upgradeResourceProvider.createResources(request); - assertNotNull(status); - - // upgrade - Set propertyIds = new HashSet<>(); - propertyIds.add("Upgrade"); - - Predicate predicate = new PredicateBuilder() - .property(UpgradeResourceProvider.UPGRADE_REQUEST_ID).equals("1").and() - .property(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME).equals("c1") - .toPredicate(); - - request = PropertyHelper.getReadRequest(propertyIds); - Set resources = upgradeResourceProvider.getResources(request, predicate); - - assertEquals(1, resources.size()); - Resource resource = resources.iterator().next(); - - assertEquals(true, resource.getPropertyValue(UpgradeResourceProvider.UPGRADE_SKIP_FAILURES)); - assertEquals(true, resource.getPropertyValue(UpgradeResourceProvider.UPGRADE_SKIP_SC_FAILURES)); - } - - - @Test - public void testCreatePartialDowngrade() throws Exception { - clusters.addHost("h2"); - Host host = clusters.getHost("h2"); - Map hostAttributes = new HashMap<>(); - hostAttributes.put("os_family", "redhat"); - hostAttributes.put("os_release_version", "6.3"); - host.setHostAttributes(hostAttributes); - - clusters.mapHostToCluster("h2", "c1"); - Cluster cluster = clusters.getCluster("c1"); - Service service = cluster.getService("ZOOKEEPER"); - - // start out with 0 (sanity check) - List upgrades = upgradeDao.findUpgrades(cluster.getClusterId()); - assertEquals(0, upgrades.size()); - - // a downgrade MUST have an upgrade to come from, so populate an upgrade in - // the DB - RequestEntity requestEntity = new RequestEntity(); - requestEntity.setRequestId(2L); - requestEntity.setClusterId(cluster.getClusterId()); - requestEntity.setStages(new ArrayList<>()); - requestDao.create(requestEntity); - - UpgradeEntity upgradeEntity = new UpgradeEntity(); - upgradeEntity.setClusterId(cluster.getClusterId()); - upgradeEntity.setDirection(Direction.UPGRADE); - upgradeEntity.setRepositoryVersion(repoVersionEntity2200); - upgradeEntity.setUpgradePackage("upgrade_test"); - upgradeEntity.setUpgradeType(UpgradeType.ROLLING); - upgradeEntity.setRequestEntity(requestEntity); - - UpgradeHistoryEntity history = new UpgradeHistoryEntity(); - history.setUpgrade(upgradeEntity); - history.setFromRepositoryVersion(service.getDesiredRepositoryVersion()); - history.setTargetRepositoryVersion(repoVersionEntity2200); - history.setServiceName(service.getName()); - history.setComponentName("ZOOKEEPER_SERVER"); - upgradeEntity.addHistory(history); - -
history = new UpgradeHistoryEntity(); - history.setUpgrade(upgradeEntity); - history.setFromRepositoryVersion(service.getDesiredRepositoryVersion()); - history.setTargetRepositoryVersion(repoVersionEntity2200); - history.setServiceName(service.getName()); - history.setComponentName("ZOOKEEPER_CLIENT"); - upgradeEntity.addHistory(history); - - upgradeDao.create(upgradeEntity); - upgrades = upgradeDao.findUpgrades(cluster.getClusterId()); - assertEquals(1, upgrades.size()); - - // push a ZK server forward to the new repo version, leaving the old one on - // the old version - ServiceComponent component = service.getServiceComponent("ZOOKEEPER_SERVER"); - ServiceComponentHost sch = component.addServiceComponentHost("h2"); - sch.setVersion(repoVersionEntity2200.getVersion()); - - UpgradeEntity lastUpgrade = upgradeDao.findLastUpgradeForCluster(cluster.getClusterId(), Direction.UPGRADE); - assertNotNull(lastUpgrade); - - Map requestProps = new HashMap<>(); - requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1"); - requestProps.put(UpgradeResourceProvider.UPGRADE_PACK, "upgrade_test"); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_PREREQUISITE_CHECKS, "true"); - requestProps.put(UpgradeResourceProvider.UPGRADE_DIRECTION, Direction.DOWNGRADE.name()); - - Map requestInfoProperties = new HashMap<>(); - - ResourceProvider upgradeResourceProvider = createProvider(amc); - - Request request = PropertyHelper.getCreateRequest(Collections.singleton(requestProps), requestInfoProperties); - upgradeResourceProvider.createResources(request); - - upgrades = upgradeDao.findUpgrades(cluster.getClusterId()); - assertEquals(2, upgrades.size()); - - UpgradeEntity downgrade = upgrades.get(1); - assertTrue(cluster.getClusterId().longValue() == downgrade.getClusterId().longValue()); - - List upgradeGroups = downgrade.getUpgradeGroups(); - assertEquals(3, upgradeGroups.size()); - - // the ZK restart group should only have 3 entries since the ZK server on h1 - // didn't get upgraded - UpgradeGroupEntity group = upgradeGroups.get(1); - assertEquals("ZOOKEEPER", group.getName()); - assertEquals(3, group.getItems().size()); - } - - - @Test - public void testDowngradeToBase() throws Exception { - Cluster cluster = clusters.getCluster("c1"); - - Map requestProps = new HashMap<>(); - requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1"); - requestProps.put(UpgradeResourceProvider.UPGRADE_REPO_VERSION_ID, String.valueOf(repoVersionEntity2111.getId())); - requestProps.put(UpgradeResourceProvider.UPGRADE_PACK, "upgrade_test"); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_PREREQUISITE_CHECKS, "true"); - requestProps.put(UpgradeResourceProvider.UPGRADE_DIRECTION, Direction.UPGRADE.name()); - - ResourceProvider upgradeResourceProvider = createProvider(amc); - - Request request = PropertyHelper.getCreateRequest(Collections.singleton(requestProps), null); - upgradeResourceProvider.createResources(request); - - List upgrades = upgradeDao.findUpgrades(cluster.getClusterId()); - assertEquals(1, upgrades.size()); - - UpgradeEntity upgrade = upgrades.get(0); - - // now abort the upgrade so another can be created - abortUpgrade(upgrade.getRequestId()); - - // create another upgrade which should fail - requestProps = new HashMap<>(); - requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1"); - requestProps.put(UpgradeResourceProvider.UPGRADE_REPO_VERSION_ID, "9999"); - requestProps.put(UpgradeResourceProvider.UPGRADE_PACK, "upgrade_test"); -
requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_PREREQUISITE_CHECKS, "true"); - request = PropertyHelper.getCreateRequest(Collections.singleton(requestProps), null); - try { - upgradeResourceProvider.createResources(request); - Assert.fail("Expected an exception going downgrade with no upgrade pack"); - } catch (Exception e) { - // !!! expected - } - - // fix the properties and try again - requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1"); - requestProps.put(UpgradeResourceProvider.UPGRADE_PACK, "upgrade_test"); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_PREREQUISITE_CHECKS, "true"); - requestProps.put(UpgradeResourceProvider.UPGRADE_DIRECTION, Direction.DOWNGRADE.name()); - - Map requestInfoProperties = new HashMap<>(); - - request = PropertyHelper.getCreateRequest(Collections.singleton(requestProps), requestInfoProperties); - RequestStatus status = upgradeResourceProvider.createResources(request); - assertEquals(1, status.getAssociatedResources().size()); - Resource r = status.getAssociatedResources().iterator().next(); - String id = r.getPropertyValue("Upgrade/request_id").toString(); - - UpgradeEntity entity = upgradeDao.findUpgrade(Long.parseLong(id)); - assertNotNull(entity); - assertEquals(Direction.DOWNGRADE, entity.getDirection()); - - // associated version is the FROM on DOWNGRADE - assertEquals(repoVersionEntity2111.getVersion(), entity.getRepositoryVersion().getVersion()); - - // target is by service - assertEquals(repoVersionEntity2110.getVersion(), - entity.getHistory().iterator().next().getTargetVersion()); - - StageDAO dao = injector.getInstance(StageDAO.class); - List stages = dao.findByRequestId(entity.getRequestId()); - - Gson gson = new Gson(); - for (StageEntity se : stages) { - Map map = gson.>fromJson(se.getCommandParamsStage(), Map.class); - assertTrue(map.containsKey("upgrade_direction")); - assertEquals("downgrade", map.get("upgrade_direction")); - } - - } - - - - /** - * Test Downgrade from the partially completed upgrade - */ - @Test - public void testNotFullDowngrade() throws Exception { - Cluster cluster = clusters.getCluster("c1"); - - // add additional service for the test - ServiceGroup serviceGroup = cluster.getServiceGroup(UpgradeResourceProvider.DUMMY_SERVICE_GROUP); - Service service = cluster.addService(serviceGroup, "HIVE", "HIVE", repoVersionEntity2110); - - ServiceComponent component = service.addServiceComponent("HIVE_SERVER"); - ServiceComponentHost sch = component.addServiceComponentHost("h1"); - sch.setVersion("2.1.1.0"); - - // create upgrade request - Map requestProps = new HashMap<>(); - requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1"); - requestProps.put(UpgradeResourceProvider.UPGRADE_REPO_VERSION_ID, String.valueOf(repoVersionEntity2200.getId())); - requestProps.put(UpgradeResourceProvider.UPGRADE_PACK, "upgrade_nonrolling_new_stack"); - requestProps.put(UpgradeResourceProvider.UPGRADE_TYPE, "NON_ROLLING"); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_PREREQUISITE_CHECKS, "true"); - requestProps.put(UpgradeResourceProvider.UPGRADE_DIRECTION, Direction.UPGRADE.name()); - - ResourceProvider upgradeResourceProvider = createProvider(amc); - - Request request = PropertyHelper.getCreateRequest(Collections.singleton(requestProps), null); - upgradeResourceProvider.createResources(request); - - // check that upgrade was created and groups for the tested services are on place - List upgrades = upgradeDao.findUpgrades(cluster.getClusterId()); - assertEquals(1, upgrades.size()); - - 
UpgradeEntity upgrade = upgrades.get(0); - List groups = upgrade.getUpgradeGroups(); - boolean isHiveGroupFound = false; - boolean isZKGroupFound = false; - - // look only for testing groups - for (UpgradeGroupEntity group: groups) { - if (group.getName().equalsIgnoreCase("hive")) { - isHiveGroupFound = true; - } else if (group.getName().equalsIgnoreCase("zookeeper")){ - isZKGroupFound = true; - } - } - - assertTrue(isHiveGroupFound); - assertTrue(isZKGroupFound); - - isHiveGroupFound = false; - isZKGroupFound = false; - sch.setVersion("2.2.0.0"); - - // now abort the upgrade so another can be created - abortUpgrade(upgrade.getRequestId()); - - // create downgrade with one upgraded service - service.setDesiredRepositoryVersion(repoVersionEntity2200); - - requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1"); - requestProps.put(UpgradeResourceProvider.UPGRADE_PACK, "upgrade_nonrolling_new_stack"); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_PREREQUISITE_CHECKS, "true"); - requestProps.put(UpgradeResourceProvider.UPGRADE_DIRECTION, Direction.DOWNGRADE.name()); - - Map requestInfoProperties = new HashMap<>(); - - request = PropertyHelper.getCreateRequest(Collections.singleton(requestProps), requestInfoProperties); - RequestStatus status = upgradeResourceProvider.createResources(request); - UpgradeEntity upgradeEntity = upgradeDao.findUpgradeByRequestId(getRequestId(status)); - - for (UpgradeGroupEntity group: upgradeEntity.getUpgradeGroups()) { - if (group.getName().equalsIgnoreCase("hive")) { - isHiveGroupFound = true; - } else if (group.getName().equalsIgnoreCase("zookeeper")){ - isZKGroupFound = true; - } - } - - // as services not updated, nothing to downgrade - assertTrue(isHiveGroupFound); - assertFalse(isZKGroupFound); - } - - - @Test - public void testAbort() throws Exception { - RequestStatus status = testCreateResources(); - - Set createdResources = status.getAssociatedResources(); - assertEquals(1, createdResources.size()); - Resource res = createdResources.iterator().next(); - Long id = (Long) res.getPropertyValue("Upgrade/request_id"); - assertNotNull(id); - assertEquals(Long.valueOf(1), id); - - Map requestProps = new HashMap<>(); - requestProps.put(UpgradeResourceProvider.UPGRADE_REQUEST_ID, id.toString()); - requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1"); - requestProps.put(UpgradeResourceProvider.UPGRADE_REQUEST_STATUS, "ABORTED"); - requestProps.put(UpgradeResourceProvider.UPGRADE_SUSPENDED, "true"); - - UpgradeResourceProvider urp = createProvider(amc); - - Request req = PropertyHelper.getUpdateRequest(requestProps, null); - urp.updateResources(req, null); - - List commands = hrcDAO.findByRequest(id); - - int i = 0; - for (HostRoleCommandEntity command : commands) { - if (i < 3) { - command.setStatus(HostRoleStatus.COMPLETED); - } else { - command.setStatus(HostRoleStatus.ABORTED); - } - hrcDAO.merge(command); - i++; - } - - req = PropertyHelper.getReadRequest( - UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, - UpgradeResourceProvider.UPGRADE_ID, - UpgradeResourceProvider.REQUEST_PROGRESS_PERCENT_ID); - - Predicate pred = new PredicateBuilder() - .property(UpgradeResourceProvider.UPGRADE_REQUEST_ID).equals(id.toString()) - .and() - .property(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME).equals("c1") - .toPredicate(); - - Set resources = urp.getResources(req, pred); - assertEquals(1, resources.size()); - res = resources.iterator().next(); - - Double value = (Double) 
res.getPropertyValue(UpgradeResourceProvider.REQUEST_PROGRESS_PERCENT_ID); - - assertEquals(37.5d, value, 0.1d); - } - - - @Test - public void testRetry() throws Exception { - RequestStatus status = testCreateResources(); - - Set createdResources = status.getAssociatedResources(); - assertEquals(1, createdResources.size()); - Resource res = createdResources.iterator().next(); - Long id = (Long) res.getPropertyValue("Upgrade/request_id"); - assertNotNull(id); - assertEquals(Long.valueOf(1), id); - - Map requestProps = new HashMap<>(); - requestProps.put(UpgradeResourceProvider.UPGRADE_REQUEST_ID, id.toString()); - requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1"); - requestProps.put(UpgradeResourceProvider.UPGRADE_REQUEST_STATUS, "ABORTED"); - requestProps.put(UpgradeResourceProvider.UPGRADE_SUSPENDED, "true"); - - UpgradeResourceProvider urp = createProvider(amc); - - // !!! make sure we can. actual abort is tested elsewhere - Request req = PropertyHelper.getUpdateRequest(requestProps, null); - urp.updateResources(req, null); - - ActionManager am = injector.getInstance(ActionManager.class); - - List commands = am.getRequestTasks(id); - - boolean foundOne = false; - for (HostRoleCommand hrc : commands) { - if (hrc.getRole().equals(Role.AMBARI_SERVER_ACTION)) { - assertEquals(-1L, hrc.getHostId()); - assertNull(hrc.getHostName()); - foundOne = true; - } - } - assertTrue("Expected at least one server-side action", foundOne); - - HostRoleCommand cmd = commands.get(commands.size()-1); - - HostRoleCommandDAO dao = injector.getInstance(HostRoleCommandDAO.class); - HostRoleCommandEntity entity = dao.findByPK(cmd.getTaskId()); - entity.setStatus(HostRoleStatus.ABORTED); - dao.merge(entity); - - requestProps = new HashMap<>(); - requestProps.put(UpgradeResourceProvider.UPGRADE_REQUEST_ID, id.toString()); - requestProps.put(UpgradeResourceProvider.UPGRADE_REQUEST_STATUS, "PENDING"); - requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1"); - requestProps.put(UpgradeResourceProvider.UPGRADE_SUSPENDED, "false"); - - // !!! make sure we can. 
actual reset is tested elsewhere - req = PropertyHelper.getUpdateRequest(requestProps, null); - urp.updateResources(req, null); - } - - @Test(expected = IllegalArgumentException.class) - public void testAbortWithoutSuspendFlag() throws Exception { - RequestStatus status = testCreateResources(); - - Set createdResources = status.getAssociatedResources(); - assertEquals(1, createdResources.size()); - Resource res = createdResources.iterator().next(); - Long id = (Long) res.getPropertyValue("Upgrade/request_id"); - assertNotNull(id); - assertEquals(Long.valueOf(1), id); - - Map requestProps = new HashMap<>(); - requestProps.put(UpgradeResourceProvider.UPGRADE_REQUEST_ID, id.toString()); - requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1"); - requestProps.put(UpgradeResourceProvider.UPGRADE_REQUEST_STATUS, "ABORTED"); - - UpgradeResourceProvider urp = createProvider(amc); - Request req = PropertyHelper.getUpdateRequest(requestProps, null); - urp.updateResources(req, null); - } - - @Test - public void testDirectionUpgrade() throws Exception { - Cluster cluster = clusters.getCluster("c1"); - - StackEntity stackEntity = stackDAO.find("HDP", "2.1.1"); - - RepositoryVersionEntity repoVersionEntity = new RepositoryVersionEntity(); - repoVersionEntity.setDisplayName("My New Version 3"); - repoVersionEntity.addRepoOsEntities(new ArrayList<>()); - repoVersionEntity.setStack(stackEntity); - repoVersionEntity.setVersion("2.2.2.3"); - repoVersionDao.create(repoVersionEntity); - - Map requestProps = new HashMap<>(); - requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1"); - requestProps.put(UpgradeResourceProvider.UPGRADE_REPO_VERSION_ID, String.valueOf(repoVersionEntity.getId())); - requestProps.put(UpgradeResourceProvider.UPGRADE_PACK, "upgrade_direction"); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_PREREQUISITE_CHECKS, "true"); - requestProps.put(UpgradeResourceProvider.UPGRADE_DIRECTION, Direction.UPGRADE.name()); - - ResourceProvider upgradeResourceProvider = createProvider(amc); - - Request request = PropertyHelper.getCreateRequest(Collections.singleton(requestProps), null); - upgradeResourceProvider.createResources(request); - - List upgrades = upgradeDao.findUpgrades(cluster.getClusterId()); - assertEquals(1, upgrades.size()); - - UpgradeEntity upgrade = upgrades.get(0); - Long id = upgrade.getRequestId(); - assertEquals(3, upgrade.getUpgradeGroups().size()); - // Ensure that there are no items related to downgrade in the upgrade direction - UpgradeGroupEntity group = upgrade.getUpgradeGroups().get(2); - Assert.assertEquals("POST_CLUSTER", group.getName()); - Assert.assertTrue(!group.getItems().isEmpty()); - for (UpgradeItemEntity item : group.getItems()) { - Assert.assertFalse(item.getText().toLowerCase().contains("downgrade")); - } - - // now abort the upgrade so another can be created - abortUpgrade(upgrade.getRequestId()); - - requestProps.clear(); - // Now perform a downgrade - requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1"); - requestProps.put(UpgradeResourceProvider.UPGRADE_PACK, "upgrade_direction"); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_PREREQUISITE_CHECKS, "true"); - requestProps.put(UpgradeResourceProvider.UPGRADE_DIRECTION, Direction.DOWNGRADE.name()); - - request = PropertyHelper.getCreateRequest(Collections.singleton(requestProps), null); - upgradeResourceProvider.createResources(request); - - upgrades = upgradeDao.findUpgrades(cluster.getClusterId()); - assertEquals(2, upgrades.size()); - - upgrade = 
null; - for (UpgradeEntity u : upgrades) { - if (!u.getRequestId().equals(id)) { - upgrade = u; - } - } - assertNotNull(upgrade); - List groups = upgrade.getUpgradeGroups(); - assertEquals("Downgrade groups reduced from 3 to 1", 1, groups.size()); - group = upgrade.getUpgradeGroups().get(0); - assertEquals("Execution items increased from 1 to 2", 2, group.getItems().size()); - } - - - - @Test - public void testPercents() throws Exception { - RequestStatus status = testCreateResources(); - - Set createdResources = status.getAssociatedResources(); - assertEquals(1, createdResources.size()); - Resource res = createdResources.iterator().next(); - Long id = (Long) res.getPropertyValue("Upgrade/request_id"); - assertNotNull(id); - assertEquals(Long.valueOf(1), id); - - StageDAO stageDao = injector.getInstance(StageDAO.class); - HostRoleCommandDAO hrcDao = injector.getInstance(HostRoleCommandDAO.class); - - List stages = stageDao.findByRequestId(id); - List tasks = hrcDao.findByRequest(id); - - Set stageIds = new HashSet<>(); - for (StageEntity se : stages) { - stageIds.add(se.getStageId()); - } - - CalculatedStatus calc = null; - int i = 0; - for (HostRoleCommandEntity hrce : tasks) { - hrce.setStatus(HostRoleStatus.IN_PROGRESS); - hrcDao.merge(hrce); - calc = CalculatedStatus.statusFromStageSummary(hrcDao.findAggregateCounts(id), stageIds); - assertEquals(((i++) + 1) * 4.375d, calc.getPercent(), 0.01d); - assertEquals(HostRoleStatus.IN_PROGRESS, calc.getStatus()); - } - - i = 0; - for (HostRoleCommandEntity hrce : tasks) { - hrce.setStatus(HostRoleStatus.COMPLETED); - hrcDao.merge(hrce); - calc = CalculatedStatus.statusFromStageSummary(hrcDao.findAggregateCounts(id), stageIds); - assertEquals(35 + (((i++) + 1) * 8.125), calc.getPercent(), 0.01d); - if (i < 8) { - assertEquals(HostRoleStatus.IN_PROGRESS, calc.getStatus()); - } - } - - calc = CalculatedStatus.statusFromStageSummary(hrcDao.findAggregateCounts(id), stageIds); - assertEquals(HostRoleStatus.COMPLETED, calc.getStatus()); - assertEquals(100d, calc.getPercent(), 0.01d); - } - - - @Test - public void testCreateCrossStackUpgrade() throws Exception { - Cluster cluster = clusters.getCluster("c1"); - StackId oldStack = repoVersionEntity2110.getStackId(); - - for (Service s : cluster.getServices().values()) { - assertEquals(oldStack, s.getDesiredStackId()); - - for (ServiceComponent sc : s.getServiceComponents().values()) { - assertEquals(oldStack, sc.getDesiredStackId()); - - for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) { - assertEquals(repoVersionEntity2110.getVersion(), sch.getVersion()); - } - } - } - - - Config config = configFactory.createNew(cluster, "zoo.cfg", "abcdefg", Collections.singletonMap("a", "b"), null); - cluster.addDesiredConfig("admin", Collections.singleton(config)); - - // create the upgrade across major versions - Map requestProps = new HashMap<>(); - requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1"); - requestProps.put(UpgradeResourceProvider.UPGRADE_REPO_VERSION_ID, String.valueOf(repoVersionEntity2200.getId())); - requestProps.put(UpgradeResourceProvider.UPGRADE_PACK, "upgrade_test"); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_PREREQUISITE_CHECKS, "true"); - requestProps.put(UpgradeResourceProvider.UPGRADE_DIRECTION, Direction.UPGRADE.name()); - - ResourceProvider upgradeResourceProvider = createProvider(amc); - Request request = PropertyHelper.getCreateRequest(Collections.singleton(requestProps), null); - upgradeResourceProvider.createResources(request); - -
List upgrades = upgradeDao.findUpgrades(cluster.getClusterId()); - assertEquals(1, upgrades.size()); - - UpgradeEntity upgrade = upgrades.get(0); - assertEquals(3, upgrade.getUpgradeGroups().size()); - - UpgradeGroupEntity group = upgrade.getUpgradeGroups().get(2); - assertEquals(2, group.getItems().size()); - - group = upgrade.getUpgradeGroups().get(0); - assertEquals(2, group.getItems().size()); - - assertTrue(cluster.getDesiredConfigs().containsKey("zoo.cfg")); - - for (Service s : cluster.getServices().values()) { - assertEquals(repoVersionEntity2200, s.getDesiredRepositoryVersion()); - - for (ServiceComponent sc : s.getServiceComponents().values()) { - assertEquals(repoVersionEntity2200, sc.getDesiredRepositoryVersion()); - } - } - } - - /** - * @param amc - * @return the provider - */ - private UpgradeResourceProvider createProvider(AmbariManagementController amc) { - ResourceProviderFactory factory = injector.getInstance(ResourceProviderFactory.class); - AbstractControllerResourceProvider.init(factory); - - Resource.Type type = Type.Upgrade; - return (UpgradeResourceProvider) AbstractControllerResourceProvider.getResourceProvider(type, amc); - } - - private RequestStatus testCreateResources() throws Exception { - - Cluster cluster = clusters.getCluster("c1"); - - List upgrades = upgradeDao.findUpgrades(cluster.getClusterId()); - assertEquals(0, upgrades.size()); - - Map requestProps = new HashMap<>(); - requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1"); - requestProps.put(UpgradeResourceProvider.UPGRADE_REPO_VERSION_ID, String.valueOf(repoVersionEntity2111.getId())); - requestProps.put(UpgradeResourceProvider.UPGRADE_PACK, "upgrade_test"); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_PREREQUISITE_CHECKS, "true"); - requestProps.put(UpgradeResourceProvider.UPGRADE_DIRECTION, Direction.UPGRADE.name()); - - ResourceProvider upgradeResourceProvider = createProvider(amc); - - Request request = PropertyHelper.getCreateRequest(Collections.singleton(requestProps), null); - RequestStatus status = upgradeResourceProvider.createResources(request); - - upgrades = upgradeDao.findUpgrades(cluster.getClusterId()); - assertEquals(1, upgrades.size()); - - UpgradeEntity entity = upgrades.get(0); - assertTrue(cluster.getClusterId().longValue() == entity.getClusterId().longValue()); - assertEquals(UpgradeType.ROLLING, entity.getUpgradeType()); - - StageDAO stageDAO = injector.getInstance(StageDAO.class); - List stageEntities = stageDAO.findByRequestId(entity.getRequestId()); - Gson gson = new Gson(); - for (StageEntity se : stageEntities) { - Map<String, String> map = gson.<Map<String, String>>fromJson(se.getCommandParamsStage(), Map.class); - assertTrue(map.containsKey("upgrade_direction")); - assertEquals("upgrade", map.get("upgrade_direction")); - - if(map.containsKey("upgrade_type")){ - assertEquals("rolling_upgrade", map.get("upgrade_type")); - } - } - - List upgradeGroups = entity.getUpgradeGroups(); - assertEquals(3, upgradeGroups.size()); - - UpgradeGroupEntity group = upgradeGroups.get(1); - assertEquals(4, group.getItems().size()); - - assertTrue( - group.getItems().get(0).getText().contains("placeholder of placeholder-rendered-properly")); - - assertTrue(group.getItems().get(1).getText().contains("Restarting")); - assertTrue(group.getItems().get(2).getText().contains("Updating")); - assertTrue(group.getItems().get(3).getText().contains("Service Check")); - - ActionManager am = injector.getInstance(ActionManager.class); - List requests = am.getRequestsByStatus( -
org.apache.ambari.server.actionmanager.RequestStatus.IN_PROGRESS, 100, true); - - assertEquals(1, requests.size()); - assertEquals(requests.get(0), entity.getRequestId()); - - List stages = am.getRequestStatus(requests.get(0).longValue()); - - assertEquals(8, stages.size()); - - List tasks = am.getRequestTasks(requests.get(0).longValue()); - // same number of tasks as stages here - assertEquals(8, tasks.size()); - - Set slaveStageIds = new HashSet<>(); - - UpgradeGroupEntity coreSlavesGroup = upgradeGroups.get(1); - - for (UpgradeItemEntity itemEntity : coreSlavesGroup.getItems()) { - slaveStageIds.add(itemEntity.getStageId()); - } - - for (Stage stage : stages) { - - // For this test the core slaves group stages should be skippable and NOT - // allow retry. - assertEquals(slaveStageIds.contains(stage.getStageId()), stage.isSkippable()); - - for (Map taskMap : stage.getHostRoleCommands().values()) { - - for (HostRoleCommand task : taskMap.values()) { - assertEquals(!slaveStageIds.contains(stage.getStageId()), task.isRetryAllowed()); - } - } - } - return status; - } - - @Test - public void testUpdateSkipFailures() throws Exception { - testCreateResourcesWithAutoSkipFailures(); - - List upgrades = upgradeDao.findUpgrades(1); - assertEquals(1, upgrades.size()); - - UpgradeEntity entity = upgrades.get(0); - - HostRoleCommandDAO dao = injector.getInstance(HostRoleCommandDAO.class); - - List tasks = dao.findByRequest(entity.getRequestId()); - for (HostRoleCommandEntity task : tasks) { - StageEntity stage = task.getStage(); - if (stage.isSkippable() && stage.isAutoSkipOnFailureSupported()) { - assertTrue(task.isFailureAutoSkipped()); - } else { - assertFalse(task.isFailureAutoSkipped()); - } - } - - Map requestProps = new HashMap<>(); - requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1"); - requestProps.put(UpgradeResourceProvider.UPGRADE_REPO_VERSION_ID, String.valueOf(repoVersionEntity2200.getId())); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_FAILURES, Boolean.TRUE.toString()); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_SC_FAILURES, Boolean.FALSE.toString()); - requestProps.put(UpgradeResourceProvider.UPGRADE_REQUEST_ID, "" + entity.getRequestId()); - - ResourceProvider upgradeResourceProvider = createProvider(amc); - Request request = PropertyHelper.getUpdateRequest(requestProps, null); - upgradeResourceProvider.updateResources(request, null); - - tasks = dao.findByRequest(entity.getRequestId()); - for (HostRoleCommandEntity task : tasks) { - if (task.getRoleCommand() == RoleCommand.SERVICE_CHECK) { - assertFalse(task.isFailureAutoSkipped()); - } else { - StageEntity stage = task.getStage(); - if (stage.isSkippable() && stage.isAutoSkipOnFailureSupported()) { - assertTrue(task.isFailureAutoSkipped()); - } else { - assertFalse(task.isFailureAutoSkipped()); - } - } - } - - requestProps = new HashMap<>(); - requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1"); - requestProps.put(UpgradeResourceProvider.UPGRADE_REPO_VERSION_ID, String.valueOf(repoVersionEntity2200.getId())); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_FAILURES, Boolean.FALSE.toString()); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_SC_FAILURES, Boolean.TRUE.toString()); - requestProps.put(UpgradeResourceProvider.UPGRADE_REQUEST_ID, "" + entity.getRequestId()); - - request = PropertyHelper.getUpdateRequest(requestProps, null); - upgradeResourceProvider.updateResources(request, null); - - tasks = dao.findByRequest(entity.getRequestId()); - for 
(HostRoleCommandEntity task : tasks) { - if (task.getRoleCommand() == RoleCommand.SERVICE_CHECK) { - StageEntity stage = task.getStage(); - if (stage.isSkippable() && stage.isAutoSkipOnFailureSupported()) { - assertTrue(task.isFailureAutoSkipped()); - } else { - assertFalse(task.isFailureAutoSkipped()); - } - } else { - assertFalse(task.isFailureAutoSkipped()); - } - } - - requestProps = new HashMap<>(); - requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1"); - requestProps.put(UpgradeResourceProvider.UPGRADE_REPO_VERSION_ID, String.valueOf(repoVersionEntity2200.getId())); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_FAILURES, Boolean.FALSE.toString()); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_SC_FAILURES, Boolean.FALSE.toString()); - requestProps.put(UpgradeResourceProvider.UPGRADE_REQUEST_ID, "" + entity.getRequestId()); - - request = PropertyHelper.getUpdateRequest(requestProps, null); - upgradeResourceProvider.updateResources(request, null); - - tasks = dao.findByRequest(entity.getRequestId()); - for (HostRoleCommandEntity task : tasks) { - assertFalse(task.isFailureAutoSkipped()); - } - } - - /** - * Tests that an error while committing the data cleanly rolls back the transaction so that - * no request/stage/tasks are created. - * - * @throws Exception - */ - @Test - public void testRollback() throws Exception { - Cluster cluster = clusters.getCluster("c1"); - - Map requestProps = new HashMap<>(); - requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1"); - requestProps.put(UpgradeResourceProvider.UPGRADE_REPO_VERSION_ID, String.valueOf(repoVersionEntity2200.getId())); - requestProps.put(UpgradeResourceProvider.UPGRADE_PACK, "upgrade_test"); - requestProps.put(UpgradeResourceProvider.UPGRADE_TYPE, UpgradeType.ROLLING.toString()); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_MANUAL_VERIFICATION, Boolean.FALSE.toString()); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_PREREQUISITE_CHECKS, Boolean.TRUE.toString()); - requestProps.put(UpgradeResourceProvider.UPGRADE_DIRECTION, Direction.UPGRADE.name()); - - // this will cause an NPE when creating the upgrade, allowing us to test - // rollback - UpgradeResourceProvider upgradeResourceProvider = createProvider(amc); - UpgradeResourceProvider.s_upgradeDAO = null; - - try { - Request request = PropertyHelper.getCreateRequest(Collections.singleton(requestProps), null); - upgradeResourceProvider.createResources(request); - Assert.fail("Expected a NullPointerException"); - } catch (NullPointerException npe) { - // expected - } - - List upgrades = upgradeDao.findUpgrades(cluster.getClusterId()); - assertEquals(0, upgrades.size()); - - List requestIds = requestDao.findAllRequestIds(1, true, cluster.getClusterId()); - assertEquals(0, requestIds.size()); - } - - /** - * Tests that a {@link UpgradeType#HOST_ORDERED} upgrade throws an exception - * on missing hosts.
- * - * @throws Exception - */ - @Test() - public void testCreateHostOrderedUpgradeThrowsExceptions() throws Exception { - - Map requestProps = new HashMap<>(); - requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1"); - requestProps.put(UpgradeResourceProvider.UPGRADE_REPO_VERSION_ID, String.valueOf(repoVersionEntity2200.getId())); - requestProps.put(UpgradeResourceProvider.UPGRADE_PACK, "upgrade_test_host_ordered"); - requestProps.put(UpgradeResourceProvider.UPGRADE_TYPE, UpgradeType.HOST_ORDERED.toString()); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_PREREQUISITE_CHECKS, Boolean.TRUE.toString()); - requestProps.put(UpgradeResourceProvider.UPGRADE_DIRECTION, Direction.UPGRADE.name()); - - ResourceProvider upgradeResourceProvider = createProvider(amc); - Request request = PropertyHelper.getCreateRequest(Collections.singleton(requestProps), null); - - try { - upgradeResourceProvider.createResources(request); - Assert.fail("The request should have failed due to the missing Upgrade/host_order property"); - } catch (SystemException systemException) { - // expected - } - - // stick a bad host_ordered_hosts in there which has the wrong hosts - Set<Map<String, List<String>>> hostsOrder = new LinkedHashSet<>(); - Map<String, List<String>> hostGrouping = new HashMap<>(); - hostGrouping.put("hosts", Lists.newArrayList("invalid-host")); - hostsOrder.add(hostGrouping); - - requestProps.put(UpgradeResourceProvider.UPGRADE_HOST_ORDERED_HOSTS, hostsOrder); - - try { - upgradeResourceProvider.createResources(request); - Assert.fail("The request should have failed due to invalid hosts"); - } catch (SystemException systemException) { - // expected - } - - // use correct hosts now - hostsOrder = new LinkedHashSet<>(); - hostGrouping = new HashMap<>(); - hostGrouping.put("hosts", Lists.newArrayList("h1")); - hostsOrder.add(hostGrouping); - - requestProps.put(UpgradeResourceProvider.UPGRADE_HOST_ORDERED_HOSTS, hostsOrder); - upgradeResourceProvider.createResources(request); - - - // make sure that the desired versions are updated - Cluster cluster = clusters.getCluster("c1"); - assertNotNull(cluster); - - Service service = cluster.getService("ZOOKEEPER"); - assertEquals(repoVersionEntity2200, service.getDesiredRepositoryVersion()); - } - - /** - * Exercises that a component that goes from upgrade->downgrade and switches - * {@code versionAdvertised} in between will go to UNKNOWN.
This exercises - * {@link UpgradeHelper#updateDesiredRepositoriesAndConfigs(UpgradeContext)} - * - * @throws Exception - */ - @Test - public void testCreateUpgradeDowngradeCycleAdvertisingVersion() throws Exception { - Cluster cluster = clusters.getCluster("c1"); - ServiceGroup serviceGroup = cluster.getServiceGroup(UpgradeResourceProvider.DUMMY_SERVICE_GROUP); - Service service = cluster.addService(serviceGroup, "STORM", "STORM", repoVersionEntity2110); - - ServiceComponent component = service.addServiceComponent("DRPC_SERVER"); - ServiceComponentHost sch = component.addServiceComponentHost("h1"); - sch.setVersion("2.1.1.0"); - - ResourceProvider upgradeResourceProvider = createProvider(amc); - - Map requestProps = new HashMap<>(); - requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1"); - requestProps.put(UpgradeResourceProvider.UPGRADE_REPO_VERSION_ID, String.valueOf(repoVersionEntity2200.getId())); - requestProps.put(UpgradeResourceProvider.UPGRADE_PACK, "upgrade_test"); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_PREREQUISITE_CHECKS, "true"); - requestProps.put(UpgradeResourceProvider.UPGRADE_DIRECTION, Direction.UPGRADE.name()); - - Map requestInfoProperties = new HashMap<>(); - - Request request = PropertyHelper.getCreateRequest(Collections.singleton(requestProps), requestInfoProperties); - - RequestStatus status = upgradeResourceProvider.createResources(request); - assertEquals(1, status.getAssociatedResources().size()); - - Resource r = status.getAssociatedResources().iterator().next(); - String id = r.getPropertyValue("Upgrade/request_id").toString(); - - component = service.getServiceComponent("DRPC_SERVER"); - assertNotNull(component); - assertEquals("2.2.0.0", component.getDesiredVersion()); - - ServiceComponentHost hostComponent = component.getServiceComponentHost("h1"); - assertEquals(UpgradeState.IN_PROGRESS, hostComponent.getUpgradeState()); - - // !!! can't start a downgrade until cancelling the previous upgrade - abortUpgrade(Long.parseLong(id)); - - requestProps.clear(); - requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1"); - requestProps.put(UpgradeResourceProvider.UPGRADE_PACK, "upgrade_test"); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_PREREQUISITE_CHECKS, "true"); - requestProps.put(UpgradeResourceProvider.UPGRADE_DIRECTION, Direction.DOWNGRADE.name()); - - request = PropertyHelper.getCreateRequest(Collections.singleton(requestProps), requestInfoProperties); - status = upgradeResourceProvider.createResources(request); - - component = service.getServiceComponent("DRPC_SERVER"); - assertNotNull(component); - assertEquals(repoVersionEntity2110, component.getDesiredRepositoryVersion()); - - hostComponent = component.getServiceComponentHost("h1"); - assertEquals(UpgradeState.NONE, hostComponent.getUpgradeState()); - assertEquals("UNKNOWN", hostComponent.getVersion()); - } - - /** - * Ensures that stages created with an HOU are sequential and do not skip any - * IDs. When there are stages with IDs like (1,2,3,5,6,7,10), the request will - * get stuck in a PENDING state. This affects HOU specifically since they can - * potentially try to create empty stages which won't get persisted (such as a - * STOP on client-only hosts). 
- * - * @throws Exception - */ - @Test() - public void testEmptyGroupingsDoNotSkipStageIds() throws Exception { - - StageDAO stageDao = injector.getInstance(StageDAO.class); - Assert.assertEquals(0, stageDao.findAll().size()); - - // strip out all non-client components - clients don't have STOP commands - Cluster cluster = clusters.getCluster("c1"); - List schs = cluster.getServiceComponentHosts("h1"); - for (ServiceComponentHost sch : schs) { - if (sch.isClientComponent()) { - continue; - } - - cluster.removeServiceComponentHost(sch); - } - - // define host order - Set<Map<String, List<String>>> hostsOrder = new LinkedHashSet<>(); - Map<String, List<String>> hostGrouping = new HashMap<>(); - hostGrouping.put("hosts", Lists.newArrayList("h1")); - hostsOrder.add(hostGrouping); - - Map requestProps = new HashMap<>(); - requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1"); - requestProps.put(UpgradeResourceProvider.UPGRADE_REPO_VERSION_ID, String.valueOf(repoVersionEntity2200.getId())); - requestProps.put(UpgradeResourceProvider.UPGRADE_PACK, "upgrade_test_host_ordered"); - requestProps.put(UpgradeResourceProvider.UPGRADE_TYPE, UpgradeType.HOST_ORDERED.toString()); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_PREREQUISITE_CHECKS, Boolean.TRUE.toString()); - requestProps.put(UpgradeResourceProvider.UPGRADE_DIRECTION, Direction.UPGRADE.name()); - requestProps.put(UpgradeResourceProvider.UPGRADE_HOST_ORDERED_HOSTS, hostsOrder); - - ResourceProvider upgradeResourceProvider = createProvider(amc); - Request request = PropertyHelper.getCreateRequest(Collections.singleton(requestProps), null); - upgradeResourceProvider.createResources(request); - - List stages = stageDao.findByRequestId(cluster.getUpgradeInProgress().getRequestId()); - Assert.assertEquals(3, stages.size()); - - long expectedStageId = 1L; - for (StageEntity stage : stages) { - Assert.assertEquals(expectedStageId++, stage.getStageId().longValue()); - } - } - - /** - * Tests that from/to repository version history is created correctly on the - * upgrade.
- * - * @throws Exception - */ - @Test - public void testUpgradeHistory() throws Exception { - Cluster cluster = clusters.getCluster("c1"); - - Map requestProps = new HashMap<>(); - requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1"); - requestProps.put(UpgradeResourceProvider.UPGRADE_REPO_VERSION_ID, String.valueOf(repoVersionEntity2200.getId())); - requestProps.put(UpgradeResourceProvider.UPGRADE_PACK, "upgrade_test"); - requestProps.put(UpgradeResourceProvider.UPGRADE_TYPE, UpgradeType.ROLLING.toString()); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_MANUAL_VERIFICATION, Boolean.TRUE.toString()); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_PREREQUISITE_CHECKS, Boolean.TRUE.toString()); - requestProps.put(UpgradeResourceProvider.UPGRADE_DIRECTION, Direction.UPGRADE.name()); - - ResourceProvider upgradeResourceProvider = createProvider(amc); - Request request = PropertyHelper.getCreateRequest(Collections.singleton(requestProps), null); - upgradeResourceProvider.createResources(request); - - List upgrades = upgradeDao.findUpgrades(cluster.getClusterId()); - assertEquals(1, upgrades.size()); - - UpgradeEntity upgrade = cluster.getUpgradeInProgress(); - List histories = upgrade.getHistory(); - assertEquals(2, histories.size()); - - for (UpgradeHistoryEntity history : histories) { - assertEquals("ZOOKEEPER", history.getServiceName()); - assertEquals(repoVersionEntity2110, history.getFromReposistoryVersion()); - assertEquals(repoVersionEntity2200, history.getTargetRepositoryVersion()); - } - - // abort the upgrade and create the downgrade - abortUpgrade(upgrade.getRequestId()); - - requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1"); - requestProps.put(UpgradeResourceProvider.UPGRADE_PACK, "upgrade_nonrolling_new_stack"); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_PREREQUISITE_CHECKS, "true"); - requestProps.put(UpgradeResourceProvider.UPGRADE_DIRECTION, Direction.DOWNGRADE.name()); - - Map requestInfoProperties = new HashMap<>(); - - request = PropertyHelper.getCreateRequest(Collections.singleton(requestProps), - requestInfoProperties); - RequestStatus status = upgradeResourceProvider.createResources(request); - UpgradeEntity downgrade = upgradeDao.findUpgradeByRequestId(getRequestId(status)); - assertEquals(Direction.DOWNGRADE, downgrade.getDirection()); - - // check from/to history - histories = downgrade.getHistory(); - assertEquals(2, histories.size()); - - for (UpgradeHistoryEntity history : histories) { - assertEquals("ZOOKEEPER", history.getServiceName()); - assertEquals(repoVersionEntity2200, history.getFromReposistoryVersion()); - assertEquals(repoVersionEntity2110, history.getTargetRepositoryVersion()); - } - } - - - /** - * Tests that a {@link RepositoryType#PATCH} upgrade can be reverted once it - * has been marked as revertable.
- * - * @throws Exception - */ - @Test - public void testCreatePatchRevertUpgrade() throws Exception { - Cluster cluster = clusters.getCluster("c1"); - - // add a single HBASE master on 2.1.1.0 - ServiceGroup serviceGroup = cluster.getServiceGroup(UpgradeResourceProvider.DUMMY_SERVICE_GROUP); - Service service = cluster.addService(serviceGroup, "HBASE", "HBASE", repoVersionEntity2110); - ServiceComponent component = service.addServiceComponent("HBASE_MASTER"); - ServiceComponentHost sch = component.addServiceComponentHost("h1"); - sch.setVersion("2.1.1.0"); - - File f = new File("src/test/resources/hbase_version_test.xml"); - repoVersionEntity2112.setType(RepositoryType.PATCH); - repoVersionEntity2112.setVersionXml(IOUtils.toString(new FileInputStream(f))); - repoVersionEntity2112.setVersionXsd("version_definition.xsd"); - repoVersionDao.merge(repoVersionEntity2112); - - List upgrades = upgradeDao.findUpgrades(cluster.getClusterId()); - assertEquals(0, upgrades.size()); - - Map requestProps = new HashMap<>(); - requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1"); - requestProps.put(UpgradeResourceProvider.UPGRADE_REPO_VERSION_ID, String.valueOf(repoVersionEntity2112.getId())); - requestProps.put(UpgradeResourceProvider.UPGRADE_PACK, "upgrade_test"); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_PREREQUISITE_CHECKS, "true"); - requestProps.put(UpgradeResourceProvider.UPGRADE_DIRECTION, Direction.UPGRADE.name()); - - ResourceProvider upgradeResourceProvider = createProvider(amc); - - Request request = PropertyHelper.getCreateRequest(Collections.singleton(requestProps), null); - upgradeResourceProvider.createResources(request); - - upgrades = upgradeDao.findUpgrades(cluster.getClusterId()); - assertEquals(1, upgrades.size()); - - UpgradeEntity upgradeEntity = upgrades.get(0); - assertEquals(RepositoryType.PATCH, upgradeEntity.getOrchestration()); - - // should be false since only finalization actually sets this bit - assertEquals(false, upgradeEntity.isRevertAllowed()); - - // fake it now so the rest of the test passes - upgradeEntity.setRevertAllowed(true); - upgradeEntity = upgradeDao.merge(upgradeEntity); - - // !!!
make it look like the cluster is done - cluster.setUpgradeEntity(null); - - requestProps = new HashMap<>(); - requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1"); - requestProps.put(UpgradeResourceProvider.UPGRADE_REVERT_UPGRADE_ID, upgradeEntity.getId()); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_PREREQUISITE_CHECKS, Boolean.TRUE.toString()); - - request = PropertyHelper.getCreateRequest(Collections.singleton(requestProps), null); - upgradeResourceProvider.createResources(request); - - upgrades = upgradeDao.findUpgrades(cluster.getClusterId()); - assertEquals(2, upgrades.size()); - - boolean found = false; - - Function<UpgradeHistoryEntity, String> function = new Function<UpgradeHistoryEntity, String>() { - @Override - public String apply(UpgradeHistoryEntity input) { - return input.getServiceName() + "/" + input.getComponentName(); - } - }; - - for (UpgradeEntity upgrade : upgrades) { - if (!upgrade.getId().equals(upgradeEntity.getId())) { - found = true; - assertEquals(upgradeEntity.getOrchestration(), upgrade.getOrchestration()); - - Collection<String> upgradeEntityStrings = Collections2.transform(upgradeEntity.getHistory(), function); - Collection<String> upgradeStrings = Collections2.transform(upgrade.getHistory(), function); - - Collection diff = CollectionUtils.disjunction(upgradeEntityStrings, upgradeStrings); - assertEquals("Verify the same set of components was orchestrated", 0, diff.size()); - } - } - - assertTrue(found); - } - - /** - * Tests that when there is no revertable upgrade, a reversion of a specific - * upgrade ID is not allowed. - */ - @Test(expected = SystemException.class) - public void testRevertFailsWhenNoRevertableUpgradeIsFound() throws Exception { - Cluster cluster = clusters.getCluster("c1"); - - // add a single HBASE master on 2.1.1.0 - ServiceGroup serviceGroup = cluster.getServiceGroup(UpgradeResourceProvider.DUMMY_SERVICE_GROUP); - Service service = cluster.addService(serviceGroup, "HBASE", "HBASE", repoVersionEntity2110); - ServiceComponent component = service.addServiceComponent("HBASE_MASTER"); - ServiceComponentHost sch = component.addServiceComponentHost("h1"); - sch.setVersion("2.1.1.0"); - - File f = new File("src/test/resources/hbase_version_test.xml"); - repoVersionEntity2112.setType(RepositoryType.PATCH); - repoVersionEntity2112.setVersionXml(IOUtils.toString(new FileInputStream(f))); - repoVersionEntity2112.setVersionXsd("version_definition.xsd"); - repoVersionDao.merge(repoVersionEntity2112); - - List upgrades = upgradeDao.findUpgrades(cluster.getClusterId()); - assertEquals(0, upgrades.size()); - - Map requestProps = new HashMap<>(); - requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1"); - requestProps.put(UpgradeResourceProvider.UPGRADE_REPO_VERSION_ID, String.valueOf(repoVersionEntity2112.getId())); - requestProps.put(UpgradeResourceProvider.UPGRADE_PACK, "upgrade_test"); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_PREREQUISITE_CHECKS, "true"); - requestProps.put(UpgradeResourceProvider.UPGRADE_DIRECTION, Direction.UPGRADE.name()); - - ResourceProvider upgradeResourceProvider = createProvider(amc); - - Request request = PropertyHelper.getCreateRequest(Collections.singleton(requestProps), null); - upgradeResourceProvider.createResources(request); - - upgrades = upgradeDao.findUpgrades(cluster.getClusterId()); - assertEquals(1, upgrades.size()); - - UpgradeEntity upgradeEntity = upgrades.get(0); - assertEquals(RepositoryType.PATCH, upgradeEntity.getOrchestration()); - - // !!!
make it look like the cluster is done - cluster.setUpgradeEntity(null); - - requestProps = new HashMap<>(); - requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1"); - requestProps.put(UpgradeResourceProvider.UPGRADE_REVERT_UPGRADE_ID, upgradeEntity.getId()); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_PREREQUISITE_CHECKS, Boolean.TRUE.toString()); - - request = PropertyHelper.getCreateRequest(Collections.singleton(requestProps), null); - upgradeResourceProvider.createResources(request); - } - - @Test - public void testCreatePatchWithConfigChanges() throws Exception { - Cluster cluster = clusters.getCluster("c1"); - - File f = new File("src/test/resources/version_definition_test_patch_config.xml"); - repoVersionEntity2112.setType(RepositoryType.PATCH); - repoVersionEntity2112.setVersionXml(IOUtils.toString(new FileInputStream(f))); - repoVersionEntity2112.setVersionXsd("version_definition.xsd"); - repoVersionDao.merge(repoVersionEntity2112); - - List upgrades = upgradeDao.findUpgrades(cluster.getClusterId()); - assertEquals(0, upgrades.size()); - - Map requestProps = new HashMap<>(); - requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1"); - requestProps.put(UpgradeResourceProvider.UPGRADE_REPO_VERSION_ID, String.valueOf(repoVersionEntity2112.getId())); - requestProps.put(UpgradeResourceProvider.UPGRADE_PACK, "upgrade_test"); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_PREREQUISITE_CHECKS, "true"); - requestProps.put(UpgradeResourceProvider.UPGRADE_DIRECTION, Direction.UPGRADE.name()); - - // !!! test that a PATCH upgrade skips config changes - ResourceProvider upgradeResourceProvider = createProvider(amc); - - Request request = PropertyHelper.getCreateRequest(Collections.singleton(requestProps), null); - RequestStatus status = upgradeResourceProvider.createResources(request); - Set resources = status.getAssociatedResources(); - assertEquals(1, resources.size()); - Long requestId = (Long) resources.iterator().next().getPropertyValue("Upgrade/request_id"); - assertNotNull(requestId); - - UpgradeEntity upgradeEntity = upgradeDao.findUpgradeByRequestId(requestId); - assertEquals(RepositoryType.PATCH, upgradeEntity.getOrchestration()); - - HostRoleCommandDAO hrcDAO = injector.getInstance(HostRoleCommandDAO.class); - List commands = hrcDAO.findByRequest(upgradeEntity.getRequestId()); - - boolean foundConfigTask = false; - for (HostRoleCommandEntity command : commands) { - if (StringUtils.isNotBlank(command.getCustomCommandName()) && - command.getCustomCommandName().equals(ConfigureAction.class.getName())) { - foundConfigTask = true; - break; - } - } - assertFalse(foundConfigTask); - - // !!! 
test that a patch with a supported patch change gets picked up - cluster.setUpgradeEntity(null); - requestProps.put(UpgradeResourceProvider.UPGRADE_PACK, "upgrade_test_force_config_change"); - request = PropertyHelper.getCreateRequest(Collections.singleton(requestProps), null); - - status = upgradeResourceProvider.createResources(request); - resources = status.getAssociatedResources(); - assertEquals(1, resources.size()); - requestId = (Long) resources.iterator().next().getPropertyValue("Upgrade/request_id"); - assertNotNull(requestId); - - upgradeEntity = upgradeDao.findUpgradeByRequestId(requestId); - assertEquals(RepositoryType.PATCH, upgradeEntity.getOrchestration()); - - commands = hrcDAO.findByRequest(upgradeEntity.getRequestId()); - - foundConfigTask = false; - for (HostRoleCommandEntity command : commands) { - if (StringUtils.isNotBlank(command.getCustomCommandName()) && - command.getCustomCommandName().equals(ConfigureAction.class.getName())) { - foundConfigTask = true; - - ExecutionCommandDAO dao = injector.getInstance(ExecutionCommandDAO.class); - ExecutionCommandEntity entity = dao.findByPK(command.getTaskId()); - ExecutionCommandWrapperFactory factory = injector.getInstance(ExecutionCommandWrapperFactory.class); - ExecutionCommandWrapper wrapper = factory.createFromJson(new String(entity.getCommand())); - Map params = wrapper.getExecutionCommand().getCommandParams(); - assertTrue(params.containsKey(ConfigureTask.PARAMETER_ASSOCIATED_SERVICE)); - assertEquals("ZOOKEEPER", params.get(ConfigureTask.PARAMETER_ASSOCIATED_SERVICE)); - - break; - } - } - assertTrue(foundConfigTask); - - - - // !!! test that a regular upgrade will pick up the config change - cluster.setUpgradeEntity(null); - repoVersionEntity2112.setType(RepositoryType.STANDARD); - repoVersionDao.merge(repoVersionEntity2112); - - requestProps.put(UpgradeResourceProvider.UPGRADE_PACK, "upgrade_test"); - request = PropertyHelper.getCreateRequest(Collections.singleton(requestProps), null); - - status = upgradeResourceProvider.createResources(request); - resources = status.getAssociatedResources(); - assertEquals(1, resources.size()); - requestId = (Long) resources.iterator().next().getPropertyValue("Upgrade/request_id"); - assertNotNull(requestId); - - upgradeEntity = upgradeDao.findUpgradeByRequestId(requestId); - assertEquals(RepositoryType.STANDARD, upgradeEntity.getOrchestration()); - - commands = hrcDAO.findByRequest(upgradeEntity.getRequestId()); - - foundConfigTask = false; - for (HostRoleCommandEntity command : commands) { - if (StringUtils.isNotBlank(command.getCustomCommandName()) && - command.getCustomCommandName().equals(ConfigureAction.class.getName())) { - foundConfigTask = true; - break; - } - } - assertTrue(foundConfigTask); - - } - - - - private String parseSingleMessage(String msgStr) { - JsonParser parser = new JsonParser(); - JsonArray msgArray = (JsonArray) parser.parse(msgStr); - JsonObject msg = (JsonObject) msgArray.get(0); - - return msg.get("message").getAsString(); - } - - /** - * Aborts an upgrade.
- * - * @param requestId - * @throws Exception - */ - private void abortUpgrade(long requestId) throws Exception { - // now abort the upgrade so another can be created - Map requestProps = new HashMap<>(); - requestProps.put(UpgradeResourceProvider.UPGRADE_REQUEST_ID, String.valueOf(requestId)); - requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1"); - requestProps.put(UpgradeResourceProvider.UPGRADE_REQUEST_STATUS, "ABORTED"); - requestProps.put(UpgradeResourceProvider.UPGRADE_SUSPENDED, "false"); - Request request = PropertyHelper.getUpdateRequest(requestProps, null); - - ResourceProvider upgradeResourceProvider = createProvider(amc); - upgradeResourceProvider.updateResources(request, null); - - // !!! this is required since the ActionManager/ActionScheduler isn't - // running and can't remove queued PENDING - it's a cheap way of ensuring - // that the upgrade commands do get aborted - hrcDAO.updateStatusByRequestId(requestId, HostRoleStatus.ABORTED, - HostRoleStatus.IN_PROGRESS_STATUSES); - } - - @Test - public void testTimeouts() throws Exception { - StackEntity stackEntity = stackDAO.find("HDP", "2.1.1"); - RepositoryVersionEntity repoVersionEntity = new RepositoryVersionEntity(); - repoVersionEntity.setDisplayName("My New Version 3"); - List operatingSystems = new ArrayList<>(); - RepoDefinitionEntity repoDefinitionEntity1 = new RepoDefinitionEntity(); - repoDefinitionEntity1.setRepoID("HDP-UTILS"); - repoDefinitionEntity1.setBaseUrl(""); - repoDefinitionEntity1.setRepoName("HDP-UTILS"); - RepoDefinitionEntity repoDefinitionEntity2 = new RepoDefinitionEntity(); - repoDefinitionEntity2.setRepoID("HDP"); - repoDefinitionEntity2.setBaseUrl(""); - repoDefinitionEntity2.setRepoName("HDP"); - RepoOsEntity repoOsEntity = new RepoOsEntity(); - repoOsEntity.setFamily("redhat6"); - repoOsEntity.setAmbariManaged(true); - repoOsEntity.addRepoDefinition(repoDefinitionEntity1); - repoOsEntity.addRepoDefinition(repoDefinitionEntity2); - operatingSystems.add(repoOsEntity); - - repoVersionEntity.addRepoOsEntities(operatingSystems); - repoVersionEntity.setStack(stackEntity); - repoVersionEntity.setVersion("2.2.2.3"); - repoVersionDao.create(repoVersionEntity); - - Map requestProps = new HashMap<>(); - requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1"); - requestProps.put(UpgradeResourceProvider.UPGRADE_REPO_VERSION_ID, String.valueOf(repoVersionEntity.getId())); - requestProps.put(UpgradeResourceProvider.UPGRADE_PACK, "upgrade_test"); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_PREREQUISITE_CHECKS, "true"); - requestProps.put(UpgradeResourceProvider.UPGRADE_DIRECTION, Direction.UPGRADE.name()); - - ResourceProvider upgradeResourceProvider = createProvider(amc); - - Request request = PropertyHelper.getCreateRequest(Collections.singleton(requestProps), null); - RequestStatus status = upgradeResourceProvider.createResources(request); - - - Set createdResources = status.getAssociatedResources(); - assertEquals(1, createdResources.size()); - Resource res = createdResources.iterator().next(); - Long id = (Long) res.getPropertyValue("Upgrade/request_id"); - assertNotNull(id); - assertEquals(Long.valueOf(1), id); - - - ActionManager am = injector.getInstance(ActionManager.class); - - List commands = am.getRequestTasks(id); - - boolean found = false; - - for (HostRoleCommand command : commands) { - ExecutionCommandWrapper wrapper = command.getExecutionCommandWrapper(); - - if (command.getRole().equals(Role.ZOOKEEPER_SERVER) && 
command.getRoleCommand().equals(RoleCommand.CUSTOM_COMMAND)) { - Map commandParams = wrapper.getExecutionCommand().getCommandParams(); - assertTrue(commandParams.containsKey(KeyNames.COMMAND_TIMEOUT)); - assertEquals("824",commandParams.get(KeyNames.COMMAND_TIMEOUT)); - found = true; - } - } - - assertTrue("ZooKeeper timeout override was found", found); - - } - - /** - * Tests that commands created for {@link org.apache.ambari.server.state.stack.upgrade.StageWrapper.Type#UPGRADE_TASKS} set the - * service and component on the {@link ExecutionCommand}. - *

- * Without this, commands of this type would not be able to determine which - * service/component repository they should use when the command is scheduled - * to run. - * - * @throws Exception - */ - @Test - public void testExecutionCommandServiceAndComponent() throws Exception { - Map requestProps = new HashMap<>(); - requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1"); - requestProps.put(UpgradeResourceProvider.UPGRADE_REPO_VERSION_ID, String.valueOf(repoVersionEntity2200.getId())); - requestProps.put(UpgradeResourceProvider.UPGRADE_PACK, "upgrade_execute_task_test"); - requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_PREREQUISITE_CHECKS, "true"); - requestProps.put(UpgradeResourceProvider.UPGRADE_DIRECTION, Direction.UPGRADE.name()); - - ResourceProvider upgradeResourceProvider = createProvider(amc); - - Request request = PropertyHelper.getCreateRequest(Collections.singleton(requestProps), null); - RequestStatus status = upgradeResourceProvider.createResources(request); - - Set createdResources = status.getAssociatedResources(); - assertEquals(1, createdResources.size()); - Resource res = createdResources.iterator().next(); - Long id = (Long) res.getPropertyValue("Upgrade/request_id"); - assertNotNull(id); - assertEquals(Long.valueOf(1), id); - - - ActionManager am = injector.getInstance(ActionManager.class); - List commands = am.getRequestTasks(id); - - boolean foundActionExecuteCommand = false; - for (HostRoleCommand command : commands) { - ExecutionCommand executionCommand = command.getExecutionCommandWrapper().getExecutionCommand(); - if (StringUtils.equals(UpgradeResourceProvider.EXECUTE_TASK_ROLE, - executionCommand.getRole())) { - foundActionExecuteCommand = true; - assertNotNull(executionCommand.getServiceName()); - assertNotNull(executionCommand.getComponentName()); - } - } - - assertTrue( - "There was no task found with the role of " + UpgradeResourceProvider.EXECUTE_TASK_ROLE, - foundActionExecuteCommand); - } - - /** - * - */ - private class MockModule implements Module { - /** - * - */ - @Override - public void configure(Binder binder) { - binder.bind(ConfigHelper.class).toInstance(configHelper); - } - } -} diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java deleted file mode 100644 index 5439a6d96e4..00000000000 --- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java +++ /dev/null @@ -1,1854 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.ambari.server.serveraction.upgrades; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import javax.persistence.EntityManager; - -import org.apache.ambari.server.AmbariException; -import org.apache.ambari.server.H2DatabaseCleaner; -import org.apache.ambari.server.ServiceComponentNotFoundException; -import org.apache.ambari.server.ServiceNotFoundException; -import org.apache.ambari.server.actionmanager.ExecutionCommandWrapper; -import org.apache.ambari.server.actionmanager.HostRoleCommand; -import org.apache.ambari.server.actionmanager.HostRoleCommandFactory; -import org.apache.ambari.server.agent.CommandReport; -import org.apache.ambari.server.agent.ExecutionCommand; -import org.apache.ambari.server.orm.GuiceJpaInitializer; -import org.apache.ambari.server.orm.InMemoryDefaultTestModule; -import org.apache.ambari.server.orm.OrmTestHelper; -import org.apache.ambari.server.orm.dao.RequestDAO; -import org.apache.ambari.server.orm.dao.UpgradeDAO; -import org.apache.ambari.server.orm.entities.RepositoryVersionEntity; -import org.apache.ambari.server.orm.entities.RequestEntity; -import org.apache.ambari.server.orm.entities.UpgradeEntity; -import org.apache.ambari.server.orm.entities.UpgradeHistoryEntity; -import org.apache.ambari.server.serveraction.ServerAction; -import org.apache.ambari.server.state.Cluster; -import org.apache.ambari.server.state.Clusters; -import org.apache.ambari.server.state.Config; -import org.apache.ambari.server.state.ConfigFactory; -import org.apache.ambari.server.state.ConfigHelper; -import org.apache.ambari.server.state.Host; -import org.apache.ambari.server.state.Service; -import org.apache.ambari.server.state.ServiceComponent; -import org.apache.ambari.server.state.ServiceComponentFactory; -import org.apache.ambari.server.state.ServiceComponentHost; -import org.apache.ambari.server.state.ServiceComponentHostFactory; -import org.apache.ambari.server.state.ServiceFactory; -import org.apache.ambari.server.state.ServiceGroup; -import org.apache.ambari.server.state.StackId; -import org.apache.ambari.server.state.State; -import org.apache.ambari.server.state.stack.upgrade.ConfigUpgradeChangeDefinition.ConfigurationKeyValue; -import org.apache.ambari.server.state.stack.upgrade.ConfigUpgradeChangeDefinition.Insert; -import org.apache.ambari.server.state.stack.upgrade.ConfigUpgradeChangeDefinition.InsertType; -import org.apache.ambari.server.state.stack.upgrade.ConfigUpgradeChangeDefinition.Replace; -import org.apache.ambari.server.state.stack.upgrade.ConfigUpgradeChangeDefinition.Transfer; -import org.apache.ambari.server.state.stack.upgrade.ConfigureTask; -import org.apache.ambari.server.state.stack.upgrade.PropertyKeyState; -import org.apache.ambari.server.state.stack.upgrade.TransferCoercionType; -import org.apache.ambari.server.state.stack.upgrade.TransferOperation; -import org.apache.ambari.server.state.stack.upgrade.UpgradeType; -import org.apache.commons.lang3.StringUtils; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import com.google.gson.Gson; -import com.google.inject.Guice; -import com.google.inject.Inject; -import com.google.inject.Injector; - -/** - * Tests upgrade-related server side actions - */ -public class 
ConfigureActionTest { - - @Inject - private Injector m_injector; - - @Inject - private OrmTestHelper m_helper; - - @Inject - private HostRoleCommandFactory hostRoleCommandFactory; - - @Inject - private ServiceFactory serviceFactory; - - @Inject - private ConfigHelper m_configHelper; - - @Inject - private Clusters clusters; - - @Inject - private ConfigFactory configFactory; - - @Inject - private ConfigureAction action; - - @Inject - private RequestDAO requestDAO; - - @Inject - private UpgradeDAO upgradeDAO; - - @Inject - private ServiceComponentFactory serviceComponentFactory; - - @Inject - private ServiceComponentHostFactory serviceComponentHostFactory; - - private RepositoryVersionEntity repoVersion2110; - private RepositoryVersionEntity repoVersion2111; - private RepositoryVersionEntity repoVersion2200; - - private final Map> NO_ATTRIBUTES = new HashMap<>(); - - @Before - public void setup() throws Exception { - m_injector = Guice.createInjector(new InMemoryDefaultTestModule()); - m_injector.getInstance(GuiceJpaInitializer.class); - m_injector.injectMembers(this); - - repoVersion2110 = m_helper.getOrCreateRepositoryVersion(new StackId("HDP-2.1.1"), "2.1.1.0-1234"); - repoVersion2111 = m_helper.getOrCreateRepositoryVersion(new StackId("HDP-2.1.1"), "2.1.1.1-5678"); - repoVersion2200 = m_helper.getOrCreateRepositoryVersion(new StackId("HDP-2.2.0"), "2.2.0.0-1234"); - - makeUpgradeCluster(); - } - - @After - public void teardown() throws Exception { - H2DatabaseCleaner.clearDatabase(m_injector.getProvider(EntityManager.class).get()); - } - - - /** - * Tests that a new configuration is created when upgrading across stack when - * there is no existing configuration with the correct target stack. - * - * @throws Exception - */ - @Test - public void testNewConfigCreatedWhenUpgradingAcrossStacks() throws Exception { - Cluster c = clusters.getCluster("c1"); - assertEquals(1, c.getConfigsByType("zoo.cfg").size()); - - Map properties = new HashMap() { - { - put("initLimit", "10"); - } - }; - - Config config = createConfig(c, repoVersion2110, "zoo.cfg", "version2", properties); - - c.addDesiredConfig("user", Collections.singleton(config)); - assertEquals(2, c.getConfigsByType("zoo.cfg").size()); - - List configurations = new ArrayList<>(); - ConfigurationKeyValue keyValue = new ConfigurationKeyValue(); - configurations.add(keyValue); - keyValue.key = "initLimit"; - keyValue.value = "11"; - - createUpgrade(c, repoVersion2200); - - Map commandParams = new HashMap<>(); - commandParams.put("clusterName", "c1"); - commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg"); - commandParams.put(ConfigureTask.PARAMETER_KEY_VALUE_PAIRS, new Gson().toJson(configurations)); - - ExecutionCommand executionCommand = getExecutionCommand(commandParams); - HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, - null, null); - - hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper( - executionCommand)); - - action.setExecutionCommand(executionCommand); - action.setHostRoleCommand(hostRoleCommand); - - CommandReport report = action.execute(null); - assertNotNull(report); - - assertEquals(3, c.getConfigsByType("zoo.cfg").size()); - - config = c.getDesiredConfigByType("zoo.cfg"); - assertNotNull(config); - assertFalse(StringUtils.equals("version2", config.getTag())); - assertEquals("11", config.getProperties().get("initLimit")); - } - - /** - * Tests that if a configuration with the target stack already exists, then it - * will be re-used instead of a new one created. 
- * - * @throws Exception - */ - @Test - public void testConfigurationWithTargetStackUsed() throws Exception { - Cluster c = clusters.getCluster("c1"); - assertEquals(1, c.getConfigsByType("zoo.cfg").size()); - - Map properties = new HashMap() { - { - put("initLimit", "10"); - } - }; - - Config config = createConfig(c, repoVersion2200, "zoo.cfg", "version2", properties); - - c.addDesiredConfig("user", Collections.singleton(config)); - assertEquals(2, c.getConfigsByType("zoo.cfg").size()); - - List configurations = new ArrayList<>(); - ConfigurationKeyValue keyValue = new ConfigurationKeyValue(); - configurations.add(keyValue); - keyValue.key = "initLimit"; - keyValue.value = "11"; - - createUpgrade(c, repoVersion2200); - - Map commandParams = new HashMap<>(); - commandParams.put("clusterName", "c1"); - commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg"); - commandParams.put(ConfigureTask.PARAMETER_KEY_VALUE_PAIRS, new Gson().toJson(configurations)); - - ExecutionCommand executionCommand = getExecutionCommand(commandParams); - HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null); - - hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand)); - - action.setExecutionCommand(executionCommand); - action.setHostRoleCommand(hostRoleCommand); - - CommandReport report = action.execute(null); - assertNotNull(report); - - assertEquals(2, c.getConfigsByType("zoo.cfg").size()); - - config = c.getDesiredConfigByType("zoo.cfg"); - assertNotNull(config); - assertEquals("version2", config.getTag()); - assertEquals("11", config.getProperties().get("initLimit")); - } - - /** - * Tests that DELETE "*" with edit preserving works correctly. - * - * @throws Exception - */ - @Test - public void testDeletePreserveChanges() throws Exception { - Cluster c = clusters.getCluster("c1"); - assertEquals(1, c.getConfigsByType("zoo.cfg").size()); - - // create a config for zoo.cfg with two values; one is a stack value and the - // other is custom - Map properties = new HashMap() { - { - put("tickTime", "2000"); - put("foo", "bar"); - } - }; - - Config config = createConfig(c, repoVersion2110, "zoo.cfg", "version2", properties); - - c.addDesiredConfig("user", Collections.singleton(config)); - assertEquals(2, c.getConfigsByType("zoo.cfg").size()); - - Map commandParams = new HashMap<>(); - commandParams.put("clusterName", "c1"); - commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg"); - - createUpgrade(c, repoVersion2111); - - // delete all keys, preserving edits or additions - List transfers = new ArrayList<>(); - Transfer transfer = new Transfer(); - transfer.operation = TransferOperation.DELETE; - transfer.deleteKey = "*"; - transfer.preserveEdits = true; - transfers.add(transfer); - - commandParams.put(ConfigureTask.PARAMETER_TRANSFERS, new Gson().toJson(transfers)); - - ExecutionCommand executionCommand = getExecutionCommand(commandParams); - HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null); - hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand)); - - action.setExecutionCommand(executionCommand); - action.setHostRoleCommand(hostRoleCommand); - - CommandReport report = action.execute(null); - assertNotNull(report); - - // make sure there are now 3 versions after the merge - assertEquals(3, c.getConfigsByType("zoo.cfg").size()); - config = c.getDesiredConfigByType("zoo.cfg"); - assertNotNull(config); - assertFalse("version2".equals(config.getTag())); - - // 
time to check our values; there should only be 1 left since tickTime was - // removed - Map map = config.getProperties(); - assertEquals("bar", map.get("foo")); - assertFalse(map.containsKey("tickTime")); - } - - @Test - public void testConfigTransferCopy() throws Exception { - Cluster c = clusters.getCluster("c1"); - assertEquals(1, c.getConfigsByType("zoo.cfg").size()); - - Map properties = new HashMap() { - { - put("initLimit", "10"); - put("copyIt", "10"); - put("moveIt", "10"); - put("deleteIt", "10"); - } - }; - - Config config = createConfig(c, repoVersion2110, "zoo.cfg", "version2", properties); - - c.addDesiredConfig("user", Collections.singleton(config)); - assertEquals(2, c.getConfigsByType("zoo.cfg").size()); - - List configurations = new ArrayList<>(); - ConfigurationKeyValue keyValue = new ConfigurationKeyValue(); - configurations.add(keyValue); - keyValue.key = "initLimit"; - keyValue.value = "11"; - - createUpgrade(c, repoVersion2111); - - Map commandParams = new HashMap<>(); - commandParams.put("clusterName", "c1"); - commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg"); - commandParams.put(ConfigureTask.PARAMETER_KEY_VALUE_PAIRS, new Gson().toJson(configurations)); - - // normal copy - List transfers = new ArrayList<>(); - Transfer transfer = new Transfer(); - transfer.operation = TransferOperation.COPY; - transfer.fromKey = "copyIt"; - transfer.toKey = "copyKey"; - transfers.add(transfer); - - // copy with default - transfer = new Transfer(); - transfer.operation = TransferOperation.COPY; - transfer.fromKey = "copiedFromMissingKeyWithDefault"; - transfer.toKey = "copiedToMissingKeyWithDefault"; - transfer.defaultValue = "defaultValue"; - transfers.add(transfer); - - // normal move - transfer = new Transfer(); - transfer.operation = TransferOperation.MOVE; - transfer.fromKey = "moveIt"; - transfer.toKey = "movedKey"; - transfers.add(transfer); - - // move with default - transfer = new Transfer(); - transfer.operation = TransferOperation.MOVE; - transfer.fromKey = "movedFromKeyMissingWithDefault"; - transfer.toKey = "movedToMissingWithDefault"; - transfer.defaultValue = "defaultValue2"; - transfer.mask = true; - transfers.add(transfer); - - transfer = new Transfer(); - transfer.operation = TransferOperation.DELETE; - transfer.deleteKey = "deleteIt"; - transfers.add(transfer); - - commandParams.put(ConfigureTask.PARAMETER_TRANSFERS, new Gson().toJson(transfers)); - - ExecutionCommand executionCommand = getExecutionCommand(commandParams); - HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, - null, null); - - hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper( - executionCommand)); - - - action.setExecutionCommand(executionCommand); - action.setHostRoleCommand(hostRoleCommand); - - CommandReport report = action.execute(null); - assertNotNull(report); - - assertEquals(3, c.getConfigsByType("zoo.cfg").size()); - - config = c.getDesiredConfigByType("zoo.cfg"); - assertNotNull(config); - assertFalse("version2".equals(config.getTag())); - - Map map = config.getProperties(); - assertEquals("11", map.get("initLimit")); - assertEquals("10", map.get("copyIt")); - assertTrue(map.containsKey("copyKey")); - assertEquals(map.get("copyIt"), map.get("copyKey")); - assertFalse(map.containsKey("moveIt")); - assertTrue(map.containsKey("movedKey")); - assertFalse(map.containsKey("deletedKey")); - assertTrue(map.containsKey("copiedToMissingKeyWithDefault")); - assertEquals("defaultValue", map.get("copiedToMissingKeyWithDefault")); - 
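// Note: the two *MissingKeyWithDefault assertions above and below exercise the Transfer
// fallback behavior set up earlier in this test: a COPY or MOVE whose fromKey is absent
// from the config writes transfer.defaultValue to its toKey instead of failing, so both
// "copiedToMissingKeyWithDefault" and "movedToMissingWithDefault" appear even though
// their source keys never existed in zoo.cfg.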
assertTrue(map.containsKey("movedToMissingWithDefault")); - assertEquals("defaultValue2", map.get("movedToMissingWithDefault")); - - transfers.clear(); - transfer = new Transfer(); - transfer.operation = TransferOperation.DELETE; - transfer.deleteKey = "*"; - transfer.preserveEdits = true; - transfer.keepKeys.add("copyKey"); - // The below key should be ignored/not added as it doesn't exist originally as part of transfer. - transfer.keepKeys.add("keyNotExisting"); - // The 'null' passed as part of key should be ignored as part of transfer operation. - transfer.keepKeys.add(null); - - - transfers.add(transfer); - commandParams.put(ConfigureTask.PARAMETER_TRANSFERS, new Gson().toJson(transfers)); - - report = action.execute(null); - assertNotNull(report); - - assertEquals(4, c.getConfigsByType("zoo.cfg").size()); - config = c.getDesiredConfigByType("zoo.cfg"); - map = config.getProperties(); - assertEquals(6, map.size()); - assertTrue(map.containsKey("initLimit")); // it just changed to 11 from 10 - assertTrue(map.containsKey("copyKey")); // is new - // Below two keys should not have been added in the map. - assertFalse(map.containsKey("keyNotExisting")); - assertFalse(map.containsKey(null)); - } - - @Test - public void testCoerceValueOnCopy() throws Exception { - Cluster c = clusters.getCluster("c1"); - assertEquals(1, c.getConfigsByType("zoo.cfg").size()); - - Map properties = new HashMap() { - { - put("zoo.server.csv", "c6401,c6402, c6403"); - } - }; - - Config config = createConfig(c, repoVersion2110, "zoo.cfg", "version2", properties); - - c.addDesiredConfig("user", Collections.singleton(config)); - assertEquals(2, c.getConfigsByType("zoo.cfg").size()); - - createUpgrade(c, repoVersion2111); - - Map commandParams = new HashMap<>(); - commandParams.put("clusterName", "c1"); - commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg"); - - // copy with coerce - List transfers = new ArrayList<>(); - Transfer transfer = new Transfer(); - transfer.operation = TransferOperation.COPY; - transfer.coerceTo = TransferCoercionType.YAML_ARRAY; - transfer.fromKey = "zoo.server.csv"; - transfer.toKey = "zoo.server.array"; - transfer.defaultValue = "['foo','bar']"; - transfers.add(transfer); - - commandParams.put(ConfigureTask.PARAMETER_TRANSFERS, new Gson().toJson(transfers)); - - ExecutionCommand executionCommand = getExecutionCommand(commandParams); - HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null); - - hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand)); - - - action.setExecutionCommand(executionCommand); - action.setHostRoleCommand(hostRoleCommand); - - CommandReport report = action.execute(null); - assertNotNull(report); - - assertEquals(3, c.getConfigsByType("zoo.cfg").size()); - - config = c.getDesiredConfigByType("zoo.cfg"); - assertNotNull(config); - assertFalse("version2".equals(config.getTag())); - - Map map = config.getProperties(); - assertEquals("c6401,c6402, c6403", map.get("zoo.server.csv")); - assertEquals("['c6401','c6402','c6403']", map.get("zoo.server.array")); - } - - @Test - public void testValueReplacement() throws Exception { - Cluster c = clusters.getCluster("c1"); - assertEquals(1, c.getConfigsByType("zoo.cfg").size()); - - Map properties = new HashMap() { - { - put("key_to_replace", "My New Cat"); - put("key_with_no_match", "WxyAndZ"); - } - }; - - Config config = createConfig(c, repoVersion2110, "zoo.cfg", "version2", properties); - - c.addDesiredConfig("user", 
Collections.singleton(config)); - assertEquals(2, c.getConfigsByType("zoo.cfg").size()); - - createUpgrade(c, repoVersion2111); - - Map commandParams = new HashMap<>(); - commandParams.put("clusterName", "c1"); - commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg"); - - // Replacement task - List replacements = new ArrayList<>(); - Replace replace = new Replace(); - replace.key = "key_to_replace"; - replace.find = "New Cat"; - replace.replaceWith = "Wet Dog"; - replacements.add(replace); - - replace = new Replace(); - replace.key = "key_with_no_match"; - replace.find = "abc"; - replace.replaceWith = "def"; - replacements.add(replace); - - commandParams.put(ConfigureTask.PARAMETER_REPLACEMENTS, new Gson().toJson(replacements)); - - ExecutionCommand executionCommand = getExecutionCommand(commandParams); - HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null); - - hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand)); - - - action.setExecutionCommand(executionCommand); - action.setHostRoleCommand(hostRoleCommand); - - CommandReport report = action.execute(null); - assertNotNull(report); - - assertEquals(3, c.getConfigsByType("zoo.cfg").size()); - - config = c.getDesiredConfigByType("zoo.cfg"); - assertNotNull(config); - assertFalse("version2".equals(config.getTag())); - - assertEquals("My Wet Dog", config.getProperties().get("key_to_replace")); - assertEquals("WxyAndZ", config.getProperties().get("key_with_no_match")); - } - - /** - * Tests that replacing a {@code null} value works. - * - * @throws Exception - */ - @Test - public void testValueReplacementWithMissingConfigurations() throws Exception { - Cluster c = clusters.getCluster("c1"); - assertEquals(1, c.getConfigsByType("zoo.cfg").size()); - - Map properties = new HashMap() { - { - put("existing", "This exists!"); - put("missing", null); - } - }; - - Config config = createConfig(c, repoVersion2110, "zoo.cfg", "version2", properties); - - c.addDesiredConfig("user", Collections.singleton(config)); - assertEquals(2, c.getConfigsByType("zoo.cfg").size()); - - createUpgrade(c, repoVersion2111); - - Map commandParams = new HashMap<>(); - commandParams.put("clusterName", "c1"); - commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg"); - - // Replacement task - List replacements = new ArrayList<>(); - Replace replace = new Replace(); - replace.key = "missing"; - replace.find = "foo"; - replace.replaceWith = "bar"; - replacements.add(replace); - - commandParams.put(ConfigureTask.PARAMETER_REPLACEMENTS, new Gson().toJson(replacements)); - - ExecutionCommand executionCommand = new ExecutionCommand(); - executionCommand.setCommandParams(commandParams); - executionCommand.setClusterName("c1"); - executionCommand.setRoleParams(new HashMap<>()); - executionCommand.getRoleParams().put(ServerAction.ACTION_USER_NAME, "username"); - - HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null); - - hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand)); - - - action.setExecutionCommand(executionCommand); - action.setHostRoleCommand(hostRoleCommand); - - CommandReport report = action.execute(null); - assertNotNull(report); - - assertEquals(3, c.getConfigsByType("zoo.cfg").size()); - - config = c.getDesiredConfigByType("zoo.cfg"); - assertEquals(null, config.getProperties().get("missing")); - } - - @Test - public void testMultipleKeyValuesPerTask() throws Exception { - Cluster c = 
clusters.getCluster("c1"); - assertEquals(1, c.getConfigsByType("zoo.cfg").size()); - - Map properties = new HashMap() { - { - put("fooKey", "barValue"); - } - }; - - Config config = createConfig(c, repoVersion2200, "zoo.cfg", "version2", properties); - - c.addDesiredConfig("user", Collections.singleton(config)); - assertEquals(2, c.getConfigsByType("zoo.cfg").size()); - - // create several configurations - List configurations = new ArrayList<>(); - ConfigurationKeyValue fooKey2 = new ConfigurationKeyValue(); - configurations.add(fooKey2); - fooKey2.key = "fooKey2"; - fooKey2.value = "barValue2"; - - ConfigurationKeyValue fooKey3 = new ConfigurationKeyValue(); - configurations.add(fooKey3); - fooKey3.key = "fooKey3"; - fooKey3.value = "barValue3"; - fooKey3.mask = true; - - createUpgrade(c, repoVersion2200); - - Map commandParams = new HashMap<>(); - commandParams.put("clusterName", "c1"); - commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg"); - commandParams.put(ConfigureTask.PARAMETER_KEY_VALUE_PAIRS, new Gson().toJson(configurations)); - - ExecutionCommand executionCommand = getExecutionCommand(commandParams); - HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null); - hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand)); - - - action.setExecutionCommand(executionCommand); - action.setHostRoleCommand(hostRoleCommand); - - CommandReport report = action.execute(null); - assertNotNull(report); - - assertEquals(2, c.getConfigsByType("zoo.cfg").size()); - - config = c.getDesiredConfigByType("zoo.cfg"); - assertNotNull(config); - assertEquals("barValue", config.getProperties().get("fooKey")); - assertEquals("barValue2", config.getProperties().get("fooKey2")); - assertEquals("barValue3", config.getProperties().get("fooKey3")); - assertTrue(report.getStdOut().contains("******")); - - } - - @Test - public void testAllowedSet() throws Exception { - Cluster c = clusters.getCluster("c1"); - assertEquals(1, c.getConfigsByType("zoo.cfg").size()); - - Map properties = new HashMap() { - { - put("set.key.1", "s1"); - put("set.key.2", "s2"); - put("set.key.3", "s3"); - put("set.key.4", "s4"); - } - }; - - Config config = createConfig(c, repoVersion2200, "zoo.cfg", "version2", properties); - - c.addDesiredConfig("user", Collections.singleton(config)); - assertEquals(2, c.getConfigsByType("zoo.cfg").size()); - - // create several configurations - List configurations = new ArrayList<>(); - ConfigurationKeyValue fooKey1 = new ConfigurationKeyValue(); - configurations.add(fooKey1); - fooKey1.key = "fooKey1"; - fooKey1.value = "barValue1"; - - ConfigurationKeyValue fooKey2 = new ConfigurationKeyValue(); - configurations.add(fooKey2); - fooKey2.key = "fooKey2"; - fooKey2.value = "barValue2"; - - ConfigurationKeyValue fooKey3 = new ConfigurationKeyValue(); - configurations.add(fooKey3); - fooKey3.key = "fooKey3"; - fooKey3.value = "barValue3"; - fooKey3.ifKey = "set.key.1"; - fooKey3.ifType = "zoo.cfg"; - fooKey3.ifValue = "s1"; - - ConfigurationKeyValue fooKey4 = new ConfigurationKeyValue(); - configurations.add(fooKey4); - fooKey4.key = "fooKey4"; - fooKey4.value = "barValue4"; - fooKey4.ifKey = "set.key.2"; - fooKey4.ifType = "zoo.cfg"; - fooKey4.ifKeyState= PropertyKeyState.PRESENT; - - ConfigurationKeyValue fooKey5 = new ConfigurationKeyValue(); - configurations.add(fooKey5); - fooKey5.key = "fooKey5"; - fooKey5.value = "barValue5"; - fooKey5.ifKey = "abc"; - fooKey5.ifType = "zoo.cfg"; - fooKey5.ifKeyState= 
PropertyKeyState.ABSENT; - - createUpgrade(c, repoVersion2200); - - Map commandParams = new HashMap<>(); - commandParams.put("clusterName", "c1"); - commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg"); - commandParams.put(ConfigureTask.PARAMETER_KEY_VALUE_PAIRS, new Gson().toJson(configurations)); - - ExecutionCommand executionCommand = getExecutionCommand(commandParams); - HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null); - hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand)); - - - action.setExecutionCommand(executionCommand); - action.setHostRoleCommand(hostRoleCommand); - - CommandReport report = action.execute(null); - assertNotNull(report); - - assertEquals(2, c.getConfigsByType("zoo.cfg").size()); - - config = c.getDesiredConfigByType("zoo.cfg"); - assertNotNull(config); - assertEquals("barValue1", config.getProperties().get("fooKey1")); - assertEquals("barValue2", config.getProperties().get("fooKey2")); - assertEquals("barValue3", config.getProperties().get("fooKey3")); - assertEquals("barValue4", config.getProperties().get("fooKey4")); - assertEquals("barValue5", config.getProperties().get("fooKey5")); - assertEquals("s1", config.getProperties().get("set.key.1")); - assertEquals("s2", config.getProperties().get("set.key.2")); - assertEquals("s3", config.getProperties().get("set.key.3")); - assertEquals("s4", config.getProperties().get("set.key.4")); - } - - @Test - public void testDisallowedSet() throws Exception { - Cluster c = clusters.getCluster("c1"); - assertEquals(1, c.getConfigsByType("zoo.cfg").size()); - - Map properties = new HashMap() { - { - put("set.key.1", "s1"); - put("set.key.2", "s2"); - put("set.key.3", "s3"); - put("set.key.4", "s4"); - } - }; - - Config config = createConfig(c, repoVersion2200, "zoo.cfg", "version2", properties); - - c.addDesiredConfig("user", Collections.singleton(config)); - assertEquals(2, c.getConfigsByType("zoo.cfg").size()); - - // create several configurations - List configurations = new ArrayList<>(); - ConfigurationKeyValue fooKey3 = new ConfigurationKeyValue(); - configurations.add(fooKey3); - fooKey3.key = "fooKey3"; - fooKey3.value = "barValue3"; - fooKey3.ifKey = "set.key.1"; - fooKey3.ifType = "zoo.cfg"; - fooKey3.ifValue = "no-such-value"; - - ConfigurationKeyValue fooKey4 = new ConfigurationKeyValue(); - configurations.add(fooKey4); - fooKey4.key = "fooKey4"; - fooKey4.value = "barValue4"; - fooKey4.ifKey = "set.key.2"; - fooKey4.ifType = "zoo.cfg"; - fooKey4.ifKeyState= PropertyKeyState.ABSENT; - - ConfigurationKeyValue fooKey5 = new ConfigurationKeyValue(); - configurations.add(fooKey5); - fooKey5.key = "fooKey5"; - fooKey5.value = "barValue5"; - fooKey5.ifKey = "abc"; - fooKey5.ifType = "zoo.cfg"; - fooKey5.ifKeyState= PropertyKeyState.PRESENT; - - createUpgrade(c, repoVersion2200); - - Map commandParams = new HashMap<>(); - commandParams.put("clusterName", "c1"); - commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg"); - commandParams.put(ConfigureTask.PARAMETER_KEY_VALUE_PAIRS, new Gson().toJson(configurations)); - - ExecutionCommand executionCommand = getExecutionCommand(commandParams); - HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null); - hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand)); - - - action.setExecutionCommand(executionCommand); - action.setHostRoleCommand(hostRoleCommand); - - CommandReport report = action.execute(null); - 
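// Illustrative sketch (not from the original test): each conditional set
// above is gated by its "if" attributes. Evaluated by hand against the base
// zoo.cfg properties, none of the three conditions holds, so the action
// should skip every set; the assertions below verify exactly that.
boolean fooKey3Fires = "no-such-value".equals(properties.get("set.key.1")); // false: value is "s1"
boolean fooKey4Fires = !properties.containsKey("set.key.2");                // false: the key is present
boolean fooKey5Fires = properties.containsKey("abc");                       // false: the key is absent
assertFalse(fooKey3Fires || fooKey4Fires || fooKey5Fires);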
assertNotNull(report); - - assertEquals(2, c.getConfigsByType("zoo.cfg").size()); - - config = c.getDesiredConfigByType("zoo.cfg"); - assertNotNull(config); - assertEquals("s1", config.getProperties().get("set.key.1")); - assertEquals("s2", config.getProperties().get("set.key.2")); - assertEquals("s3", config.getProperties().get("set.key.3")); - assertEquals("s4", config.getProperties().get("set.key.4")); - assertFalse(config.getProperties().containsKey("fooKey3")); - assertFalse(config.getProperties().containsKey("fooKey4")); - assertFalse(config.getProperties().containsKey("fooKey5")); - } - - @Test - public void testAllowedReplacement() throws Exception { - - Cluster c = clusters.getCluster("c1"); - assertEquals(1, c.getConfigsByType("zoo.cfg").size()); - - Map<String, String> properties = new HashMap<String, String>() { - { - put("replace.key.1", "r1"); - put("replace.key.2", "r2"); - put("replace.key.3", "r3a1"); - put("replace.key.4", "r4"); - put("replace.key.5", "r5"); - } - }; - - Config config = createConfig(c, repoVersion2200, "zoo.cfg", "version2", properties); - - c.addDesiredConfig("user", Collections.singleton(config)); - assertEquals(2, c.getConfigsByType("zoo.cfg").size()); - - // create several replacements, each added to the list exactly once - List<Replace> replacements = new ArrayList<>(); - Replace replace = new Replace(); - replace.key = "replace.key.3"; - replace.find = "a"; - replace.replaceWith = "A"; - replacements.add(replace); - - Replace replace2 = new Replace(); - replace2.key = "replace.key.4"; - replace2.find = "r"; - replace2.replaceWith = "R"; - replace2.ifKey = "replace.key.1"; - replace2.ifType = "zoo.cfg"; - replace2.ifValue = "r1"; - replacements.add(replace2); - - Replace replace3 = new Replace(); - replace3.key = "replace.key.2"; - replace3.find = "r"; - replace3.replaceWith = "R"; - replace3.ifKey = "replace.key.1"; - replace3.ifType = "zoo.cfg"; - replace3.ifKeyState = PropertyKeyState.PRESENT; - replacements.add(replace3); - - Replace replace4 = new Replace(); - replace4.key = "replace.key.5"; - replace4.find = "r"; - replace4.replaceWith = "R"; - replace4.ifKey = "no.such.key"; - replace4.ifType = "zoo.cfg"; - replace4.ifKeyState = PropertyKeyState.ABSENT; - replacements.add(replace4); - - createUpgrade(c, repoVersion2200); - - Map<String, String> commandParams = new HashMap<>(); - commandParams.put("clusterName", "c1"); - commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg"); - commandParams.put(ConfigureTask.PARAMETER_REPLACEMENTS, new Gson().toJson(replacements)); - - ExecutionCommand executionCommand = getExecutionCommand(commandParams); - HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null); - hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand)); - - - action.setExecutionCommand(executionCommand); - action.setHostRoleCommand(hostRoleCommand); - - CommandReport report = action.execute(null); - assertNotNull(report); - - assertEquals(2, c.getConfigsByType("zoo.cfg").size()); - - config = c.getDesiredConfigByType("zoo.cfg"); - assertNotNull(config); - assertEquals("r1", config.getProperties().get("replace.key.1")); - assertEquals("R2", config.getProperties().get("replace.key.2")); - assertEquals("r3A1", config.getProperties().get("replace.key.3")); - assertEquals("R4", config.getProperties().get("replace.key.4")); - assertEquals("R5", config.getProperties().get("replace.key.5")); - } - - @Test - public void testDisallowedReplacement() throws Exception { - Cluster 
c = clusters.getCluster("c1"); - assertEquals(1, c.getConfigsByType("zoo.cfg").size()); - - Map properties = new HashMap() { - { - put("replace.key.1", "r1"); - put("replace.key.2", "r2"); - put("replace.key.3", "r3a1"); - put("replace.key.4", "r4"); - put("replace.key.5", "r5"); - } - }; - - Config config = createConfig(c, repoVersion2110, "zoo.cfg", "version2", properties); - - c.addDesiredConfig("user", Collections.singleton(config)); - assertEquals(2, c.getConfigsByType("zoo.cfg").size()); - - // create several configurations - List replacements = new ArrayList<>(); - - Replace replace2 = new Replace(); - replacements.add(replace2); - replace2.key = "replace.key.4"; - replace2.find = "r"; - replace2.replaceWith = "R"; - replace2.ifKey = "replace.key.1"; - replace2.ifType = "zoo.cfg"; - replace2.ifValue = "not-this-value"; - replacements.add(replace2); - - Replace replace3 = new Replace(); - replacements.add(replace3); - replace3.key = "replace.key.2"; - replace3.find = "r"; - replace3.replaceWith = "R"; - replace3.ifKey = "replace.key.1"; - replace3.ifType = "zoo.cfg"; - replace3.ifKeyState = PropertyKeyState.ABSENT; - replacements.add(replace3); - - Replace replace4 = new Replace(); - replacements.add(replace3); - replace4.key = "replace.key.5"; - replace4.find = "r"; - replace4.replaceWith = "R"; - replace4.ifKey = "no.such.key"; - replace4.ifType = "zoo.cfg"; - replace4.ifKeyState = PropertyKeyState.PRESENT; - replacements.add(replace4); - - createUpgrade(c, repoVersion2200); - - Map commandParams = new HashMap<>(); - commandParams.put("clusterName", "c1"); - commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg"); - commandParams.put(ConfigureTask.PARAMETER_REPLACEMENTS, new Gson().toJson(replacements)); - - ExecutionCommand executionCommand = getExecutionCommand(commandParams); - HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null); - hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand)); - - - action.setExecutionCommand(executionCommand); - action.setHostRoleCommand(hostRoleCommand); - - CommandReport report = action.execute(null); - assertNotNull(report); - - assertEquals(2, c.getConfigsByType("zoo.cfg").size()); - - config = c.getDesiredConfigByType("zoo.cfg"); - assertNotNull(config); - assertEquals("r1", config.getProperties().get("replace.key.1")); - assertEquals("r2", config.getProperties().get("replace.key.2")); - assertEquals("r3a1", config.getProperties().get("replace.key.3")); - assertEquals("r4", config.getProperties().get("replace.key.4")); - assertEquals("r5", config.getProperties().get("replace.key.5")); - } - - @Test - public void testAllowedTransferCopy() throws Exception { - Cluster c = clusters.getCluster("c1"); - assertEquals(1, c.getConfigsByType("zoo.cfg").size()); - - Map properties = new HashMap() { - { - put("initLimit", "10"); - put("copy.key.1", "c1"); - put("copy.key.2", "c2"); - } - }; - - Config config = createConfig(c, repoVersion2110, "zoo.cfg", "version2", properties); - - c.addDesiredConfig("user", Collections.singleton(config)); - assertEquals(2, c.getConfigsByType("zoo.cfg").size()); - - List configurations = new ArrayList<>(); - ConfigurationKeyValue keyValue = new ConfigurationKeyValue(); - configurations.add(keyValue); - keyValue.key = "initLimit"; - keyValue.value = "11"; - - createUpgrade(c, repoVersion2200); - - Map commandParams = new HashMap<>(); - commandParams.put("clusterName", "c1"); - commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg"); - 
commandParams.put(ConfigureTask.PARAMETER_KEY_VALUE_PAIRS, new Gson().toJson(configurations)); - - // normal copy - List<Transfer> transfers = new ArrayList<>(); - Transfer transfer1 = new Transfer(); - transfer1.operation = TransferOperation.COPY; - transfer1.fromKey = "copy.key.1"; - transfer1.toKey = "copy.to.key.1"; - transfers.add(transfer1); - - // copy with default - Transfer transfer2 = new Transfer(); - transfer2.operation = TransferOperation.COPY; - transfer2.fromKey = "copy.key.no.need.to.exist.1"; - transfer2.toKey = "copy.to.key.with.default.1"; - transfer2.defaultValue = "defaultValue"; - transfers.add(transfer2); - - Transfer transfer3 = new Transfer(); - transfer3.operation = TransferOperation.COPY; - transfer3.fromKey = "copy.key.2"; - transfer3.toKey = "copy.to.key.2"; - transfer3.ifKey = "initLimit"; - transfer3.ifType = "zoo.cfg"; - transfer3.ifValue = "10"; - transfers.add(transfer3); - - Transfer transfer4 = new Transfer(); - transfer4.operation = TransferOperation.COPY; - transfer4.fromKey = "copy.key.2"; - transfer4.toKey = "copy.to.key.3"; - transfer4.ifKey = "initLimit"; - transfer4.ifType = "zoo.cfg"; - transfer4.ifKeyState = PropertyKeyState.PRESENT; - transfers.add(transfer4); - - Transfer transfer5 = new Transfer(); - transfer5.operation = TransferOperation.COPY; - transfer5.fromKey = "copy.key.no.need.to.exist.2"; - transfer5.toKey = "copy.to.key.with.default.2"; - transfer5.defaultValue = "defaultValue2"; - transfer5.ifKey = "no.such.key"; - transfer5.ifType = "zoo.cfg"; - transfer5.ifKeyState = PropertyKeyState.ABSENT; - transfers.add(transfer5); - - commandParams.put(ConfigureTask.PARAMETER_TRANSFERS, new Gson().toJson(transfers)); - - ExecutionCommand executionCommand = getExecutionCommand(commandParams); - HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, - null, null); - - hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper( - executionCommand)); - - - action.setExecutionCommand(executionCommand); - action.setHostRoleCommand(hostRoleCommand); - - CommandReport report = action.execute(null); - assertNotNull(report); - - assertEquals(3, c.getConfigsByType("zoo.cfg").size()); - - config = c.getDesiredConfigByType("zoo.cfg"); - assertNotNull(config); - assertFalse("version2".equals(config.getTag())); - - Map<String, String> map = config.getProperties(); - assertEquals(8, map.size()); - assertEquals("11", map.get("initLimit")); - assertEquals(map.get("copy.key.1"), map.get("copy.to.key.1")); - assertFalse(map.containsKey("copy.key.no.need.to.exist.1")); - assertEquals("defaultValue", map.get("copy.to.key.with.default.1")); - assertFalse(map.containsKey("copy.key.no.need.to.exist.2")); - assertEquals("defaultValue2", map.get("copy.to.key.with.default.2")); - assertEquals(map.get("copy.key.2"), map.get("copy.to.key.2")); - assertEquals(map.get("copy.key.2"), map.get("copy.to.key.3")); - } - - @Test - public void testDisallowedTransferCopy() throws Exception { - Cluster c = clusters.getCluster("c1"); - assertEquals(1, c.getConfigsByType("zoo.cfg").size()); - - Map<String, String> properties = new HashMap<String, String>() { - { - put("initLimit", "10"); - put("copy.key.1", "c1"); - put("copy.key.2", "c2"); - } - }; - - Config config = createConfig(c, repoVersion2110, "zoo.cfg", "version2", properties); - - c.addDesiredConfig("user", Collections.singleton(config)); - assertEquals(2, c.getConfigsByType("zoo.cfg").size()); - - List<ConfigurationKeyValue> configurations = new ArrayList<>(); - ConfigurationKeyValue keyValue = new ConfigurationKeyValue(); - configurations.add(keyValue); - keyValue.key = 
"initLimit"; - keyValue.value = "11"; - - createUpgrade(c, repoVersion2111); - - Map commandParams = new HashMap<>(); - commandParams.put("clusterName", "c1"); - commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg"); - commandParams.put(ConfigureTask.PARAMETER_KEY_VALUE_PAIRS, new Gson().toJson(configurations)); - - List transfers = new ArrayList<>(); - Transfer transfer = new Transfer(); - transfer.operation = TransferOperation.COPY; - transfer.fromKey = "copy.key.2"; - transfer.toKey = "copy.to.key.2"; - transfer.ifKey = "initLimit"; - transfer.ifType = "zoo.cfg"; - transfer.ifValue = "not-the-real-value"; - transfers.add(transfer); - - transfer = new Transfer(); - transfer.operation = TransferOperation.COPY; - transfer.fromKey = "copy.key.2"; - transfer.toKey = "copy.to.key.3"; - transfer.ifKey = "initLimit"; - transfer.ifType = "zoo.cfg"; - transfer.ifKeyState = PropertyKeyState.ABSENT; - transfers.add(transfer); - - transfer = new Transfer(); - transfer.operation = TransferOperation.COPY; - transfer.fromKey = "copy.key.no.need.to.exist.2"; - transfer.toKey = "copy.to.key.with.default.2"; - transfer.defaultValue = "defaultValue2"; - transfer.ifKey = "no.such.key"; - transfer.ifType = "zoo.cfg"; - transfer.ifKeyState = PropertyKeyState.PRESENT; - transfers.add(transfer); - - commandParams.put(ConfigureTask.PARAMETER_TRANSFERS, new Gson().toJson(transfers)); - - ExecutionCommand executionCommand = new ExecutionCommand(); - executionCommand.setCommandParams(commandParams); - executionCommand.setClusterName("c1"); - executionCommand.setRoleParams(new HashMap<>()); - executionCommand.getRoleParams().put(ServerAction.ACTION_USER_NAME, "username"); - - HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, - null, null); - - hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper( - executionCommand)); - - - action.setExecutionCommand(executionCommand); - action.setHostRoleCommand(hostRoleCommand); - - CommandReport report = action.execute(null); - assertNotNull(report); - - assertEquals(3, c.getConfigsByType("zoo.cfg").size()); - - config = c.getDesiredConfigByType("zoo.cfg"); - assertNotNull(config); - assertFalse("version2".equals(config.getTag())); - - Map map = config.getProperties(); - assertEquals(3, map.size()); - assertEquals("11", map.get("initLimit")); - assertEquals("c1", map.get("copy.key.1")); - assertEquals("c2", map.get("copy.key.2")); - } - - @Test - public void testAllowedTransferMove() throws Exception { - Cluster c = clusters.getCluster("c1"); - assertEquals(1, c.getConfigsByType("zoo.cfg").size()); - - Map properties = new HashMap() { - { - put("initLimit", "10"); - put("move.key.1", "m1"); - put("move.key.2", "m2"); - put("move.key.3", "m3"); - put("move.key.4", "m4"); - } - }; - - Config config = createConfig(c, repoVersion2110, "zoo.cfg", "version2", properties); - - c.addDesiredConfig("user", Collections.singleton(config)); - assertEquals(2, c.getConfigsByType("zoo.cfg").size()); - - List configurations = new ArrayList<>(); - ConfigurationKeyValue keyValue = new ConfigurationKeyValue(); - configurations.add(keyValue); - keyValue.key = "initLimit"; - keyValue.value = "11"; - - createUpgrade(c, repoVersion2111); - - Map commandParams = new HashMap<>(); - commandParams.put("clusterName", "c1"); - commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg"); - commandParams.put(ConfigureTask.PARAMETER_KEY_VALUE_PAIRS, new Gson().toJson(configurations)); - - List transfers = new ArrayList<>(); - Transfer transfer1 = new 
Transfer(); - transfer1.operation = TransferOperation.MOVE; - transfer1.fromKey = "move.key.1"; - transfer1.toKey = "move.to.key.1"; - transfers.add(transfer1); - - Transfer transfer2 = new Transfer(); - transfer2.operation = TransferOperation.MOVE; - transfer2.fromKey = "move.key.2"; - transfer2.toKey = "move.to.key.2"; - transfer2.ifKey = "initLimit"; - transfer2.ifType = "zoo.cfg"; - transfer2.ifValue = "10"; - transfers.add(transfer2); - - Transfer transfer3 = new Transfer(); - transfer3.operation = TransferOperation.MOVE; - transfer3.fromKey = "move.key.3"; - transfer3.toKey = "move.to.key.3"; - transfer3.ifKey = "initLimit"; - transfer3.ifType = "zoo.cfg"; - transfer3.ifKeyState = PropertyKeyState.PRESENT; - transfers.add(transfer3); - - Transfer transfer4 = new Transfer(); - transfer4.operation = TransferOperation.MOVE; - transfer4.fromKey = "move.key.4"; - transfer4.toKey = "move.to.key.4"; - transfer4.ifKey = "no.such.key"; - transfer4.ifType = "zoo.cfg"; - transfer4.ifKeyState = PropertyKeyState.ABSENT; - transfers.add(transfer4); - - commandParams.put(ConfigureTask.PARAMETER_TRANSFERS, new Gson().toJson(transfers)); - - ExecutionCommand executionCommand = getExecutionCommand(commandParams); - HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, - null, null); - - hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper( - executionCommand)); - - - action.setExecutionCommand(executionCommand); - action.setHostRoleCommand(hostRoleCommand); - - CommandReport report = action.execute(null); - assertNotNull(report); - - assertEquals(3, c.getConfigsByType("zoo.cfg").size()); - - config = c.getDesiredConfigByType("zoo.cfg"); - assertNotNull(config); - assertFalse("version2".equals(config.getTag())); - - Map map = config.getProperties(); - assertEquals(5, map.size()); - String[] shouldNotExitKeys = new String[]{"move.key.1", "move.key.2", "move.key.3", "move.key.4"}; - String[] shouldExitKeys = new String[]{"move.to.key.1", "move.to.key.2", "move.to.key.3", "move.to.key.4"}; - for(String key: shouldNotExitKeys){ - assertFalse(map.containsKey(key)); - } - - for(String key: shouldExitKeys){ - assertTrue(map.containsKey(key)); - } - } - - @Test - public void testDisallowedTransferMove() throws Exception { - Cluster c = clusters.getCluster("c1"); - assertEquals(1, c.getConfigsByType("zoo.cfg").size()); - - Map properties = new HashMap() { - { - put("initLimit", "10"); - put("move.key.1", "m1"); - put("move.key.2", "m2"); - put("move.key.3", "m3"); - put("move.key.4", "m4"); - } - }; - - Config config = createConfig(c, repoVersion2110, "zoo.cfg", "version2", properties); - - c.addDesiredConfig("user", Collections.singleton(config)); - assertEquals(2, c.getConfigsByType("zoo.cfg").size()); - - List configurations = new ArrayList<>(); - ConfigurationKeyValue keyValue = new ConfigurationKeyValue(); - configurations.add(keyValue); - keyValue.key = "initLimit"; - keyValue.value = "11"; - - createUpgrade(c, repoVersion2111); - - Map commandParams = new HashMap<>(); - commandParams.put("clusterName", "c1"); - commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg"); - commandParams.put(ConfigureTask.PARAMETER_KEY_VALUE_PAIRS, new Gson().toJson(configurations)); - - List transfers = new ArrayList<>(); - Transfer transfer2 = new Transfer(); - transfer2.operation = TransferOperation.MOVE; - transfer2.fromKey = "move.key.2"; - transfer2.toKey = "move.to.key.2"; - transfer2.ifKey = "initLimit"; - transfer2.ifType = "zoo.cfg"; - transfer2.ifValue = 
"not-real-value"; - transfers.add(transfer2); - - Transfer transfer3 = new Transfer(); - transfer3.operation = TransferOperation.MOVE; - transfer3.fromKey = "move.key.3"; - transfer3.toKey = "move.to.key.3"; - transfer3.ifKey = "initLimit"; - transfer3.ifType = "zoo.cfg"; - transfer3.ifKeyState = PropertyKeyState.ABSENT; - transfers.add(transfer3); - - Transfer transfer4 = new Transfer(); - transfer4.operation = TransferOperation.MOVE; - transfer4.fromKey = "move.key.4"; - transfer4.toKey = "move.to.key.4"; - transfer4.ifKey = "no.such.key"; - transfer4.ifType = "zoo.cfg"; - transfer4.ifKeyState = PropertyKeyState.PRESENT; - transfers.add(transfer3); - - commandParams.put(ConfigureTask.PARAMETER_TRANSFERS, new Gson().toJson(transfers)); - - ExecutionCommand executionCommand = getExecutionCommand(commandParams); - HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, - null, null); - - hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper( - executionCommand)); - - - action.setExecutionCommand(executionCommand); - action.setHostRoleCommand(hostRoleCommand); - - CommandReport report = action.execute(null); - assertNotNull(report); - - assertEquals(3, c.getConfigsByType("zoo.cfg").size()); - - config = c.getDesiredConfigByType("zoo.cfg"); - assertNotNull(config); - assertFalse("version2".equals(config.getTag())); - - Map map = config.getProperties(); - assertEquals(5, map.size()); - - String[] shouldExitKeys = new String[]{"move.key.1", "move.key.2", "move.key.3", "move.key.4"}; - String[] shouldNotExitKeys = new String[]{"move.to.key.1", "move.to.key.2", "move.to.key.3", "move.to.key.4"}; - for(String key: shouldNotExitKeys){ - assertFalse(map.containsKey(key)); - } - - for(String key: shouldExitKeys){ - assertTrue(map.containsKey(key)); - } - } - - @Test - public void testAllowedTransferDelete() throws Exception { - Cluster c = clusters.getCluster("c1"); - assertEquals(1, c.getConfigsByType("zoo.cfg").size()); - - Map properties = new HashMap() { - { - put("initLimit", "10"); - put("delete.key.1", "d1"); - put("delete.key.2", "d2"); - put("delete.key.3", "d3"); - put("delete.key.4", "d4"); - } - }; - - Config config = createConfig(c, repoVersion2110, "zoo.cfg", "version2", properties); - - c.addDesiredConfig("user", Collections.singleton(config)); - assertEquals(2, c.getConfigsByType("zoo.cfg").size()); - - List configurations = new ArrayList<>(); - ConfigurationKeyValue keyValue = new ConfigurationKeyValue(); - configurations.add(keyValue); - keyValue.key = "initLimit"; - keyValue.value = "11"; - - createUpgrade(c, repoVersion2111); - - Map commandParams = new HashMap<>(); - commandParams.put("clusterName", "c1"); - commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg"); - commandParams.put(ConfigureTask.PARAMETER_KEY_VALUE_PAIRS, new Gson().toJson(configurations)); - - List transfers = new ArrayList<>(); - Transfer transfer1 = new Transfer(); - transfer1.operation = TransferOperation.DELETE; - transfer1.deleteKey = "delete.key.1"; - transfers.add(transfer1); - - Transfer transfer2 = new Transfer(); - transfer2.operation = TransferOperation.DELETE; - transfer2.deleteKey = "delete.key.2"; - transfer2.ifKey = "initLimit"; - transfer2.ifType = "zoo.cfg"; - transfer2.ifValue = "10"; - transfers.add(transfer2); - - Transfer transfer3 = new Transfer(); - transfer3.operation = TransferOperation.DELETE; - transfer3.deleteKey = "delete.key.3"; - transfer3.ifKey = "initLimit"; - transfer3.ifType = "zoo.cfg"; - transfer3.ifKeyState = 
PropertyKeyState.PRESENT; - transfers.add(transfer3); - - Transfer transfer4 = new Transfer(); - transfer4.operation = TransferOperation.DELETE; - transfer4.deleteKey = "delete.key.4"; - transfer4.ifKey = "no.such.key"; - transfer4.ifType = "zoo.cfg"; - transfer4.ifKeyState = PropertyKeyState.ABSENT; - transfers.add(transfer4); - - commandParams.put(ConfigureTask.PARAMETER_TRANSFERS, new Gson().toJson(transfers)); - - ExecutionCommand executionCommand = getExecutionCommand(commandParams); - HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, - null, null); - - hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper( - executionCommand)); - - - action.setExecutionCommand(executionCommand); - action.setHostRoleCommand(hostRoleCommand); - - CommandReport report = action.execute(null); - assertNotNull(report); - - assertEquals(3, c.getConfigsByType("zoo.cfg").size()); - - config = c.getDesiredConfigByType("zoo.cfg"); - assertNotNull(config); - assertFalse("version2".equals(config.getTag())); - - Map map = config.getProperties(); - assertEquals(1, map.size()); - assertEquals("11", map.get("initLimit")); - String[] shouldNotExitKeys = new String[]{"delete.key.1","delete.key.2","delete.key.3","delete.key.4"}; - for(String key: shouldNotExitKeys){ - assertFalse(map.containsKey(key)); - } - } - - @Test - public void testDisallowedTransferDelete() throws Exception { - Cluster c = clusters.getCluster("c1"); - assertEquals(1, c.getConfigsByType("zoo.cfg").size()); - - Map properties = new HashMap() { - { - put("initLimit", "10"); - put("delete.key.1", "d1"); - put("delete.key.2", "d2"); - put("delete.key.3", "d3"); - put("delete.key.4", "d4"); - } - }; - - Config config = createConfig(c, repoVersion2110, "zoo.cfg", "version2", properties); - - c.addDesiredConfig("user", Collections.singleton(config)); - assertEquals(2, c.getConfigsByType("zoo.cfg").size()); - - List configurations = new ArrayList<>(); - ConfigurationKeyValue keyValue = new ConfigurationKeyValue(); - configurations.add(keyValue); - keyValue.key = "initLimit"; - keyValue.value = "11"; - - createUpgrade(c, repoVersion2111); - - Map commandParams = new HashMap<>(); - commandParams.put("clusterName", "c1"); - commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg"); - commandParams.put(ConfigureTask.PARAMETER_KEY_VALUE_PAIRS, new Gson().toJson(configurations)); - - List transfers = new ArrayList<>(); - Transfer transfer2 = new Transfer(); - transfer2.operation = TransferOperation.DELETE; - transfer2.deleteKey = "delete.key.2"; - transfer2.ifKey = "initLimit"; - transfer2.ifType = "zoo.cfg"; - transfer2.ifValue = "not.real.value"; - transfers.add(transfer2); - - Transfer transfer3 = new Transfer(); - transfer3.operation = TransferOperation.DELETE; - transfer3.deleteKey = "delete.key.3"; - transfer3.ifKey = "initLimit"; - transfer3.ifType = "zoo.cfg"; - transfer3.ifKeyState = PropertyKeyState.ABSENT; - transfers.add(transfer3); - - Transfer transfer4 = new Transfer(); - transfer4.operation = TransferOperation.DELETE; - transfer4.deleteKey = "delete.key.4"; - transfer4.ifKey = "no.such.key"; - transfer4.ifType = "zoo.cfg"; - transfer4.ifKeyState = PropertyKeyState.PRESENT; - transfers.add(transfer4); - - commandParams.put(ConfigureTask.PARAMETER_TRANSFERS, new Gson().toJson(transfers)); - - ExecutionCommand executionCommand = getExecutionCommand(commandParams); - HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, - null, null); - - 
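// Illustrative sketch (not from the original test): the three DELETE
// transfers above reuse the same if-gating as the SET and REPLACE
// operations. Checked by hand against the base zoo.cfg, none of their
// conditions holds, so no key should be deleted; the assertions at the end
// of this test confirm exactly that.
boolean delete2Fires = "not.real.value".equals(properties.get("initLimit")); // false: value is "10"
boolean delete3Fires = !properties.containsKey("initLimit");                 // false: the key is present
boolean delete4Fires = properties.containsKey("no.such.key");                // false: the key is absent
assertFalse(delete2Fires || delete3Fires || delete4Fires);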
hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper( - executionCommand)); - - - action.setExecutionCommand(executionCommand); - action.setHostRoleCommand(hostRoleCommand); - - CommandReport report = action.execute(null); - assertNotNull(report); - - assertEquals(3, c.getConfigsByType("zoo.cfg").size()); - - config = c.getDesiredConfigByType("zoo.cfg"); - assertNotNull(config); - assertFalse("version2".equals(config.getTag())); - - Map<String, String> map = config.getProperties(); - assertEquals(5, map.size()); - assertEquals("11", map.get("initLimit")); - String[] shouldExistKeys = new String[]{"delete.key.1","delete.key.2","delete.key.3","delete.key.4"}; - for(String key: shouldExistKeys){ - assertTrue(map.containsKey(key)); - } - } - - /** - * Tests using the {@code <insert/>} element in a configuration upgrade pack. - * - * @throws Exception - */ - @Test - public void testInsert() throws Exception { - Cluster c = clusters.getCluster("c1"); - assertEquals(1, c.getConfigsByType("zoo.cfg").size()); - - Map<String, String> properties = new HashMap<String, String>() { - { - put("key_to_append", "append"); - put("key_to_prepend", "prepend"); - } - }; - - Config config = createConfig(c, repoVersion2110, "zoo.cfg", "version2", properties); - - c.addDesiredConfig("user", Collections.singleton(config)); - assertEquals(2, c.getConfigsByType("zoo.cfg").size()); - - createUpgrade(c, repoVersion2111); - - Map<String, String> commandParams = new HashMap<>(); - commandParams.put("clusterName", "c1"); - commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg"); - - // define the changes - final String prependValue = "This should be on a newline"; - final String appendValue = " this will be after..."; - - // insert tasks - List<Insert> insertions = new ArrayList<>(); - - Insert prepend = new Insert(); - prepend.insertType = InsertType.PREPEND; - prepend.key = "key_to_prepend"; - prepend.value = prependValue; - prepend.newlineBefore = false; - prepend.newlineAfter = true; - - Insert append = new Insert(); - append.insertType = InsertType.APPEND; - append.key = "key_to_append"; - append.value = appendValue; - append.newlineBefore = false; - append.newlineAfter = false; - - // add them to the list - insertions.add(prepend); - insertions.add(append); - - // just for fun, add them again - this will test their idempotence - insertions.add(prepend); - insertions.add(append); - - commandParams.put(ConfigureTask.PARAMETER_INSERTIONS, new Gson().toJson(insertions)); - - ExecutionCommand executionCommand = getExecutionCommand(commandParams); - HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null); - hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand)); - action.setExecutionCommand(executionCommand); - action.setHostRoleCommand(hostRoleCommand); - - CommandReport report = action.execute(null); - assertNotNull(report); - - assertEquals(3, c.getConfigsByType("zoo.cfg").size()); - - config = c.getDesiredConfigByType("zoo.cfg"); - assertNotNull(config); - assertFalse("version2".equals(config.getTag())); - - // build the expected values - String expectedPrepend = prependValue + System.lineSeparator() + "prepend"; - String expectedAppend = "append" + appendValue; - - assertEquals(expectedPrepend, config.getProperties().get("key_to_prepend")); - assertEquals(expectedAppend, config.getProperties().get("key_to_append")); - } - - /** - * Creates a cluster using {@link #repoVersion2110} with ZooKeeper installed. 
- */ - private void makeUpgradeCluster() throws Exception { - String clusterName = "c1"; - String hostName = "h1"; - - clusters.addCluster(clusterName, repoVersion2110.getStackId()); - - Cluster c = clusters.getCluster(clusterName); - - // add a host component - clusters.addHost(hostName); - Host host = clusters.getHost(hostName); - Map hostAttributes = new HashMap<>(); - hostAttributes.put("os_family", "redhat"); - hostAttributes.put("os_release_version", "6"); - host.setHostAttributes(hostAttributes); - - clusters.mapHostToCluster(hostName, clusterName); - - // !!! very important, otherwise the loops that walk the list of installed - // service properties will not run! - ServiceGroup serviceGroup = c.addServiceGroup("CORE", repoVersion2110.getStackId().getStackId()); - Service service = installService(c, serviceGroup, "ZOOKEEPER", repoVersion2110); - addServiceComponent(c, service, "ZOOKEEPER_SERVER"); - addServiceComponent(c, service, "ZOOKEEPER_CLIENT"); - createNewServiceComponentHost(c, "ZOOKEEPER", "ZOOKEEPER_SERVER", hostName); - createNewServiceComponentHost(c, "ZOOKEEPER", "ZOOKEEPER_CLIENT", hostName); - - Map properties = new HashMap() { - { - put("initLimit", "10"); - } - }; - - Config config = createConfig(c, repoVersion2110, "zoo.cfg", "version1", properties); - - c.addDesiredConfig("user", Collections.singleton(config)); - - // verify that our configs are there - String tickTime = m_configHelper.getPropertyValueFromStackDefinitions(c, "zoo.cfg", "tickTime"); - assertNotNull(tickTime); - } - - /** - * Installs a service in the cluster. - * - * @param cluster - * @param serviceGroup - * @param serviceName - * @return - * @throws AmbariException - */ - private Service installService(Cluster cluster, ServiceGroup serviceGroup, String serviceName, - RepositoryVersionEntity repositoryVersion - ) throws AmbariException { - Service service; - - try { - service = cluster.getService(serviceName); - } catch (ServiceNotFoundException e) { - service = serviceFactory.createNew(cluster, serviceGroup, Collections.emptyList(), serviceName, serviceName, repositoryVersion); - cluster.addService(service); - } - - return service; - } - - private ServiceComponent addServiceComponent(Cluster cluster, Service service, - String componentName) throws AmbariException { - ServiceComponent serviceComponent = null; - try { - serviceComponent = service.getServiceComponent(componentName); - } catch (ServiceComponentNotFoundException e) { - serviceComponent = serviceComponentFactory.createNew(service, componentName); - service.addServiceComponent(serviceComponent); - serviceComponent.setDesiredState(State.INSTALLED); - } - - return serviceComponent; - } - - private ServiceComponentHost createNewServiceComponentHost(Cluster cluster, String serviceName, - String svcComponent, String hostName) throws AmbariException { - Assert.assertNotNull(cluster.getConfigGroups()); - Service s = cluster.getService(serviceName); - ServiceComponent sc = addServiceComponent(cluster, s, svcComponent); - - ServiceComponentHost sch = serviceComponentHostFactory.createNew(sc, hostName); - - sc.addServiceComponentHost(sch); - sch.setDesiredState(State.INSTALLED); - sch.setState(State.INSTALLED); - return sch; - } - - /** - * Creates an upgrade and associates it with the cluster. 
- */ - private UpgradeEntity createUpgrade(Cluster cluster, RepositoryVersionEntity repositoryVersion) - throws Exception { - - // create some entities for the finalize action to work with for patch - // history - RequestEntity requestEntity = new RequestEntity(); - requestEntity.setClusterId(cluster.getClusterId()); - requestEntity.setRequestId(1L); - requestEntity.setStartTime(System.currentTimeMillis()); - requestEntity.setCreateTime(System.currentTimeMillis()); - requestDAO.create(requestEntity); - - UpgradeEntity upgradeEntity = new UpgradeEntity(); - upgradeEntity.setId(1L); - upgradeEntity.setClusterId(cluster.getClusterId()); - upgradeEntity.setRequestEntity(requestEntity); - upgradeEntity.setUpgradePackage(""); - upgradeEntity.setRepositoryVersion(repositoryVersion); - upgradeEntity.setUpgradeType(UpgradeType.NON_ROLLING); - - Map services = cluster.getServices(); - for (String serviceName : services.keySet()) { - Service service = services.get(serviceName); - Map components = service.getServiceComponents(); - for (String componentName : components.keySet()) { - UpgradeHistoryEntity history = new UpgradeHistoryEntity(); - history.setUpgrade(upgradeEntity); - history.setServiceName(serviceName); - history.setComponentName(componentName); - history.setFromRepositoryVersion(service.getDesiredRepositoryVersion()); - history.setTargetRepositoryVersion(repositoryVersion); - upgradeEntity.addHistory(history); - } - } - - upgradeDAO.create(upgradeEntity); - cluster.setUpgradeEntity(upgradeEntity); - return upgradeEntity; - } - - private ExecutionCommand getExecutionCommand(Map commandParams) { - ExecutionCommand executionCommand = new ExecutionCommand(); - executionCommand.setClusterName("c1"); - executionCommand.setCommandParams(commandParams); - executionCommand.setRoleParams(new HashMap<>()); - executionCommand.getRoleParams().put(ServerAction.ACTION_USER_NAME, "username"); - - return executionCommand; - } - - private Config createConfig(Cluster cluster, RepositoryVersionEntity repoVersion, String type, - String tag, Map properties) { - return configFactory.createNew(repoVersion.getStackId(), cluster, type, tag, properties, - NO_ATTRIBUTES, 1L); - } -} diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java deleted file mode 100644 index ca4d00aee2b..00000000000 --- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java +++ /dev/null @@ -1,696 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.ambari.server.serveraction.upgrades; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; - -import java.lang.reflect.Field; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import javax.persistence.EntityManager; - -import org.apache.ambari.server.AmbariException; -import org.apache.ambari.server.H2DatabaseCleaner; -import org.apache.ambari.server.ServiceComponentNotFoundException; -import org.apache.ambari.server.ServiceNotFoundException; -import org.apache.ambari.server.actionmanager.ExecutionCommandWrapper; -import org.apache.ambari.server.actionmanager.HostRoleCommand; -import org.apache.ambari.server.actionmanager.HostRoleCommandFactory; -import org.apache.ambari.server.actionmanager.HostRoleStatus; -import org.apache.ambari.server.agent.CommandReport; -import org.apache.ambari.server.agent.ExecutionCommand; -import org.apache.ambari.server.api.services.AmbariMetaInfo; -import org.apache.ambari.server.controller.AmbariManagementController; -import org.apache.ambari.server.controller.AmbariServer; -import org.apache.ambari.server.controller.ServiceConfigVersionResponse; -import org.apache.ambari.server.orm.GuiceJpaInitializer; -import org.apache.ambari.server.orm.InMemoryDefaultTestModule; -import org.apache.ambari.server.orm.OrmTestHelper; -import org.apache.ambari.server.orm.dao.HostComponentStateDAO; -import org.apache.ambari.server.orm.dao.HostDAO; -import org.apache.ambari.server.orm.dao.HostVersionDAO; -import org.apache.ambari.server.orm.dao.RepositoryVersionDAO; -import org.apache.ambari.server.orm.dao.RequestDAO; -import org.apache.ambari.server.orm.dao.StackDAO; -import org.apache.ambari.server.orm.dao.UpgradeDAO; -import org.apache.ambari.server.orm.entities.HostComponentStateEntity; -import org.apache.ambari.server.orm.entities.HostVersionEntity; -import org.apache.ambari.server.orm.entities.RepositoryVersionEntity; -import org.apache.ambari.server.orm.entities.RequestEntity; -import org.apache.ambari.server.orm.entities.StackEntity; -import org.apache.ambari.server.orm.entities.UpgradeEntity; -import org.apache.ambari.server.orm.entities.UpgradeHistoryEntity; -import org.apache.ambari.server.serveraction.ServerAction; -import org.apache.ambari.server.state.Cluster; -import org.apache.ambari.server.state.Clusters; -import org.apache.ambari.server.state.Config; -import org.apache.ambari.server.state.ConfigFactory; -import org.apache.ambari.server.state.Host; -import org.apache.ambari.server.state.RepositoryVersionState; -import org.apache.ambari.server.state.Service; -import org.apache.ambari.server.state.ServiceComponent; -import org.apache.ambari.server.state.ServiceComponentFactory; -import org.apache.ambari.server.state.ServiceComponentHost; -import org.apache.ambari.server.state.ServiceComponentHostFactory; -import org.apache.ambari.server.state.ServiceFactory; -import org.apache.ambari.server.state.ServiceGroup; -import org.apache.ambari.server.state.StackId; -import org.apache.ambari.server.state.State; -import org.apache.ambari.server.state.UpgradeState; -import org.apache.ambari.server.state.stack.UpgradePack; -import org.apache.ambari.server.state.stack.upgrade.UpgradeType; -import org.apache.ambari.server.utils.EventBusSynchronizer; -import org.apache.commons.lang.StringUtils; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import 
org.junit.Test; - -import com.google.inject.Guice; -import com.google.inject.Inject; -import com.google.inject.Injector; -import com.google.inject.persist.UnitOfWork; - -/** - * Tests upgrade-related server side actions - */ -public class UpgradeActionTest { - private static final String clusterName = "c1"; - - private static final String HDP_2_1_1_0 = "2.1.1.0-1"; - private static final String HDP_2_1_1_1 = "2.1.1.1-2"; - - private static final String HDP_2_2_0_1 = "2.2.0.1-3"; - - private static final StackId HDP_21_STACK = new StackId("HDP-2.1.1"); - private static final StackId HDP_22_STACK = new StackId("HDP-2.2.0"); - - private RepositoryVersionEntity sourceRepositoryVersion; - - private Injector m_injector; - - private AmbariManagementController amc; - @Inject - private OrmTestHelper m_helper; - @Inject - private RepositoryVersionDAO repoVersionDAO; - @Inject - private Clusters clusters; - @Inject - private HostVersionDAO hostVersionDAO; - @Inject - private HostDAO hostDAO; - @Inject - private HostRoleCommandFactory hostRoleCommandFactory; - @Inject - private ServiceFactory serviceFactory; - @Inject - private ServiceComponentFactory serviceComponentFactory; - @Inject - private ServiceComponentHostFactory serviceComponentHostFactory; - @Inject - private RequestDAO requestDAO; - @Inject - private UpgradeDAO upgradeDAO; - @Inject - private StackDAO stackDAO; - @Inject - private AmbariMetaInfo ambariMetaInfo; - @Inject - private FinalizeUpgradeAction finalizeUpgradeAction; - @Inject - private ConfigFactory configFactory; - - @Inject - private HostComponentStateDAO hostComponentStateDAO; - - private RepositoryVersionEntity repositoryVersion2110; - private RepositoryVersionEntity repositoryVersion2111; - private RepositoryVersionEntity repositoryVersion2201; - - @Before - public void setup() throws Exception { - m_injector = Guice.createInjector(new InMemoryDefaultTestModule()); - EventBusSynchronizer.synchronizeAmbariEventPublisher(m_injector); - m_injector.getInstance(GuiceJpaInitializer.class); - m_injector.injectMembers(this); - m_injector.getInstance(UnitOfWork.class).begin(); - - // Initialize AmbariManagementController - amc = m_injector.getInstance(AmbariManagementController.class); - - Field field = AmbariServer.class.getDeclaredField("clusterController"); - field.setAccessible(true); - field.set(null, amc); - - repositoryVersion2110 = m_helper.getOrCreateRepositoryVersion(HDP_21_STACK, HDP_2_1_1_0); - repositoryVersion2111 = m_helper.getOrCreateRepositoryVersion(HDP_21_STACK, HDP_2_1_1_1); - repositoryVersion2201 = m_helper.getOrCreateRepositoryVersion(HDP_22_STACK, HDP_2_2_0_1); - } - - @After - public void teardown() throws Exception { - m_injector.getInstance(UnitOfWork.class).end(); - H2DatabaseCleaner.clearDatabase(m_injector.getProvider(EntityManager.class).get()); - } - - private void makeDowngradeCluster(RepositoryVersionEntity sourceRepoVersion, - RepositoryVersionEntity targetRepoVersion) throws Exception { - String hostName = "h1"; - - clusters.addCluster(clusterName, sourceRepoVersion.getStackId()); - - // add a host component - clusters.addHost(hostName); - - Host host = clusters.getHost(hostName); - - Map hostAttributes = new HashMap<>(); - hostAttributes.put("os_family", "redhat"); - hostAttributes.put("os_release_version", "6"); - host.setHostAttributes(hostAttributes); - - HostVersionEntity entity = new HostVersionEntity(); - entity.setHostEntity(hostDAO.findByName(hostName)); - entity.setRepositoryVersion(targetRepoVersion); - 
entity.setState(RepositoryVersionState.INSTALLING); - hostVersionDAO.create(entity); - } - - private Cluster createUpgradeCluster( - RepositoryVersionEntity sourceRepoVersion, String hostName) throws Exception { - - clusters.addCluster(clusterName, sourceRepoVersion.getStackId()); - Cluster cluster = clusters.getCluster(clusterName); - - // add a host component - clusters.addHost(hostName); - - Host host = clusters.getHost(hostName); - - Map hostAttributes = new HashMap<>(); - hostAttributes.put("os_family", "redhat"); - hostAttributes.put("os_release_version", "6"); - host.setHostAttributes(hostAttributes); - - // without this, HostEntity will not have a relation to ClusterEntity - clusters.mapHostToCluster(hostName, clusterName); - - HostVersionEntity entity = new HostVersionEntity(hostDAO.findByName(hostName), - sourceRepoVersion, RepositoryVersionState.INSTALLED); - - hostVersionDAO.create(entity); - - return cluster; - } - - private void createHostVersions(RepositoryVersionEntity targetRepoVersion, - String hostName) throws AmbariException { - Cluster c = clusters.getCluster(clusterName); - - // create a single host with the UPGRADED HostVersionEntity - HostDAO hostDAO = m_injector.getInstance(HostDAO.class); - - HostVersionEntity entity = new HostVersionEntity(hostDAO.findByName(hostName), - targetRepoVersion, RepositoryVersionState.INSTALLED); - - hostVersionDAO.create(entity); - - // verify the UPGRADED host versions were created successfully - List hostVersions = hostVersionDAO.findHostVersionByClusterAndRepository( - c.getClusterId(), targetRepoVersion); - - assertEquals(1, hostVersions.size()); - assertEquals(RepositoryVersionState.INSTALLED, hostVersions.get(0).getState()); - } - - private void makeCrossStackUpgradeClusterAndSourceRepo(StackId sourceStack, String sourceRepo, - String hostName)throws Exception { - - clusters.addCluster(clusterName, sourceStack); - - StackEntity stackEntitySource = stackDAO.find(sourceStack.getStackName(), sourceStack.getStackVersion()); - - assertNotNull(stackEntitySource); - - Cluster c = clusters.getCluster(clusterName); - c.setCurrentStackVersion(sourceStack); - c.setDesiredStackVersion(sourceStack); - - // add a host component - clusters.addHost(hostName); - Host host = clusters.getHost(hostName); - - Map hostAttributes = new HashMap<>(); - hostAttributes.put("os_family", "redhat"); - hostAttributes.put("os_release_version", "6"); - host.setHostAttributes(hostAttributes); - - clusters.mapHostToCluster(hostName, clusterName); - - // Create the starting repo version - sourceRepositoryVersion = m_helper.getOrCreateRepositoryVersion(sourceStack, sourceRepo); - } - - private void makeCrossStackUpgradeTargetRepo(StackId targetStack, String targetRepo, String hostName) throws Exception{ - StackEntity stackEntityTarget = stackDAO.find(targetStack.getStackName(), targetStack.getStackVersion()); - assertNotNull(stackEntityTarget); - - m_helper.getOrCreateRepositoryVersion(new StackId(stackEntityTarget), targetRepo); - - // Start upgrading the newer repo - - HostDAO hostDAO = m_injector.getInstance(HostDAO.class); - - HostVersionEntity entity = new HostVersionEntity(); - entity.setHostEntity(hostDAO.findByName(hostName)); - entity.setRepositoryVersion(repoVersionDAO.findByStackAndVersion(targetStack, targetRepo)); - entity.setState(RepositoryVersionState.INSTALLED); - hostVersionDAO.create(entity); - } - - /*** - * During an Express Upgrade that crosses a stack version, Ambari calls UpdateDesiredRepositoryAction - * in order to change the stack and 
apply configs. - * The configs that are applied must be saved with the username that is passed in the role params. - */ - @Test - public void testExpressUpgradeUpdateDesiredRepositoryAction() throws Exception { - StackId sourceStack = HDP_21_STACK; - StackId targetStack = HDP_22_STACK; - String sourceRepo = HDP_2_1_1_0; - String hostName = "h1"; - - // Must be a NON_ROLLING upgrade that jumps stacks in order for it to apply config changes. - // That upgrade pack has changes for ZK and NameNode. - String upgradePackName = "upgrade_nonrolling_new_stack"; - - Map packs = ambariMetaInfo.getUpgradePacks(sourceStack.getStackName(), sourceStack.getStackVersion()); - Assert.assertTrue(packs.containsKey(upgradePackName)); - - makeCrossStackUpgradeClusterAndSourceRepo(sourceStack, sourceRepo, hostName); - - Cluster cluster = clusters.getCluster(clusterName); - - // Install ZK and HDFS with some components - ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", targetStack.getStackId()); - Service zk = installService(cluster, serviceGroup, "ZOOKEEPER", repositoryVersion2110); - addServiceComponent(zk, "ZOOKEEPER_SERVER"); - addServiceComponent(zk, "ZOOKEEPER_CLIENT"); - createNewServiceComponentHost(cluster, serviceGroup, "ZOOKEEPER", "ZOOKEEPER_SERVER", "h1"); - createNewServiceComponentHost(cluster, serviceGroup, "ZOOKEEPER", "ZOOKEEPER_CLIENT", "h1"); - - Service hdfs = installService(cluster, serviceGroup, "HDFS", repositoryVersion2110); - addServiceComponent(hdfs, "NAMENODE"); - addServiceComponent(hdfs, "DATANODE"); - createNewServiceComponentHost(cluster, serviceGroup, "HDFS", "NAMENODE", "h1"); - createNewServiceComponentHost(cluster, serviceGroup, "HDFS", "DATANODE", "h1"); - - makeCrossStackUpgradeTargetRepo(targetStack, repositoryVersion2201.getVersion(), hostName); - createUpgrade(cluster, repositoryVersion2201); - - Assert.assertNotNull(repositoryVersion2201); - - // Create some configs - createConfigs(cluster); - Collection configs = cluster.getAllConfigs(); - Assert.assertFalse(configs.isEmpty()); - - Map commandParams = new HashMap<>(); - ExecutionCommand executionCommand = new ExecutionCommand(); - executionCommand.setCommandParams(commandParams); - Map roleParams = new HashMap<>(); - - // User that is performing the config changes - String userName = "admin"; - roleParams.put(ServerAction.ACTION_USER_NAME, userName); - executionCommand.setRoleParams(roleParams); - executionCommand.setClusterName(clusterName); - - HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null); - hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand)); - - // Call the action to change the desired stack and apply the configs from the Config Pack called by the Upgrade Pack. 
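// Illustrative sketch (not from the original test): the acting username was
// stored above under ServerAction.ACTION_USER_NAME, and the action is
// expected to attribute the new service config versions to it. It can be
// read straight back off the command that was just wired up:
String actingUser = executionCommand.getRoleParams().get(ServerAction.ACTION_USER_NAME);
assertEquals("admin", actingUser);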
- UpdateDesiredRepositoryAction action = m_injector.getInstance(UpdateDesiredRepositoryAction.class); - action.setExecutionCommand(executionCommand); - action.setHostRoleCommand(hostRoleCommand); - - List configVersionsBefore = cluster.getServiceConfigVersions(); - - CommandReport report = action.execute(null); - assertNotNull(report); - - assertEquals(HostRoleStatus.COMPLETED.name(), report.getStatus()); - - List configVersionsAfter = cluster.getServiceConfigVersions(); - Assert.assertFalse(configVersionsAfter.isEmpty()); - - assertTrue(configVersionsAfter.size() - configVersionsBefore.size() >= 1); - } - - @Test - public void testFinalizeDowngrade() throws Exception { - makeDowngradeCluster(repositoryVersion2110, repositoryVersion2111); - - Cluster cluster = clusters.getCluster(clusterName); - - createUpgrade(cluster, repositoryVersion2111); - - Map commandParams = new HashMap<>(); - ExecutionCommand executionCommand = new ExecutionCommand(); - executionCommand.setCommandParams(commandParams); - executionCommand.setClusterName(clusterName); - - HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null); - hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand)); - - finalizeUpgradeAction.setExecutionCommand(executionCommand); - finalizeUpgradeAction.setHostRoleCommand(hostRoleCommand); - - CommandReport report = finalizeUpgradeAction.execute(null); - assertNotNull(report); - assertEquals(HostRoleStatus.COMPLETED.name(), report.getStatus()); - - for (HostVersionEntity entity : hostVersionDAO.findByClusterAndHost(clusterName, "h1")) { - if (StringUtils.equals(entity.getRepositoryVersion().getVersion(), repositoryVersion2110.getVersion())) { - assertEquals(RepositoryVersionState.CURRENT, entity.getState()); - } else { - assertEquals(RepositoryVersionState.INSTALLED, entity.getState()); - } - } - } - - @Test - public void testFinalizeUpgrade() throws Exception { - String hostName = "h1"; - - createUpgradeCluster(repositoryVersion2110, hostName); - createHostVersions(repositoryVersion2111, hostName); - - Cluster cluster = clusters.getCluster(clusterName); - - createUpgrade(cluster, repositoryVersion2111); - - // Finalize the upgrade - Map commandParams = new HashMap<>(); - ExecutionCommand executionCommand = new ExecutionCommand(); - executionCommand.setCommandParams(commandParams); - executionCommand.setClusterName(clusterName); - - HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null); - hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand)); - - finalizeUpgradeAction.setExecutionCommand(executionCommand); - finalizeUpgradeAction.setHostRoleCommand(hostRoleCommand); - - // this should fail since the host versions have not moved to current - CommandReport report = finalizeUpgradeAction.execute(null); - assertEquals(HostRoleStatus.FAILED.name(), report.getStatus()); - - List hostVersions = hostVersionDAO.findHostVersionByClusterAndRepository( - cluster.getClusterId(), repositoryVersion2111); - - for (HostVersionEntity hostVersion : hostVersions) { - hostVersion.setState(RepositoryVersionState.CURRENT); - } - - report = finalizeUpgradeAction.execute(null); - assertEquals(HostRoleStatus.COMPLETED.name(), report.getStatus()); - - hostVersions = hostVersionDAO.findHostVersionByClusterAndRepository(cluster.getClusterId(), - repositoryVersion2111); - - for (HostVersionEntity hostVersion : hostVersions) { - Collection hostComponentStates = 
hostComponentStateDAO.findByHost(hostVersion.getHostName()); - for (HostComponentStateEntity hostComponentStateEntity: hostComponentStates) { - assertEquals(UpgradeState.NONE, hostComponentStateEntity.getUpgradeState()); - } - } - } - - /** - * Tests that finalize still works when there are hosts which are already - * {@link RepositoryVersionState#CURRENT}. - */ - @Test - public void testFinalizeWithHostsAlreadyCurrent() throws Exception { - String hostName = "h1"; - - createUpgradeCluster(repositoryVersion2110, hostName); - createHostVersions(repositoryVersion2111, hostName); - - // move the old version from CURRENT to INSTALLED and the new version from - // UPGRADED to CURRENT - this will simulate what happens when a host is - // removed before finalization and all hosts transition to CURRENT - List hostVersions = hostVersionDAO.findAll(); - for (HostVersionEntity hostVersion : hostVersions) { - if (hostVersion.getState() == RepositoryVersionState.CURRENT) { - hostVersion.setState(RepositoryVersionState.INSTALLED); - } else { - hostVersion.setState(RepositoryVersionState.CURRENT); - } - - hostVersionDAO.merge(hostVersion); - } - - // Verify the repo before calling Finalize - Cluster cluster = clusters.getCluster(clusterName); - - createUpgrade(cluster, repositoryVersion2111); - - // Finalize the upgrade - Map commandParams = new HashMap<>(); - - ExecutionCommand executionCommand = new ExecutionCommand(); - executionCommand.setCommandParams(commandParams); - executionCommand.setClusterName(clusterName); - - HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null); - hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand)); - - finalizeUpgradeAction.setExecutionCommand(executionCommand); - finalizeUpgradeAction.setHostRoleCommand(hostRoleCommand); - - CommandReport report = finalizeUpgradeAction.execute(null); - assertNotNull(report); - assertEquals(HostRoleStatus.COMPLETED.name(), report.getStatus()); - } - - /** - * Tests that all host versions are correct after upgrade. This test will - * ensure that the prior CURRENT versions are moved to INSTALLED while not - * touching any others. 
- */ - @Test - public void testHostVersionsAfterUpgrade() throws Exception { - String hostName = "h1"; - Cluster cluster = createUpgradeCluster(repositoryVersion2110, hostName); - createHostVersions(repositoryVersion2111, hostName); - createHostVersions(repositoryVersion2201, hostName); - - // Install ZK with some components - ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", repositoryVersion2111.getStackName()+"-"+repositoryVersion2111.getStackVersion()); - Service zk = installService(cluster, serviceGroup, "ZOOKEEPER", repositoryVersion2110); - addServiceComponent(zk, "ZOOKEEPER_SERVER"); - addServiceComponent(zk, "ZOOKEEPER_CLIENT"); - createNewServiceComponentHost(cluster, serviceGroup, "ZOOKEEPER", "ZOOKEEPER_SERVER", hostName); - createNewServiceComponentHost(cluster, serviceGroup, "ZOOKEEPER", "ZOOKEEPER_CLIENT", hostName); - - List hostVersions = hostVersionDAO.findAll(); - assertEquals(3, hostVersions.size()); - - // repo 2110 - CURRENT (upgrading from) - // repo 2111 - CURRENT (all hosts reported in during upgrade) - // repo 2201 - NOT_REQUIRED (different stack) - for (HostVersionEntity hostVersion : hostVersions) { - RepositoryVersionEntity hostRepoVersion = hostVersion.getRepositoryVersion(); - if (repositoryVersion2110.equals(hostRepoVersion)) { - hostVersion.setState(RepositoryVersionState.CURRENT); - } else if (repositoryVersion2111.equals(hostRepoVersion)) { - hostVersion.setState(RepositoryVersionState.CURRENT); - } else { - hostVersion.setState(RepositoryVersionState.NOT_REQUIRED); - } - - hostVersionDAO.merge(hostVersion); - } - - // upgrade to 2111 - createUpgrade(cluster, repositoryVersion2111); - - // push all services to the correct repo version for finalize - Map services = cluster.getServices(); - assertTrue(services.size() > 0); - for (Service service : services.values()) { - service.setDesiredRepositoryVersion(repositoryVersion2111); - } - - // push all components to the correct version - List hostComponentStates = hostComponentStateDAO.findByHost(hostName); - for (HostComponentStateEntity hostComponentState : hostComponentStates) { - hostComponentState.setVersion(repositoryVersion2111.getVersion()); - hostComponentStateDAO.merge(hostComponentState); - } - - Map commandParams = new HashMap<>(); - ExecutionCommand executionCommand = new ExecutionCommand(); - executionCommand.setCommandParams(commandParams); - executionCommand.setClusterName(clusterName); - - HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null); - hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand)); - - finalizeUpgradeAction.setExecutionCommand(executionCommand); - finalizeUpgradeAction.setHostRoleCommand(hostRoleCommand); - - // finalize - CommandReport report = finalizeUpgradeAction.execute(null); - assertNotNull(report); - assertEquals(HostRoleStatus.COMPLETED.name(), report.getStatus()); - - for (HostVersionEntity hostVersion : hostVersions) { - RepositoryVersionEntity hostRepoVersion = hostVersion.getRepositoryVersion(); - if (repositoryVersion2110.equals(hostRepoVersion)) { - assertEquals(RepositoryVersionState.INSTALLED, hostVersion.getState()); - } else if (repositoryVersion2111.equals(hostRepoVersion)) { - assertEquals(RepositoryVersionState.CURRENT, hostVersion.getState()); - } else { - assertEquals(RepositoryVersionState.NOT_REQUIRED, hostVersion.getState()); - } - } - } - - private ServiceComponentHost createNewServiceComponentHost(Cluster cluster, ServiceGroup serviceGroup, String svc, - String 
svcComponent, String hostName - ) throws AmbariException { - Assert.assertNotNull(cluster.getConfigGroups()); - Service s = installService(cluster, serviceGroup, svc, sourceRepositoryVersion); - ServiceComponent sc = addServiceComponent(s, svcComponent); - - ServiceComponentHost sch = serviceComponentHostFactory.createNew(sc, hostName); - - sc.addServiceComponentHost(sch); - sch.setDesiredState(State.INSTALLED); - sch.setState(State.INSTALLED); - return sch; - } - - private Service installService(Cluster cluster, ServiceGroup serviceGroup, String serviceName, - RepositoryVersionEntity repositoryVersionEntity - ) throws AmbariException { - Service service; - - try { - service = cluster.getService(serviceName); - } catch (ServiceNotFoundException e) { - service = serviceFactory.createNew(cluster, serviceGroup, Collections.emptyList(), serviceName, serviceName, repositoryVersionEntity); - cluster.addService(service); - } - - return service; - } - - private ServiceComponent addServiceComponent(Service service, - String componentName - ) throws AmbariException { - ServiceComponent serviceComponent; - try { - serviceComponent = service.getServiceComponent(componentName); - } catch (ServiceComponentNotFoundException e) { - serviceComponent = serviceComponentFactory.createNew(service, componentName); - service.addServiceComponent(serviceComponent); - serviceComponent.setDesiredState(State.INSTALLED); - } - - return serviceComponent; - } - - private void createConfigs(Cluster cluster) { - Map<String, String> properties = new HashMap<>(); - Map<String, Map<String, String>> propertiesAttributes = new HashMap<>(); - properties.put("a", "a1"); - properties.put("b", "b1"); - - configFactory.createNew(cluster, "zookeeper-env", "version-" + System.currentTimeMillis(), - properties, propertiesAttributes); - - properties.put("zookeeper_a", "value_1"); - properties.put("zookeeper_b", "value_2"); - - configFactory.createNew(cluster, "hdfs-site", "version-" + System.currentTimeMillis(), - properties, propertiesAttributes); - - properties.put("hdfs_a", "value_3"); - properties.put("hdfs_b", "value_4"); - - configFactory.createNew(cluster, "core-site", "version-" + System.currentTimeMillis(), - properties, propertiesAttributes); - - configFactory.createNew(cluster, "foo-site", "version-" + System.currentTimeMillis(), - properties, propertiesAttributes); - } - - /** - * Creates an upgrade and associates it with the cluster. 
- */ - private UpgradeEntity createUpgrade(Cluster cluster, RepositoryVersionEntity repositoryVersion) - throws Exception { - - // create some entities for the finalize action to work with for patch - // history - RequestEntity requestEntity = new RequestEntity(); - requestEntity.setClusterId(cluster.getClusterId()); - requestEntity.setRequestId(1L); - requestEntity.setStartTime(System.currentTimeMillis()); - requestEntity.setCreateTime(System.currentTimeMillis()); - requestDAO.create(requestEntity); - - UpgradeEntity upgradeEntity = new UpgradeEntity(); - upgradeEntity.setId(1L); - upgradeEntity.setClusterId(cluster.getClusterId()); - upgradeEntity.setRequestEntity(requestEntity); - upgradeEntity.setUpgradePackage(""); - upgradeEntity.setRepositoryVersion(repositoryVersion); - upgradeEntity.setUpgradeType(UpgradeType.NON_ROLLING); - - Map<String, Service> services = cluster.getServices(); - for (String serviceName : services.keySet()) { - Service service = services.get(serviceName); - Map<String, ServiceComponent> components = service.getServiceComponents(); - for (String componentName : components.keySet()) { - UpgradeHistoryEntity history = new UpgradeHistoryEntity(); - history.setUpgrade(upgradeEntity); - history.setServiceName(serviceName); - history.setComponentName(componentName); - history.setFromRepositoryVersion(service.getDesiredRepositoryVersion()); - history.setTargetRepositoryVersion(repositoryVersion); - upgradeEntity.addHistory(history); - } - } - - upgradeDAO.create(upgradeEntity); - cluster.setUpgradeEntity(upgradeEntity); - return upgradeEntity; - } -} diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java deleted file mode 100644 index 6f477634887..00000000000 --- a/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java +++ /dev/null @@ -1,2911 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.ambari.server.state; - -import static com.google.common.collect.Sets.newLinkedHashSet; -import static java.util.Collections.emptySet; -import static java.util.Collections.singletonList; -import static org.easymock.EasyMock.anyString; -import static org.easymock.EasyMock.eq; -import static org.easymock.EasyMock.expect; -import static org.easymock.EasyMock.expectLastCall; -import static org.easymock.EasyMock.replay; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; - -import java.lang.reflect.Field; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.LinkedHashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -import org.apache.ambari.annotations.Experimental; -import org.apache.ambari.annotations.ExperimentalFeature; -import org.apache.ambari.server.AmbariException; -import org.apache.ambari.server.H2DatabaseCleaner; -import org.apache.ambari.server.actionmanager.HostRoleCommandFactory; -import org.apache.ambari.server.api.services.AmbariMetaInfo; -import org.apache.ambari.server.api.services.ServiceGroupKey; -import org.apache.ambari.server.api.services.ServiceKey; -import org.apache.ambari.server.controller.AmbariManagementController; -import org.apache.ambari.server.controller.ClusterRequest; -import org.apache.ambari.server.controller.ConfigurationRequest; -import org.apache.ambari.server.controller.internal.UpgradeResourceProvider; -import org.apache.ambari.server.orm.GuiceJpaInitializer; -import org.apache.ambari.server.orm.InMemoryDefaultTestModule; -import org.apache.ambari.server.orm.OrmTestHelper; -import org.apache.ambari.server.orm.dao.ServiceConfigDAO; -import org.apache.ambari.server.orm.entities.ClusterConfigEntity; -import org.apache.ambari.server.orm.entities.RepositoryVersionEntity; -import org.apache.ambari.server.orm.entities.ServiceConfigEntity; -import org.apache.ambari.server.security.TestAuthenticationFactory; -import org.apache.ambari.server.security.authorization.AuthorizationException; -import org.apache.ambari.server.stack.HostsType; -import org.apache.ambari.server.stack.MasterHostResolver; -import org.apache.ambari.server.stack.StackManagerMock; -import org.apache.ambari.server.stageplanner.RoleGraphFactory; -import org.apache.ambari.server.state.UpgradeHelper.UpgradeGroupHolder; -import org.apache.ambari.server.state.stack.ConfigUpgradePack; -import org.apache.ambari.server.state.stack.UpgradePack; -import org.apache.ambari.server.state.stack.upgrade.ConfigUpgradeChangeDefinition; -import org.apache.ambari.server.state.stack.upgrade.ConfigUpgradeChangeDefinition.ConfigurationKeyValue; -import org.apache.ambari.server.state.stack.upgrade.ConfigureTask; -import org.apache.ambari.server.state.stack.upgrade.Direction; -import org.apache.ambari.server.state.stack.upgrade.ExecuteTask; -import org.apache.ambari.server.state.stack.upgrade.Grouping; -import org.apache.ambari.server.state.stack.upgrade.HostOrderGrouping; -import org.apache.ambari.server.state.stack.upgrade.HostOrderItem; -import org.apache.ambari.server.state.stack.upgrade.HostOrderItem.HostOrderActionType; -import 
org.apache.ambari.server.state.stack.upgrade.ManualTask; -import org.apache.ambari.server.state.stack.upgrade.SecurityCondition; -import org.apache.ambari.server.state.stack.upgrade.ServiceCheckGrouping; -import org.apache.ambari.server.state.stack.upgrade.StageWrapper; -import org.apache.ambari.server.state.stack.upgrade.StopGrouping; -import org.apache.ambari.server.state.stack.upgrade.Task; -import org.apache.ambari.server.state.stack.upgrade.TaskWrapper; -import org.apache.ambari.server.state.stack.upgrade.UpgradeScope; -import org.apache.ambari.server.state.stack.upgrade.UpgradeType; -import org.apache.ambari.server.utils.EventBusSynchronizer; -import org.easymock.Capture; -import org.easymock.EasyMock; -import org.easymock.EasyMockSupport; -import org.easymock.IAnswer; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.springframework.security.core.context.SecurityContextHolder; - -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.Lists; -import com.google.common.collect.Sets; -import com.google.gson.Gson; -import com.google.gson.reflect.TypeToken; -import com.google.inject.Binder; -import com.google.inject.Guice; -import com.google.inject.Injector; -import com.google.inject.Module; -import com.google.inject.Provider; -import com.google.inject.assistedinject.FactoryModuleBuilder; -import com.google.inject.util.Modules; - -/** - * Tests the {@link UpgradeHelper} class - */ -public class UpgradeHelperTest extends EasyMockSupport { - - private static final StackId STACK_ID_HDP_211 = new StackId("HDP-2.1.1"); - private static final StackId STACK_ID_HDP_220 = new StackId("HDP-2.2.0"); - private static final String UPGRADE_VERSION = "2.2.1.0-1234"; - private static final String DOWNGRADE_VERSION = "2.2.0.0-1234"; - - private Injector injector; - private AmbariMetaInfo ambariMetaInfo; - private StackManagerMock stackManagerMock; - private OrmTestHelper helper; - private MasterHostResolver m_masterHostResolver; - private UpgradeHelper m_upgradeHelper; - private ConfigHelper m_configHelper; - private AmbariManagementController m_managementController; - private Gson m_gson = new Gson(); - - private RepositoryVersionEntity repositoryVersion2110; - private RepositoryVersionEntity repositoryVersion2200; - private RepositoryVersionEntity repositoryVersion2210; - private HostsType namenodeHosts = HostsType.highAvailability("h1", "h2", newLinkedHashSet(Arrays.asList("h1", "h2"))); - - /** - * Because test cases need to share config mocks, put common ones in this function. - * @throws Exception - */ - private void setConfigMocks() throws Exception { - // configure the mock to return data given a specific placeholder - m_configHelper = EasyMock.createNiceMock(ConfigHelper.class); - expect(m_configHelper.getPlaceholderValueFromDesiredConfigurations( - EasyMock.anyObject(Cluster.class), eq("{{foo/bar}}"))).andReturn("placeholder-rendered-properly").anyTimes(); - expect(m_configHelper.getEffectiveDesiredTags( - EasyMock.anyObject(Cluster.class), EasyMock.anyObject(String.class))).andReturn(new HashMap<>()).anyTimes(); - } - - @Before - public void before() throws Exception { - setConfigMocks(); - // Most test cases can replay the common config mocks. 
If any test case needs custom ones, it can re-initialize m_configHelper; - replay(m_configHelper); - - final InMemoryDefaultTestModule injectorModule = new InMemoryDefaultTestModule() { - @Override - protected void configure() { - super.configure(); - } - }; - - MockModule mockModule = new MockModule(); - - // create an injector which will inject the mocks - injector = Guice.createInjector(Modules.override(injectorModule).with(mockModule)); - H2DatabaseCleaner.resetSequences(injector); - injector.getInstance(GuiceJpaInitializer.class); - - EventBusSynchronizer.synchronizeAmbariEventPublisher(injector); - EventBusSynchronizer.synchronizeAlertEventPublisher(injector); - - helper = injector.getInstance(OrmTestHelper.class); - ambariMetaInfo = injector.getInstance(AmbariMetaInfo.class); - stackManagerMock = (StackManagerMock) ambariMetaInfo.getStackManager(); - m_upgradeHelper = injector.getInstance(UpgradeHelper.class); - m_masterHostResolver = EasyMock.createMock(MasterHostResolver.class); - m_managementController = injector.getInstance(AmbariManagementController.class); - - repositoryVersion2110 = helper.getOrCreateRepositoryVersion(STACK_ID_HDP_211, "2.1.1.0-1234"); - repositoryVersion2200 = helper.getOrCreateRepositoryVersion(STACK_ID_HDP_220, DOWNGRADE_VERSION); - repositoryVersion2210 = helper.getOrCreateRepositoryVersion(STACK_ID_HDP_220, UPGRADE_VERSION); - - // Set the authenticated user - // TODO: remove this or replace the authenticated user to test authorization rules - SecurityContextHolder.getContext().setAuthentication(TestAuthenticationFactory.createAdministrator("admin")); - } - - @After - public void teardown() throws AmbariException, SQLException { - H2DatabaseCleaner.clearDatabaseAndStopPersistenceService(injector); - - // Clear the authenticated user - SecurityContextHolder.getContext().setAuthentication(null); - } - - @Test - public void testSuggestUpgradePack() throws Exception{ - final String clusterName = "c1"; - final StackId sourceStackId = new StackId("HDP", "2.1.1"); - final StackId targetStackId = new StackId("HDP", "2.2.0"); - final Direction upgradeDirection = Direction.UPGRADE; - final UpgradeType upgradeType = UpgradeType.ROLLING; - - makeCluster(); - try { - String preferredUpgradePackName = "upgrade_test"; - UpgradePack up = m_upgradeHelper.suggestUpgradePack(clusterName, sourceStackId, targetStackId, upgradeDirection, upgradeType, preferredUpgradePackName); - assertEquals(upgradeType, up.getType()); - } catch (AmbariException e){ - assertTrue(false); - } - } - - @Test - public void testUpgradeOrchestration() throws Exception { - Map upgrades = ambariMetaInfo.getUpgradePacks("foo", "bar"); - assertTrue(upgrades.isEmpty()); - - upgrades = ambariMetaInfo.getUpgradePacks("HDP", "2.1.1"); - - ServiceInfo si = ambariMetaInfo.getService("HDP", "2.1.1", "ZOOKEEPER"); - si.setDisplayName("Zk"); - ComponentInfo ci = si.getComponentByName("ZOOKEEPER_SERVER"); - ci.setDisplayName("ZooKeeper1 Server2"); - - assertTrue(upgrades.containsKey("upgrade_test")); - UpgradePack upgrade = upgrades.get("upgrade_test"); - assertNotNull(upgrade); - - Cluster cluster = makeCluster(); - - UpgradeContext context = getMockUpgradeContext(cluster, Direction.UPGRADE, UpgradeType.ROLLING); - - List groups = m_upgradeHelper.createSequence(upgrade, context); - - assertEquals(7, groups.size()); - - assertEquals("PRE_CLUSTER", groups.get(0).name); - assertEquals("ZOOKEEPER", groups.get(1).name); - assertEquals("CORE_MASTER", groups.get(2).name); - assertEquals("CORE_SLAVES", 
groups.get(3).name); - assertEquals("HIVE", groups.get(4).name); - assertEquals("OOZIE", groups.get(5).name); - - UpgradeGroupHolder holder = groups.get(2); - boolean found = false; - for (StageWrapper sw : holder.items) { - if (sw.getTasksJson().contains("Upgrading your database")) { - found = true; - } - } - assertTrue("Expected to find replaced text for Upgrading", found); - - UpgradeGroupHolder group = groups.get(1); - // check that the display name is being used - assertTrue(group.items.get(1).getText().contains("ZooKeeper1 Server2")); - assertEquals(group.items.get(5).getText(), "Service Check Zk"); - - group = groups.get(3); - assertEquals(8, group.items.size()); - StageWrapper sw = group.items.get(3); - assertEquals("Validate Partial Upgrade", sw.getText()); - assertEquals(1, sw.getTasks().size()); - assertEquals(1, sw.getTasks().get(0).getTasks().size()); - Task t = sw.getTasks().get(0).getTasks().get(0); - assertEquals(ManualTask.class, t.getClass()); - ManualTask mt = (ManualTask) t; - assertTrue(mt.messages.get(0).contains("DataNode and NodeManager")); - assertNotNull(mt.structuredOut); - assertTrue(mt.structuredOut.contains("DATANODE")); - assertTrue(mt.structuredOut.contains("NODEMANAGER")); - - UpgradeGroupHolder postGroup = groups.get(6); - assertEquals("POST_CLUSTER", postGroup.name); - assertEquals("Finalize Upgrade", postGroup.title); - assertEquals(3, postGroup.items.size()); - assertEquals("Confirm Finalize", postGroup.items.get(0).getText()); - assertEquals("Execute HDFS Finalize", postGroup.items.get(1).getText()); - assertEquals("Save Cluster State", postGroup.items.get(2).getText()); - assertEquals(StageWrapper.Type.SERVER_SIDE_ACTION, postGroup.items.get(2).getType()); - - assertEquals(4, groups.get(0).items.size()); - assertEquals(6, groups.get(1).items.size()); - assertEquals(9, groups.get(2).items.size()); - assertEquals(8, groups.get(3).items.size()); - - // Do stacks cleanup - stackManagerMock.invalidateCurrentPaths(); - ambariMetaInfo.init(); - } - - @Experimental(feature=ExperimentalFeature.PATCH_UPGRADES) - @Test - public void testPartialUpgradeOrchestration() throws Exception { - Map upgrades = ambariMetaInfo.getUpgradePacks("foo", "bar"); - assertTrue(upgrades.isEmpty()); - - upgrades = ambariMetaInfo.getUpgradePacks("HDP", "2.1.1"); - - ServiceInfo si = ambariMetaInfo.getService("HDP", "2.1.1", "ZOOKEEPER"); - si.setDisplayName("Zk"); - ComponentInfo ci = si.getComponentByName("ZOOKEEPER_SERVER"); - ci.setDisplayName("ZooKeeper1 Server2"); - - assertTrue(upgrades.containsKey("upgrade_test_partial")); - UpgradePack upgrade = upgrades.get("upgrade_test_partial"); - assertNotNull(upgrade); - - Cluster cluster = makeCluster(); - - Set services = Collections.singleton("ZOOKEEPER"); - - UpgradeContext context = getMockUpgradeContext(cluster, Direction.UPGRADE, UpgradeType.ROLLING, - repositoryVersion2210, RepositoryType.PATCH, services); - - List groupings = upgrade.getGroups(Direction.UPGRADE); - assertEquals(8, groupings.size()); - assertEquals(UpgradeScope.COMPLETE, groupings.get(6).scope); - - List groups = m_upgradeHelper.createSequence(upgrade, context); - - assertEquals(3, groups.size()); - - assertEquals("PRE_CLUSTER", groups.get(0).name); - assertEquals("ZOOKEEPER", groups.get(1).name); - assertEquals("POST_CLUSTER", groups.get(2).name); - - UpgradeGroupHolder group = groups.get(1); - // check that the display name is being used - assertTrue(group.items.get(1).getText().contains("ZooKeeper1 Server2")); - assertEquals("Service Check Zk", 
group.items.get(6).getText()); - - UpgradeGroupHolder postGroup = groups.get(2); - assertEquals("POST_CLUSTER", postGroup.name); - assertEquals("Finalize Upgrade", postGroup.title); - assertEquals(2, postGroup.items.size()); - assertEquals("Confirm Finalize", postGroup.items.get(0).getText()); - assertEquals("Save Cluster State", postGroup.items.get(1).getText()); - assertEquals(StageWrapper.Type.SERVER_SIDE_ACTION, postGroup.items.get(1).getType()); - - assertEquals(2, groups.get(0).items.size()); - assertEquals(7, groups.get(1).items.size()); - assertEquals(2, groups.get(2).items.size()); - - // Do stacks cleanup - stackManagerMock.invalidateCurrentPaths(); - ambariMetaInfo.init(); - } - - @Test - public void testCompleteUpgradeOrchestration() throws Exception { - Map upgrades = ambariMetaInfo.getUpgradePacks("foo", "bar"); - assertTrue(upgrades.isEmpty()); - - upgrades = ambariMetaInfo.getUpgradePacks("HDP", "2.1.1"); - - ServiceInfo si = ambariMetaInfo.getService("HDP", "2.1.1", "ZOOKEEPER"); - si.setDisplayName("Zk"); - ComponentInfo ci = si.getComponentByName("ZOOKEEPER_SERVER"); - ci.setDisplayName("ZooKeeper1 Server2"); - - assertTrue(upgrades.containsKey("upgrade_test_partial")); - UpgradePack upgrade = upgrades.get("upgrade_test_partial"); - assertNotNull(upgrade); - - Cluster cluster = makeCluster(); - - UpgradeContext context = getMockUpgradeContext(cluster, Direction.UPGRADE, UpgradeType.ROLLING, - repositoryVersion2210, RepositoryType.STANDARD, Collections.singleton("ZOOKEEPER")); - - List groupings = upgrade.getGroups(Direction.UPGRADE); - assertEquals(8, groupings.size()); - assertEquals(UpgradeScope.COMPLETE, groupings.get(6).scope); - - List groups = m_upgradeHelper.createSequence(upgrade, context); - - assertEquals(4, groups.size()); - - assertEquals("PRE_CLUSTER", groups.get(0).name); - assertEquals("ZOOKEEPER", groups.get(1).name); - assertEquals("ALL_HOSTS", groups.get(2).name); - assertEquals("POST_CLUSTER", groups.get(3).name); - - UpgradeGroupHolder group = groups.get(1); - // check that the display name is being used - assertTrue(group.items.get(1).getText().contains("ZooKeeper1 Server2")); - assertEquals("Service Check Zk", group.items.get(5).getText()); - - UpgradeGroupHolder postGroup = groups.get(3); - assertEquals("POST_CLUSTER", postGroup.name); - assertEquals("Finalize Upgrade", postGroup.title); - assertEquals(2, postGroup.items.size()); - assertEquals("Confirm Finalize", postGroup.items.get(0).getText()); - assertEquals("Save Cluster State", postGroup.items.get(1).getText()); - assertEquals(StageWrapper.Type.SERVER_SIDE_ACTION, postGroup.items.get(1).getType()); - - assertEquals(2, groups.get(0).items.size()); - assertEquals(6, groups.get(1).items.size()); - assertEquals(1, groups.get(2).items.size()); - - // Do stacks cleanup - stackManagerMock.invalidateCurrentPaths(); - ambariMetaInfo.init(); - } - - @Test - public void testUpgradeServerActionOrchestration() throws Exception { - Map upgrades = ambariMetaInfo.getUpgradePacks("HDP", "2.1.1"); - - ServiceInfo si = ambariMetaInfo.getService("HDP", "2.1.1", "ZOOKEEPER"); - si.setDisplayName("Zk"); - ComponentInfo ci = si.getComponentByName("ZOOKEEPER_SERVER"); - ci.setDisplayName("ZooKeeper1 Server2"); - - assertTrue(upgrades.containsKey("upgrade_server_action_test")); - UpgradePack upgrade = upgrades.get("upgrade_server_action_test"); - assertNotNull(upgrade); - - Cluster cluster = makeCluster(); - - UpgradeContext context = getMockUpgradeContext(cluster, Direction.UPGRADE, UpgradeType.ROLLING); - - List 
groups = m_upgradeHelper.createSequence(upgrade, context); - - assertEquals(1, groups.size()); - UpgradeGroupHolder group = groups.get(0); - assertEquals("CLUSTER_SERVER_ACTIONS", group.name); - List stageWrappers = group.items; - assertEquals(6, stageWrappers.size()); - assertEquals("Pre Upgrade", stageWrappers.get(0).getText()); - assertEquals("Pre Upgrade Zookeeper", stageWrappers.get(1).getText()); - assertEquals("Configuring", stageWrappers.get(2).getText()); - assertEquals("Configuring HDFS", stageWrappers.get(3).getText()); - assertEquals("Calculating Properties", stageWrappers.get(4).getText()); - assertEquals("Calculating HDFS Properties", stageWrappers.get(5).getText()); - - // Do stacks cleanup - stackManagerMock.invalidateCurrentPaths(); - ambariMetaInfo.init(); - } - - /** - * Tests that hosts in MM are not included in the upgrade. - * - * @throws Exception - */ - @Test - public void testUpgradeOrchestrationWithHostsInMM() throws Exception { - Map upgrades = ambariMetaInfo.getUpgradePacks("foo", "bar"); - assertTrue(upgrades.isEmpty()); - - upgrades = ambariMetaInfo.getUpgradePacks("HDP", "2.1.1"); - - ServiceInfo si = ambariMetaInfo.getService("HDP", "2.1.1", "ZOOKEEPER"); - si.setDisplayName("Zk"); - - ComponentInfo ci = si.getComponentByName("ZOOKEEPER_SERVER"); - ci.setDisplayName("ZooKeeper1 Server2"); - - assertTrue(upgrades.containsKey("upgrade_test")); - UpgradePack upgrade = upgrades.get("upgrade_test"); - assertNotNull(upgrade); - - // turn on MM for the first host - Cluster cluster = makeCluster(); - Host hostInMaintenanceMode = cluster.getHosts().iterator().next(); - hostInMaintenanceMode.setMaintenanceState(cluster.getClusterId(), MaintenanceState.ON); - - UpgradeContext context = getMockUpgradeContextNoReplay(cluster, Direction.UPGRADE, - UpgradeType.ROLLING, repositoryVersion2210); - - // use a "real" master host resolver here so that we can actually test MM - MasterHostResolver masterHostResolver = new MasterHostResolver(cluster, null, context); - - expect(context.getResolver()).andReturn(masterHostResolver).anyTimes(); - replay(context); - - List groups = m_upgradeHelper.createSequence(upgrade, context); - assertEquals(7, groups.size()); - - for (UpgradeGroupHolder group : groups) { - for (StageWrapper stageWrapper : group.items) { - Set hosts = stageWrapper.getHosts(); - assertFalse(hosts.contains(hostInMaintenanceMode.getHostName())); - } - } - - // Do stacks cleanup - stackManagerMock.invalidateCurrentPaths(); - ambariMetaInfo.init(); - } - - /** - * Verify that a Rolling Upgrades restarts the NameNodes in the following order: standby, active. 
- * @throws Exception - */ - @Test - public void testNamenodeOrder() throws Exception { - Map upgrades = ambariMetaInfo.getUpgradePacks("HDP", "2.1.1"); - assertTrue(upgrades.containsKey("upgrade_test")); - UpgradePack upgrade = upgrades.get("upgrade_test"); - assertNotNull(upgrade); - - Cluster cluster = makeCluster(); - - UpgradeContext context = getMockUpgradeContext(cluster, Direction.UPGRADE, UpgradeType.ROLLING); - - List groups = m_upgradeHelper.createSequence(upgrade, context); - - assertEquals(7, groups.size()); - - UpgradeGroupHolder mastersGroup = groups.get(2); - assertEquals("CORE_MASTER", mastersGroup.name); - - List orderedNameNodes = new LinkedList<>(); - for (StageWrapper sw : mastersGroup.items) { - if (sw.getType().equals(StageWrapper.Type.RESTART) && sw.getText().toLowerCase().contains("NameNode".toLowerCase())) { - for (TaskWrapper tw : sw.getTasks()) { - for (String hostName : tw.getHosts()) { - orderedNameNodes.add(hostName); - } - } - } - } - - assertEquals(2, orderedNameNodes.size()); - // Order is standby, then active. - assertEquals("h2", orderedNameNodes.get(0)); - assertEquals("h1", orderedNameNodes.get(1)); - } - - @Test - public void testNamenodeFederationOrder() throws Exception { - namenodeHosts = HostsType.federated( - Arrays.asList( - new HostsType.HighAvailabilityHosts("h1", Arrays.asList("h2", "h3")), - new HostsType.HighAvailabilityHosts("h4", singletonList("h5"))), - newLinkedHashSet(Arrays.asList("h1", "h2", "h3", "h4", "h5"))); - - Map upgrades = ambariMetaInfo.getUpgradePacks("HDP", "2.1.1"); - assertTrue(upgrades.containsKey("upgrade_test")); - UpgradePack upgrade = upgrades.get("upgrade_test"); - assertNotNull(upgrade); - - Cluster cluster = makeCluster(); - - UpgradeContext context = getMockUpgradeContext(cluster, Direction.UPGRADE, UpgradeType.ROLLING); - - List groups = m_upgradeHelper.createSequence(upgrade, context); - - assertEquals(7, groups.size()); - - UpgradeGroupHolder mastersGroup = groups.get(2); - assertEquals("CORE_MASTER", mastersGroup.name); - - List orderedNameNodes = new LinkedList<>(); - for (StageWrapper sw : mastersGroup.items) { - if (sw.getType().equals(StageWrapper.Type.RESTART) && sw.getText().toLowerCase().contains("NameNode".toLowerCase())) { - for (TaskWrapper tw : sw.getTasks()) { - for (String hostName : tw.getHosts()) { - orderedNameNodes.add(hostName); - } - } - } - } - assertEquals(Arrays.asList("h2", "h3", "h1", "h5", "h4"), orderedNameNodes); - } - - @Test - public void testUpgradeOrchestrationWithNoHeartbeat() throws Exception { - Map upgrades = ambariMetaInfo.getUpgradePacks("foo", "bar"); - assertTrue(upgrades.isEmpty()); - - upgrades = ambariMetaInfo.getUpgradePacks("HDP", "2.1.1"); - assertTrue(upgrades.containsKey("upgrade_test")); - UpgradePack upgrade = upgrades.get("upgrade_test"); - assertNotNull(upgrade); - - Cluster cluster = makeCluster(false); - - Clusters clusters = injector.getInstance(Clusters.class); - Host h4 = clusters.getHost("h4"); - h4.setState(HostState.HEARTBEAT_LOST); - - List schs = cluster.getServiceComponentHosts("h4"); - assertEquals(1, schs.size()); - assertEquals(HostState.HEARTBEAT_LOST, schs.get(0).getHostState()); - - UpgradeContext context = getMockUpgradeContext(cluster, Direction.UPGRADE, UpgradeType.ROLLING); - - List groups = m_upgradeHelper.createSequence(upgrade, context); - - assertEquals(7, groups.size()); - - assertEquals("PRE_CLUSTER", groups.get(0).name); - assertEquals("ZOOKEEPER", groups.get(1).name); - assertEquals("CORE_MASTER", groups.get(2).name); - 
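/* Editor's note: the NameNode tests above (testNamenodeOrder, testNamenodeFederationOrder)
   encode the rolling-upgrade rule that standby NameNodes restart before the active one.
   A minimal, self-contained sketch of that ordering under a simple active/standby model;
   the class and method names are illustrative, not Ambari's MasterHostResolver: */
import java.util.ArrayList;
import java.util.List;

class StandbyFirstOrder {
    /** Restart order: every standby host first, the active host last. */
    static List<String> restartOrder(String activeHost, List<String> standbyHosts) {
        List<String> ordered = new ArrayList<>(standbyHosts);
        ordered.add(activeHost);
        return ordered;
    }

    public static void main(String[] args) {
        // Mirrors testNamenodeOrder above: h2 (standby) restarts before h1 (active).
        System.out.println(restartOrder("h1", List.of("h2"))); // [h2, h1]
    }
}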
assertEquals("CORE_SLAVES", groups.get(3).name); - assertEquals("HIVE", groups.get(4).name); - assertEquals("OOZIE", groups.get(5).name); - - UpgradeGroupHolder postGroup = groups.get(6); - assertEquals("POST_CLUSTER", postGroup.name); - assertEquals("Finalize Upgrade", postGroup.title); - assertEquals(3, postGroup.items.size()); - assertEquals("Confirm Finalize", postGroup.items.get(0).getText()); - assertEquals("Execute HDFS Finalize", postGroup.items.get(1).getText()); - assertEquals("Save Cluster State", postGroup.items.get(2).getText()); - assertEquals(StageWrapper.Type.SERVER_SIDE_ACTION, postGroup.items.get(2).getType()); - - assertEquals(6, groups.get(1).items.size()); - assertEquals(9, groups.get(2).items.size()); - assertEquals(7, groups.get(3).items.size()); - } - - @Test - public void testDowngradeOrchestration() throws Exception { - Map upgrades = ambariMetaInfo.getUpgradePacks("HDP", "2.1.1"); - assertTrue(upgrades.containsKey("upgrade_test")); - UpgradePack upgrade = upgrades.get("upgrade_test"); - assertNotNull(upgrade); - - Cluster cluster = makeCluster(); - - UpgradeContext context = getMockUpgradeContext(cluster, Direction.DOWNGRADE, - UpgradeType.ROLLING, repositoryVersion2200); - - List groups = m_upgradeHelper.createSequence(upgrade, context); - - assertEquals(7, groups.size()); - - assertEquals("PRE_CLUSTER", groups.get(0).name); - assertEquals("OOZIE", groups.get(1).name); - assertEquals("HIVE", groups.get(2).name); - assertEquals("CORE_SLAVES", groups.get(3).name); - assertEquals("CORE_MASTER", groups.get(4).name); - assertEquals("ZOOKEEPER", groups.get(5).name); - - - UpgradeGroupHolder postGroup = groups.get(6); - assertEquals("POST_CLUSTER", postGroup.name); - assertEquals("Finalize Downgrade", postGroup.title); - assertEquals(3, postGroup.items.size()); - assertEquals("Confirm Finalize", postGroup.items.get(0).getText()); - assertEquals("Execute HDFS Finalize", postGroup.items.get(1).getText()); - assertEquals("Save Cluster State", postGroup.items.get(2).getText()); - assertEquals(StageWrapper.Type.SERVER_SIDE_ACTION, postGroup.items.get(2).getType()); - - assertEquals(4, groups.get(0).items.size()); - assertEquals(8, groups.get(1).items.size()); - assertEquals(5, groups.get(2).items.size()); - assertEquals(7, groups.get(3).items.size()); - assertEquals(8, groups.get(4).items.size()); - } - - @Test - public void testBuckets() throws Exception { - Map upgrades = ambariMetaInfo.getUpgradePacks("HDP", "2.1.1"); - assertTrue(upgrades.containsKey("upgrade_bucket_test")); - UpgradePack upgrade = upgrades.get("upgrade_bucket_test"); - assertNotNull(upgrade); - - Cluster cluster = makeCluster(); - - UpgradeContext context = getMockUpgradeContext(cluster, Direction.UPGRADE, UpgradeType.ROLLING); - - List groups = m_upgradeHelper.createSequence(upgrade, context); - - assertEquals(1, groups.size()); - UpgradeGroupHolder group = groups.iterator().next(); - - // Pre: - // Manual task = 1 - // 2x - Execute task on all 3 = 6 - - // Post: - // Execute task on all 3 = 3 - // 2x - Manual task = 2 - // 3x - Execute task on all 3 = 9 - - // Service Check = 1 - assertEquals(22, group.items.size()); - } - - @Test - public void testManualTaskPostProcessing() throws Exception { - Map upgrades = ambariMetaInfo.getUpgradePacks("HDP", "2.1.1"); - assertTrue(upgrades.containsKey("upgrade_test")); - UpgradePack upgrade = upgrades.get("upgrade_test"); - assertNotNull(upgrade); - - Cluster cluster = makeCluster(); - UpgradeContext context = getMockUpgradeContext(cluster, Direction.UPGRADE, 
UpgradeType.ROLLING); - List groups = m_upgradeHelper.createSequence(upgrade, context); - - assertEquals(7, groups.size()); - - // grab the manual task out of ZK which has placeholder text - UpgradeGroupHolder zookeeperGroup = groups.get(1); - assertEquals("ZOOKEEPER", zookeeperGroup.name); - ManualTask manualTask = (ManualTask) zookeeperGroup.items.get(0).getTasks().get( - 0).getTasks().get(0); - - assertEquals(1, manualTask.messages.size()); - assertEquals("This is a manual task with a placeholder of placeholder-rendered-properly", - manualTask.messages.get(0)); - } - - @Test - public void testConditionalDeleteTask() throws Exception { - Map upgrades = ambariMetaInfo.getUpgradePacks("HDP", "2.1.1"); - assertTrue(upgrades.containsKey("upgrade_test")); - UpgradePack upgrade = upgrades.get("upgrade_test"); - ConfigUpgradePack cup = ambariMetaInfo.getConfigUpgradePack("HDP", "2.1.1"); - assertNotNull(upgrade); - - Cluster cluster = makeCluster(); - - UpgradeContext context = getMockUpgradeContext(cluster, Direction.UPGRADE, UpgradeType.ROLLING); - - List groups = m_upgradeHelper.createSequence(upgrade, context); - - assertEquals(7, groups.size()); - - // grab the configure task out of Hive - UpgradeGroupHolder hiveGroup = groups.get(4); - assertEquals("HIVE", hiveGroup.name); - ConfigureTask configureTask = (ConfigureTask) hiveGroup.items.get(1).getTasks().get(0).getTasks().get(0); - - // now change the thrift port to http to have the 2nd condition invoked - Map hiveConfigs = new HashMap<>(); - hiveConfigs.put("hive.server2.transport.mode", "http"); - hiveConfigs.put("hive.server2.thrift.port", "10001"); - hiveConfigs.put("condition", "1"); - - ConfigurationRequest configurationRequest = new ConfigurationRequest(); - configurationRequest.setClusterName(cluster.getClusterName()); - configurationRequest.setType("hive-site"); - configurationRequest.setVersionTag("version2"); - configurationRequest.setProperties(hiveConfigs); - - final ClusterRequest clusterRequest = new ClusterRequest( - cluster.getClusterId(), cluster.getClusterName(), - cluster.getDesiredStackVersion().getStackVersion(), null); - - clusterRequest.setDesiredConfig(singletonList(configurationRequest)); - m_managementController.updateClusters(new HashSet() { - { - add(clusterRequest); - } - }, null); - - Map configProperties = configureTask.getConfigurationChanges(cluster, cup); - assertFalse(configProperties.isEmpty()); - assertEquals(configProperties.get(ConfigureTask.PARAMETER_CONFIG_TYPE), "hive-site"); - - String configurationJson = configProperties.get(ConfigureTask.PARAMETER_TRANSFERS); - assertNotNull(configurationJson); - - List transfers = m_gson.fromJson(configurationJson, - new TypeToken>() { }.getType()); - - assertEquals(6, transfers.size()); - assertEquals("copy-key", transfers.get(0).fromKey); - assertEquals("copy-key-to", transfers.get(0).toKey); - - assertEquals("move-key", transfers.get(1).fromKey); - assertEquals("move-key-to", transfers.get(1).toKey); - - assertEquals("delete-key", transfers.get(2).deleteKey); - assertEquals("delete-http-1", transfers.get(3).deleteKey); - assertEquals("delete-http-2", transfers.get(4).deleteKey); - assertEquals("delete-http-3", transfers.get(5).deleteKey); - - - } - - @Test - public void testConfigTaskConditionMet() throws Exception { - Map upgrades = ambariMetaInfo.getUpgradePacks("HDP", "2.1.1"); - assertTrue(upgrades.containsKey("upgrade_test")); - UpgradePack upgrade = upgrades.get("upgrade_test"); - ConfigUpgradePack cup = ambariMetaInfo.getConfigUpgradePack("HDP", 
"2.1.1"); - assertNotNull(upgrade); - - Cluster cluster = makeCluster(); - - UpgradeContext context = getMockUpgradeContext(cluster, Direction.UPGRADE, UpgradeType.ROLLING); - - List groups = m_upgradeHelper.createSequence(upgrade, - context); - - assertEquals(7, groups.size()); - - // grab the configure task out of Hive - UpgradeGroupHolder hiveGroup = groups.get(4); - assertEquals("HIVE", hiveGroup.name); - - //Condition is met - ConfigureTask configureTask = (ConfigureTask) hiveGroup.items.get(2).getTasks().get( - 0).getTasks().get(0); - Map configProperties = configureTask.getConfigurationChanges(cluster, cup); - - assertFalse(configProperties.isEmpty()); - assertEquals(configProperties.get(ConfigureTask.PARAMETER_CONFIG_TYPE), "hive-site"); - - assertTrue(configProperties.containsKey(ConfigureTask.PARAMETER_KEY_VALUE_PAIRS)); - assertTrue(configProperties.containsKey(ConfigureTask.PARAMETER_REPLACEMENTS)); - assertTrue(configProperties.containsKey(ConfigureTask.PARAMETER_TRANSFERS)); - - String configurationJson = configProperties.get(ConfigureTask.PARAMETER_KEY_VALUE_PAIRS); - String transferJson = configProperties.get(ConfigureTask.PARAMETER_TRANSFERS); - String replacementJson = configProperties.get(ConfigureTask.PARAMETER_REPLACEMENTS); - assertNotNull(configurationJson); - assertNotNull(transferJson); - assertNotNull(replacementJson); - - //if conditions for sets... - List keyValuePairs = m_gson.fromJson(configurationJson, - new TypeToken>() { - }.getType()); - assertEquals("setKeyOne", keyValuePairs.get(0).key); - assertEquals("1", keyValuePairs.get(0).value); - - assertEquals("setKeyTwo", keyValuePairs.get(1).key); - assertEquals("2", keyValuePairs.get(1).value); - - assertEquals("setKeyThree", keyValuePairs.get(2).key); - assertEquals("3", keyValuePairs.get(2).value); - - assertEquals("setKeyFour", keyValuePairs.get(3).key); - assertEquals("4", keyValuePairs.get(3).value); - - //if conditions for transfer - List transfers = m_gson.fromJson(transferJson, - new TypeToken>() { - }.getType()); - - assertEquals("copy-key-one", transfers.get(0).fromKey); - assertEquals("copy-to-key-one", transfers.get(0).toKey); - - assertEquals("copy-key-two", transfers.get(1).fromKey); - assertEquals("copy-to-key-two", transfers.get(1).toKey); - - assertEquals("copy-key-three", transfers.get(2).fromKey); - assertEquals("copy-to-key-three", transfers.get(2).toKey); - - assertEquals("copy-key-four", transfers.get(3).fromKey); - assertEquals("copy-to-key-four", transfers.get(3).toKey); - - assertEquals("move-key-one", transfers.get(4).fromKey); - assertEquals("move-to-key-one", transfers.get(4).toKey); - - assertEquals("move-key-two", transfers.get(5).fromKey); - assertEquals("move-to-key-two", transfers.get(5).toKey); - - assertEquals("move-key-three", transfers.get(6).fromKey); - assertEquals("move-to-key-three", transfers.get(6).toKey); - - assertEquals("move-key-four", transfers.get(7).fromKey); - assertEquals("move-to-key-four", transfers.get(7).toKey); - - assertEquals("delete-key-one", transfers.get(8).deleteKey); - assertEquals("delete-key-two", transfers.get(9).deleteKey); - assertEquals("delete-key-three", transfers.get(10).deleteKey); - assertEquals("delete-key-four", transfers.get(11).deleteKey); - - //if conditions for replace - List replacements = m_gson.fromJson(replacementJson, - new TypeToken>() { - }.getType()); - assertEquals("replace-key-one", replacements.get(0).key); - assertEquals("abc", replacements.get(0).find); - assertEquals("abc-replaced", replacements.get(0).replaceWith); 
- assertEquals("replace-key-two", replacements.get(1).key); - assertEquals("efg", replacements.get(1).find); - assertEquals("efg-replaced", replacements.get(1).replaceWith); - assertEquals("replace-key-three", replacements.get(2).key); - assertEquals("ijk", replacements.get(2).find); - assertEquals("ijk-replaced", replacements.get(2).replaceWith); - assertEquals("replace-key-four", replacements.get(3).key); - assertEquals("lmn", replacements.get(3).find); - assertEquals("lmn-replaced", replacements.get(3).replaceWith); - } - - @Test - public void testConfigTaskConditionSkipped() throws Exception { - Map upgrades = ambariMetaInfo.getUpgradePacks("HDP", "2.1.1"); - assertTrue(upgrades.containsKey("upgrade_test")); - UpgradePack upgrade = upgrades.get("upgrade_test"); - ConfigUpgradePack cup = ambariMetaInfo.getConfigUpgradePack("HDP", "2.1.1"); - assertNotNull(upgrade); - - Cluster cluster = makeCluster(); - - UpgradeContext context = getMockUpgradeContext(cluster, Direction.UPGRADE, UpgradeType.ROLLING); - - List groups = m_upgradeHelper.createSequence(upgrade, - context); - - assertEquals(7, groups.size()); - - UpgradeGroupHolder hiveGroup = groups.get(4); - assertEquals("HIVE", hiveGroup.name); - - //Condition is not met, so no config operations should be present in the configureTask... - ConfigureTask configureTask = (ConfigureTask) hiveGroup.items.get(3).getTasks().get(0).getTasks().get(0); - Map configProperties = configureTask.getConfigurationChanges(cluster, cup); - - assertFalse(configProperties.isEmpty()); - assertEquals(configProperties.get(ConfigureTask.PARAMETER_CONFIG_TYPE), "hive-site"); - - assertTrue(configProperties.containsKey(ConfigureTask.PARAMETER_KEY_VALUE_PAIRS)); - assertTrue(configProperties.containsKey(ConfigureTask.PARAMETER_REPLACEMENTS)); - assertTrue(configProperties.containsKey(ConfigureTask.PARAMETER_TRANSFERS)); - - String configurationJson = configProperties.get(ConfigureTask.PARAMETER_KEY_VALUE_PAIRS); - String transferJson = configProperties.get(ConfigureTask.PARAMETER_TRANSFERS); - - String replacementJson = configProperties.get(ConfigureTask.PARAMETER_REPLACEMENTS); - assertNotNull(configurationJson); - assertNotNull(transferJson); - assertNotNull(replacementJson); - - List keyValuePairs = m_gson.fromJson(configurationJson, - new TypeToken>() { - }.getType()); - assertTrue(keyValuePairs.isEmpty()); - - List replacements = m_gson.fromJson(replacementJson, - new TypeToken>() { - }.getType()); - assertTrue(replacements.isEmpty()); - - List transfers = m_gson.fromJson(transferJson, - new TypeToken>() { - }.getType()); - assertTrue(transfers.isEmpty()); - } - - /** - * Tests that {@link ConfigurationKeyValue} pairs on a {@link ConfigureTask} - * are correctly returned based on the if-conditions. 
- * - * @throws Exception - */ - @Test - public void testConfigureTask() throws Exception { - Map upgrades = ambariMetaInfo.getUpgradePacks("HDP", "2.1.1"); - assertTrue(upgrades.containsKey("upgrade_test")); - UpgradePack upgrade = upgrades.get("upgrade_test"); - ConfigUpgradePack cup = ambariMetaInfo.getConfigUpgradePack("HDP", "2.1.1"); - assertNotNull(upgrade); - - Cluster cluster = makeCluster(); - - UpgradeContext context = getMockUpgradeContext(cluster, Direction.UPGRADE, UpgradeType.ROLLING); - - List groups = m_upgradeHelper.createSequence(upgrade, - context); - - assertEquals(7, groups.size()); - - // grab the first configure task out of Hive - UpgradeGroupHolder hiveGroup = groups.get(4); - assertEquals("HIVE", hiveGroup.name); - ConfigureTask configureTask = (ConfigureTask) hiveGroup.items.get(1).getTasks().get(0).getTasks().get(0); - - Map configProperties = configureTask.getConfigurationChanges(cluster, cup); - assertFalse(configProperties.isEmpty()); - assertEquals(configProperties.get(ConfigureTask.PARAMETER_CONFIG_TYPE), "hive-site"); - - // now set the property in the if-check in the set element so that we have a match - Map hiveConfigs = new HashMap<>(); - hiveConfigs.put("fooKey", "THIS-BETTER-CHANGE"); - hiveConfigs.put("ifFooKey", "ifFooValue"); - - ConfigurationRequest configurationRequest = new ConfigurationRequest(); - configurationRequest.setClusterName(cluster.getClusterName()); - configurationRequest.setType("hive-site"); - configurationRequest.setVersionTag("version2"); - configurationRequest.setProperties(hiveConfigs); - - final ClusterRequest clusterRequest = new ClusterRequest( - cluster.getClusterId(), cluster.getClusterName(), - cluster.getDesiredStackVersion().getStackVersion(), null); - - clusterRequest.setDesiredConfig(singletonList(configurationRequest)); - m_managementController.updateClusters(new HashSet() { - { - add(clusterRequest); - } - }, null); - - // the configure task should now return different properties to set based on - // the if-condition checks - configProperties = configureTask.getConfigurationChanges(cluster, cup); - assertFalse(configProperties.isEmpty()); - assertEquals( configProperties.get(ConfigureTask.PARAMETER_CONFIG_TYPE), "hive-site"); - - String configurationJson = configProperties.get(ConfigureTask.PARAMETER_KEY_VALUE_PAIRS); - assertNotNull(configurationJson); - - List keyValuePairs = m_gson.fromJson( - configurationJson, - new TypeToken>() { - }.getType()); - - assertEquals("fooKey", keyValuePairs.get(0).key); - assertEquals("fooValue", keyValuePairs.get(0).value); - } - - @Test - public void testConfigureTaskWithMultipleConfigurations() throws Exception { - Map upgrades = ambariMetaInfo.getUpgradePacks("HDP", "2.1.1"); - assertTrue(upgrades.containsKey("upgrade_test")); - UpgradePack upgrade = upgrades.get("upgrade_test"); - ConfigUpgradePack cup = ambariMetaInfo.getConfigUpgradePack("HDP", "2.1.1"); - assertNotNull(upgrade); - Cluster cluster = makeCluster(); - - UpgradeContext context = getMockUpgradeContext(cluster, Direction.UPGRADE, UpgradeType.ROLLING); - - List groups = m_upgradeHelper.createSequence(upgrade, context); - - assertEquals(7, groups.size()); - - // grab the configure task out of Hive - UpgradeGroupHolder hiveGroup = groups.get(4); - assertEquals("HIVE", hiveGroup.name); - ConfigureTask configureTask = (ConfigureTask) hiveGroup.items.get(1).getTasks().get(0).getTasks().get(0); - - Map configProperties = configureTask.getConfigurationChanges(cluster, cup); - assertFalse(configProperties.isEmpty()); - 
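/* Editor's note: getConfigurationChanges() only emits a key/value pair when the task's
   if-check matches the cluster's current desired config, which is what this test exercises
   by setting ifFooKey below. A minimal sketch of such a guarded set; the helper is
   hypothetical, not ConfigureTask's actual implementation: */
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

class ConditionalSet {
    /** Returns the value to set only when the guarded property has the expected value. */
    static Optional<String> evaluate(Map<String, String> current, String ifKey,
                                     String ifValue, String setValue) {
        return ifValue.equals(current.get(ifKey)) ? Optional.of(setValue) : Optional.empty();
    }

    public static void main(String[] args) {
        Map<String, String> hiveSite = new HashMap<>();
        hiveSite.put("ifFooKey", "ifFooValue");
        // Guard holds, so fooKey would be set (prints Optional[fooValue]); otherwise Optional.empty.
        System.out.println(evaluate(hiveSite, "ifFooKey", "ifFooValue", "fooValue"));
    }
}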
assertEquals(configProperties.get(ConfigureTask.PARAMETER_CONFIG_TYPE), "hive-site"); - - String configurationJson = configProperties.get(ConfigureTask.PARAMETER_KEY_VALUE_PAIRS); - String transferJson = configProperties.get(ConfigureTask.PARAMETER_TRANSFERS); - assertNotNull(configurationJson); - assertNotNull(transferJson); - - List keyValuePairs = m_gson.fromJson(configurationJson, - new TypeToken>() { - }.getType()); - - List transfers = m_gson.fromJson(transferJson, - new TypeToken>() { - }.getType()); - - assertEquals("fooKey", keyValuePairs.get(0).key); - assertEquals("fooValue", keyValuePairs.get(0).value); - assertEquals("fooKey2", keyValuePairs.get(1).key); - assertEquals("fooValue2", keyValuePairs.get(1).value); - assertEquals("fooKey3", keyValuePairs.get(2).key); - assertEquals("fooValue3", keyValuePairs.get(2).value); - - assertEquals("copy-key", transfers.get(0).fromKey); - assertEquals("copy-key-to", transfers.get(0).toKey); - - assertEquals("move-key", transfers.get(1).fromKey); - assertEquals("move-key-to", transfers.get(1).toKey); - } - - @Test - public void testServiceCheckUpgradeStages() throws Exception { - Map upgrades = ambariMetaInfo.getUpgradePacks("HDP", "2.2.0"); - assertTrue(upgrades.containsKey("upgrade_test_checks")); - UpgradePack upgrade = upgrades.get("upgrade_test_checks"); - assertNotNull(upgrade); - - // HBASE and PIG have service checks, but not TEZ. - Set additionalServices = new HashSet() {{ add("HBASE"); add("PIG"); add("TEZ"); add("AMBARI_METRICS"); }}; - Cluster c = makeCluster(true, additionalServices); - - int numServiceChecksExpected = 0; - Collection services = c.getServices().values(); - for(Service service : services) { - ServiceInfo si = ambariMetaInfo.getService(c.getCurrentStackVersion().getStackName(), - c.getCurrentStackVersion().getStackVersion(), service.getName()); - if (null == si.getCommandScript()) { - continue; - } - if (service.getName().equalsIgnoreCase("TEZ")) { - assertTrue("Expect Tez to not have any service checks", false); - } - - // Expect AMS to not run any service checks because it is excluded - if (service.getName().equalsIgnoreCase("AMBARI_METRICS")) { - continue; - } - numServiceChecksExpected++; - } - - UpgradeContext context = getMockUpgradeContext(c, Direction.UPGRADE, UpgradeType.ROLLING); - - List groups = m_upgradeHelper.createSequence(upgrade, context); - - assertEquals(8, groups.size()); - - UpgradeGroupHolder holder = groups.get(4); - assertEquals(holder.name, "SERVICE_CHECK_1"); - assertEquals(7, holder.items.size()); - int numServiceChecksActual = 0; - for (StageWrapper sw : holder.items) { - for(Service service : services) { - Pattern p = Pattern.compile(".*" + service.getName(), Pattern.CASE_INSENSITIVE); - Matcher matcher = p.matcher(sw.getText()); - if (matcher.matches()) { - numServiceChecksActual++; - continue; - } - } - } - - assertEquals(numServiceChecksActual, numServiceChecksExpected); - - // grab the manual task out of ZK which has placeholder text - UpgradeGroupHolder zookeeperGroup = groups.get(1); - assertEquals("ZOOKEEPER", zookeeperGroup.name); - ManualTask manualTask = (ManualTask) zookeeperGroup.items.get(0).getTasks().get( - 0).getTasks().get(0); - - assertEquals(1, manualTask.messages.size()); - assertEquals( - "This is a manual task with a placeholder of placeholder-rendered-properly", - manualTask.messages.get(0)); - - UpgradeGroupHolder clusterGroup = groups.get(3); - assertEquals(clusterGroup.name, "HBASE"); - assertEquals(clusterGroup.title, "Update HBase Configuration"); - 
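/* Editor's note: the service-check count above is computed by case-insensitive regex
   matching of each stage's text against the service names. The same matching in isolation,
   with illustrative stage strings: */
import java.util.List;
import java.util.regex.Pattern;

class ServiceCheckMatch {
    public static void main(String[] args) {
        List<String> stageTexts = List.of("Service Check HDFS", "Service Check HBase", "Restart NameNode");
        Pattern p = Pattern.compile(".*hbase", Pattern.CASE_INSENSITIVE); // same flags as the test
        long matches = stageTexts.stream().filter(t -> p.matcher(t).matches()).count();
        System.out.println(matches); // 1 -- only "Service Check HBase" matches
    }
}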
assertEquals(1, clusterGroup.items.size()); - StageWrapper stage = clusterGroup.items.get(0); - assertEquals(stage.getText(), "Update HBase Configuration"); - } - - @Test - public void testServiceCheckDowngradeStages() throws Exception { - Map upgrades = ambariMetaInfo.getUpgradePacks("HDP", "2.1.1"); - assertTrue(upgrades.containsKey("upgrade_test_checks")); - UpgradePack upgrade = upgrades.get("upgrade_test_checks"); - assertNotNull(upgrade); - - Cluster cluster = makeCluster(); - - UpgradeContext context = getMockUpgradeContext(cluster, Direction.DOWNGRADE, - UpgradeType.ROLLING, repositoryVersion2200); - - List groups = m_upgradeHelper.createSequence(upgrade, context); - - assertEquals(6, groups.size()); - - // grab the manual task out of ZK which has placeholder text - - UpgradeGroupHolder zookeeperGroup = groups.get(4); - assertEquals("ZOOKEEPER", zookeeperGroup.name); - ManualTask manualTask = (ManualTask) zookeeperGroup.items.get(0).getTasks().get( - 0).getTasks().get(0); - - assertEquals(1, manualTask.messages.size()); - assertEquals( - "This is a manual task with a placeholder of placeholder-rendered-properly", - manualTask.messages.get(0)); - } - - @Test - public void testUpgradeOrchestrationFullTask() throws Exception { - Map upgrades = ambariMetaInfo.getUpgradePacks("HDP", "2.1.1"); - - ServiceInfo si = ambariMetaInfo.getService("HDP", "2.1.1", "ZOOKEEPER"); - si.setDisplayName("Zk"); - ComponentInfo ci = si.getComponentByName("ZOOKEEPER_SERVER"); - ci.setDisplayName("ZooKeeper1 Server2"); - - assertTrue(upgrades.containsKey("upgrade_to_new_stack")); - UpgradePack upgrade = upgrades.get("upgrade_to_new_stack"); - assertNotNull(upgrade); - - Cluster cluster = makeCluster(); - - UpgradeContext context = getMockUpgradeContext(cluster, Direction.UPGRADE, UpgradeType.ROLLING); - - List groups = m_upgradeHelper.createSequence(upgrade, context); - - assertEquals(6, groups.size()); - - assertEquals("PRE_CLUSTER", groups.get(0).name); - assertEquals("ZOOKEEPER", groups.get(1).name); - assertEquals("CORE_MASTER", groups.get(2).name); - assertEquals("CORE_SLAVES", groups.get(3).name); - assertEquals("HIVE", groups.get(4).name); - - UpgradeGroupHolder holder = groups.get(2); - boolean found = false; - for (StageWrapper sw : holder.items) { - if (sw.getTasksJson().contains("Upgrading your database")) { - found = true; - } - } - assertTrue("Expected to find replaced text for Upgrading", found); - - UpgradeGroupHolder group = groups.get(1); - // check that the display name is being used - assertTrue(group.items.get(1).getText().contains("ZooKeeper1 Server2")); - assertEquals(group.items.get(4).getText(), "Service Check Zk"); - - group = groups.get(3); - assertEquals(8, group.items.size()); - StageWrapper sw = group.items.get(3); - assertEquals("Validate Partial Upgrade", sw.getText()); - assertEquals(1, sw.getTasks().size()); - assertEquals(1, sw.getTasks().get(0).getTasks().size()); - Task t = sw.getTasks().get(0).getTasks().get(0); - assertEquals(ManualTask.class, t.getClass()); - ManualTask mt = (ManualTask) t; - assertTrue(mt.messages.get(0).contains("DataNode and NodeManager")); - assertNotNull(mt.structuredOut); - assertTrue(mt.structuredOut.contains("DATANODE")); - assertTrue(mt.structuredOut.contains("NODEMANAGER")); - - UpgradeGroupHolder postGroup = groups.get(5); - assertEquals(postGroup.name, "POST_CLUSTER"); - assertEquals(postGroup.title, "Finalize Upgrade"); - assertEquals(4, postGroup.items.size()); - assertEquals("Confirm Finalize", postGroup.items.get(0).getText()); - 
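/* Editor's note: orchestration assertions like these run against EasyMock stubs wired up
   in getMockUpgradeContext() and makeCluster(). The record-replay pattern those helpers
   rely on, shown standalone; the Resolver interface is illustrative: */
import static org.easymock.EasyMock.createNiceMock;
import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.replay;

class EasyMockPatternDemo {
    interface Resolver { String master(String service); }

    public static void main(String[] args) {
        Resolver resolver = createNiceMock(Resolver.class);
        expect(resolver.master("HDFS")).andReturn("h1").anyTimes(); // record an expectation
        replay(resolver); // switch the mock from record mode to replay mode
        System.out.println(resolver.master("HDFS")); // h1
    }
}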
assertEquals("Execute HDFS Finalize", postGroup.items.get(1).getText()); - assertEquals("Save Cluster State", postGroup.items.get(2).getText()); - assertEquals(StageWrapper.Type.SERVER_SIDE_ACTION, postGroup.items.get(2).getType()); - assertEquals("Run On All 2.2.1.0-1234", postGroup.items.get(3).getText()); - - assertEquals(1, postGroup.items.get(3).getTasks().size()); - Set hosts = postGroup.items.get(3).getTasks().get(0).getHosts(); - assertNotNull(hosts); - assertEquals(4, hosts.size()); - - assertEquals(4, groups.get(0).items.size()); - assertEquals(5, groups.get(1).items.size()); - assertEquals(9, groups.get(2).items.size()); - assertEquals(8, groups.get(3).items.size()); - - // Do stacks cleanup - stackManagerMock.invalidateCurrentPaths(); - ambariMetaInfo.init(); - } - - - private Cluster makeCluster() throws AmbariException, AuthorizationException { - return makeCluster(true); - } - - - /** - * Create an HA cluster - * @throws AmbariException - */ - private Cluster makeCluster(boolean clean) throws AmbariException, AuthorizationException { - return makeCluster(clean, new HashSet<>()); - } - - /** - * Create an HA cluster - * @throws AmbariException - */ - private Cluster makeCluster(boolean clean, Set additionalServices) throws AmbariException, AuthorizationException { - Clusters clusters = injector.getInstance(Clusters.class); - ServiceFactory serviceFactory = injector.getInstance(ServiceFactory.class); - ServiceGroupFactory serviceGroupFactory = injector.getInstance(ServiceGroupFactory.class); - - String clusterName = "c1"; - - String repositoryVersionString = "2.1.1-1234"; - StackId stackId = new StackId("HDP-2.1.1"); - - clusters.addCluster(clusterName, stackId); - Cluster c = clusters.getCluster(clusterName); - - RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(stackId, - repositoryVersionString); - - helper.getOrCreateRepositoryVersion(STACK_ID_HDP_220, "2.2.0"); - - helper.getOrCreateRepositoryVersion(STACK_ID_HDP_220, UPGRADE_VERSION); - - for (int i = 0; i < 4; i++) { - String hostName = "h" + (i+1); - clusters.addHost(hostName); - Host host = clusters.getHost(hostName); - - Map hostAttributes = new HashMap<>(); - hostAttributes.put("os_family", "redhat"); - hostAttributes.put("os_release_version", "6"); - - host.setHostAttributes(hostAttributes); - - clusters.mapHostToCluster(hostName, clusterName); - } - - // !!! 
add services - ServiceGroup serviceGroup = serviceGroupFactory.createNew(c, "service_group", STACK_ID_HDP_211, new HashSet()); - c.addService(serviceFactory.createNew(c, serviceGroup, new ArrayList(), "HDFS", "HDFS", repositoryVersion)); - c.addService(serviceFactory.createNew(c, serviceGroup, new ArrayList(), "YARN", "YARN", repositoryVersion)); - c.addService(serviceFactory.createNew(c, serviceGroup, new ArrayList(), "ZOOKEEPER", "ZOOKEEPER", repositoryVersion)); - c.addService(serviceFactory.createNew(c, serviceGroup, new ArrayList(), "HIVE", "HIVE", repositoryVersion)); - c.addService(serviceFactory.createNew(c, serviceGroup, new ArrayList(), "OOZIE", "OOZIE", repositoryVersion)); - - Service s = c.getService("HDFS"); - ServiceComponent sc = s.addServiceComponent("NAMENODE"); - sc.addServiceComponentHost("h1"); - sc.addServiceComponentHost("h2"); - sc = s.addServiceComponent("DATANODE"); - sc.addServiceComponentHost("h2"); - sc.addServiceComponentHost("h3"); - ServiceComponentHost sch = sc.addServiceComponentHost("h4"); - - s = c.getService("ZOOKEEPER"); - sc = s.addServiceComponent("ZOOKEEPER_SERVER"); - sc.addServiceComponentHost("h1"); - sc.addServiceComponentHost("h2"); - sc.addServiceComponentHost("h3"); - - s = c.getService("YARN"); - sc = s.addServiceComponent("RESOURCEMANAGER"); - sc.addServiceComponentHost("h2"); - - sc = s.addServiceComponent("NODEMANAGER"); - sc.addServiceComponentHost("h1"); - sc.addServiceComponentHost("h3"); - - s = c.getService("HIVE"); - sc = s.addServiceComponent("HIVE_SERVER"); - sc.addServiceComponentHost("h2"); - - s = c.getService("OOZIE"); - // Oozie Server HA - sc = s.addServiceComponent("OOZIE_SERVER"); - sc.addServiceComponentHost("h2"); - sc.addServiceComponentHost("h3"); - sc = s.addServiceComponent("OOZIE_CLIENT"); - sc.addServiceComponentHost("h1"); - sc.addServiceComponentHost("h2"); - sc.addServiceComponentHost("h3"); - - // set some desired configs - Map hiveConfigs = new HashMap<>(); - hiveConfigs.put("hive.server2.transport.mode", "binary"); - hiveConfigs.put("hive.server2.thrift.port", "10001"); - - ConfigurationRequest configurationRequest = new ConfigurationRequest(); - configurationRequest.setClusterName(clusterName); - configurationRequest.setType("hive-site"); - configurationRequest.setVersionTag("version1"); - configurationRequest.setProperties(hiveConfigs); - - final ClusterRequest clusterRequest = new ClusterRequest(c.getClusterId(), - clusterName, c.getDesiredStackVersion().getStackVersion(), null); - - clusterRequest.setDesiredConfig(singletonList(configurationRequest)); - m_managementController.updateClusters(new HashSet() { - { - add(clusterRequest); - } - }, null); - - HostsType type = HostsType.normal("h1", "h2", "h3"); - expect(m_masterHostResolver.getMasterAndHosts("ZOOKEEPER", "ZOOKEEPER_SERVER")).andReturn(type).anyTimes(); - expect(m_masterHostResolver.getMasterAndHosts("HDFS", "NAMENODE")).andReturn(namenodeHosts).anyTimes(); - - if (clean) { - type = HostsType.normal("h2", "h3", "h4"); - } else { - type = HostsType.normal("h2", "h3"); - type.unhealthy = singletonList(sch); - } - expect(m_masterHostResolver.getMasterAndHosts("HDFS", "DATANODE")).andReturn(type).anyTimes(); - - type = HostsType.normal("h2"); - expect(m_masterHostResolver.getMasterAndHosts("YARN", "RESOURCEMANAGER")).andReturn(type).anyTimes(); - - type = HostsType.normal(Sets.newLinkedHashSet()); - expect(m_masterHostResolver.getMasterAndHosts("YARN", "APP_TIMELINE_SERVER")).andReturn(type).anyTimes(); - - type = HostsType.normal("h1", "h3"); - 
expect(m_masterHostResolver.getMasterAndHosts("YARN", "NODEMANAGER")).andReturn(type).anyTimes(); - - expect(m_masterHostResolver.getMasterAndHosts("HIVE", "HIVE_SERVER")).andReturn( - type).anyTimes(); - - type = HostsType.normal("h2", "h3"); - expect(m_masterHostResolver.getMasterAndHosts("OOZIE", "OOZIE_SERVER")).andReturn(type).anyTimes(); - - type = HostsType.normal("h1", "h2", "h3"); - expect(m_masterHostResolver.getMasterAndHosts("OOZIE", "OOZIE_CLIENT")).andReturn(type).anyTimes(); - - expect(m_masterHostResolver.getCluster()).andReturn(c).anyTimes(); - - for(String service : additionalServices) { - c.addService(serviceGroup, service, service, repositoryVersion); - if (service.equals("HBASE")) { - type = HostsType.normal("h1", "h2"); - expect(m_masterHostResolver.getMasterAndHosts("HBASE", "HBASE_MASTER")).andReturn(type).anyTimes(); - } - } - - replay(m_masterHostResolver); - - return c; - } - - /** - * Test that multiple execute tasks with an annotation of synchronized="true" each run in their own stage. - */ - @Test - public void testUpgradeWithMultipleTasksInOwnStage() throws Exception { - Map upgrades = ambariMetaInfo.getUpgradePacks("HDP", "2.1.1"); - - assertTrue(upgrades.containsKey("upgrade_test")); - UpgradePack upgrade = upgrades.get("upgrade_test"); - assertNotNull(upgrade); - assertTrue(upgrade.getType() == UpgradeType.ROLLING); - - List upgradePackGroups = upgrade.getGroups(Direction.UPGRADE); - - boolean foundService = false; - for (Grouping group : upgradePackGroups) { - if (group.title.equals("Oozie")) { - foundService = true; - } - } - assertTrue(foundService); - - Cluster cluster = makeCluster(); - - UpgradeContext context = getMockUpgradeContext(cluster, Direction.UPGRADE, UpgradeType.ROLLING); - - List groups = m_upgradeHelper.createSequence(upgrade, context); - - // The upgrade pack has 2 tasks for Oozie in the pre-upgrade group. - // The first task runs on "all", i.e., both Oozie Servers, whereas the - // second task runs on "any", i.e., exactly one. 
- int numPrepareStages = 0; - for (UpgradeGroupHolder group : groups) { - if (group.name.equals("OOZIE")) { - assertTrue(group.items.size() > 0); - for (StageWrapper sw : group.items) { - - if (sw.getText().equalsIgnoreCase("Preparing Oozie Server on h2 (Batch 1 of 2)") || - sw.getText().equalsIgnoreCase("Preparing Oozie Server on h3 (Batch 2 of 2)")) { - numPrepareStages++; - List taskWrappers = sw.getTasks(); - assertEquals(1, taskWrappers.size()); - List tasks = taskWrappers.get(0).getTasks(); - assertEquals(1, taskWrappers.get(0).getHosts().size()); - assertEquals(1, tasks.size()); - - ExecuteTask task = (ExecuteTask) tasks.get(0); - assertTrue("scripts/oozie_server.py".equalsIgnoreCase(task.script)); - assertTrue("stop".equalsIgnoreCase(task.function)); - } - - if (sw.getText().equalsIgnoreCase("Preparing Oozie Server on h2")) { - numPrepareStages++; - List taskWrappers = sw.getTasks(); - assertEquals(1, taskWrappers.size()); - List tasks = taskWrappers.get(0).getTasks(); - assertEquals(1, taskWrappers.get(0).getHosts().size()); - assertEquals(1, tasks.size()); - - ExecuteTask task = (ExecuteTask) tasks.get(0); - assertTrue("scripts/oozie_server_upgrade.py".equalsIgnoreCase(task.script)); - assertTrue("upgrade_oozie_database_and_sharelib".equalsIgnoreCase(task.function)); - } - } - } - } - assertEquals(3, numPrepareStages); - } - - @Test - public void testDowngradeAfterPartialUpgrade() throws Exception { - - Clusters clusters = injector.getInstance(Clusters.class); - ServiceFactory serviceFactory = injector.getInstance(ServiceFactory.class); - ServiceGroupFactory serviceGroupFactory = injector.getInstance(ServiceGroupFactory.class); - - String clusterName = "c1"; - - String version = "2.1.1.0-1234"; - StackId stackId = new StackId("HDP-2.1.1"); - clusters.addCluster(clusterName, stackId); - Cluster c = clusters.getCluster(clusterName); - - RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(stackId, version); - - for (int i = 0; i < 2; i++) { - String hostName = "h" + (i+1); - clusters.addHost(hostName); - Host host = clusters.getHost(hostName); - - Map hostAttributes = new HashMap<>(); - hostAttributes.put("os_family", "redhat"); - hostAttributes.put("os_release_version", "6"); - - host.setHostAttributes(hostAttributes); - - clusters.mapHostToCluster(hostName, clusterName); - } - - // !!! 
add services - ServiceGroup serviceGroup = serviceGroupFactory.createNew(c, "service_group", STACK_ID_HDP_211, new HashSet()); - c.addService(serviceFactory.createNew(c, serviceGroup, new ArrayList(), "HDFS", "HDFS", repositoryVersion)); - - Service s = c.getService("HDFS"); - ServiceComponent sc = s.addServiceComponent("NAMENODE"); - sc.addServiceComponentHost("h1"); - sc.addServiceComponentHost("h2"); - - List schs = c.getServiceComponentHosts("HDFS", "NAMENODE"); - assertEquals(2, schs.size()); - - HostsType type = HostsType.highAvailability("h1", "h2", new LinkedHashSet<>(emptySet())); - - expect(m_masterHostResolver.getMasterAndHosts("ZOOKEEPER", "ZOOKEEPER_SERVER")).andReturn(null).anyTimes(); - expect(m_masterHostResolver.getMasterAndHosts("HDFS", "NAMENODE")).andReturn(type).anyTimes(); - expect(m_masterHostResolver.getCluster()).andReturn(c).anyTimes(); - replay(m_masterHostResolver); - - UpgradeContext context = getMockUpgradeContext(c, Direction.DOWNGRADE, UpgradeType.ROLLING, - repositoryVersion2200); - - Map upgrades = ambariMetaInfo.getUpgradePacks("HDP", "2.1.1"); - assertTrue(upgrades.containsKey("upgrade_direction")); - UpgradePack upgrade = upgrades.get("upgrade_direction"); - assertNotNull(upgrade); - - List groups = m_upgradeHelper.createSequence(upgrade, context); - assertEquals(2, groups.size()); - - UpgradeGroupHolder group = groups.get(0); - assertEquals(1, group.items.size()); - assertEquals("PRE_POST_CLUSTER", group.name); - - group = groups.get(1); - assertEquals("POST_CLUSTER", group.name); - assertEquals(3, group.items.size()); - - - StageWrapper stage = group.items.get(1); - assertEquals("NameNode Finalize", stage.getText()); - assertEquals(1, stage.getTasks().size()); - TaskWrapper task = stage.getTasks().get(0); - assertEquals(1, task.getHosts().size()); - } - - @Test - public void testResolverWithFailedUpgrade() throws Exception { - Clusters clusters = injector.getInstance(Clusters.class); - ServiceFactory serviceFactory = injector.getInstance(ServiceFactory.class); - ServiceGroupFactory serviceGroupFactory = injector.getInstance(ServiceGroupFactory.class); - - String clusterName = "c1"; - - StackId stackId = new StackId("HDP-2.1.1"); - clusters.addCluster(clusterName, stackId); - Cluster c = clusters.getCluster(clusterName); - - for (int i = 0; i < 2; i++) { - String hostName = "h" + (i+1); - clusters.addHost(hostName); - Host host = clusters.getHost(hostName); - - Map hostAttributes = new HashMap<>(); - hostAttributes.put("os_family", "redhat"); - hostAttributes.put("os_release_version", "6"); - - host.setHostAttributes(hostAttributes); - - clusters.mapHostToCluster(hostName, clusterName); - } - - // !!! 
add services - ServiceGroup serviceGroup = serviceGroupFactory.createNew(c, "service_group", STACK_ID_HDP_211, new HashSet()); - c.addService(serviceFactory.createNew(c, serviceGroup, new ArrayList(), "ZOOKEEPER", "ZOOKEEPER", repositoryVersion2110)); - - Service s = c.getService("ZOOKEEPER"); - ServiceComponent sc = s.addServiceComponent("ZOOKEEPER_SERVER"); - - ServiceComponentHost sch1 = sc.addServiceComponentHost("h1"); - sch1.setVersion(repositoryVersion2110.getVersion()); - - ServiceComponentHost sch2 = sc.addServiceComponentHost("h2"); - sch2.setVersion(repositoryVersion2110.getVersion()); - - List schs = c.getServiceComponentHosts("ZOOKEEPER", "ZOOKEEPER_SERVER"); - assertEquals(2, schs.size()); - - UpgradeContext context = getMockUpgradeContextNoReplay(c, Direction.UPGRADE, - UpgradeType.HOST_ORDERED, repositoryVersion2110); - - MasterHostResolver resolver = new MasterHostResolver(c, m_configHelper, context); - expect(context.getResolver()).andReturn(resolver).anyTimes(); - replay(context); - - HostsType ht = resolver.getMasterAndHosts("ZOOKEEPER", "ZOOKEEPER_SERVER"); - assertEquals(0, ht.getHosts().size()); - - // !!! if one of them is failed, it should be scheduled - sch2.setUpgradeState(UpgradeState.FAILED); - - ht = resolver.getMasterAndHosts("ZOOKEEPER", "ZOOKEEPER_SERVER"); - - assertEquals(1, ht.getHosts().size()); - assertEquals("h2", ht.getHosts().iterator().next()); - } - - /** - * Test that MasterHostResolver is case-insensitive even if configs have hosts in upper case for NameNode. - * @throws Exception - */ - @Test - public void testResolverCaseInsensitive() throws Exception { - Clusters clusters = injector.getInstance(Clusters.class); - ServiceFactory serviceFactory = injector.getInstance(ServiceFactory.class); - ServiceGroupFactory serviceGroupFactory = injector.getInstance(ServiceGroupFactory.class); - - String clusterName = "c1"; - String version = "2.1.1.0-1234"; - - StackId stackId = new StackId("HDP-2.1.1"); - clusters.addCluster(clusterName, stackId); - Cluster c = clusters.getCluster(clusterName); - - RepositoryVersionEntity repositoryVersion211 = helper.getOrCreateRepositoryVersion(stackId, - version); - - for (int i = 0; i < 2; i++) { - String hostName = "h" + (i+1); - clusters.addHost(hostName); - Host host = clusters.getHost(hostName); - - Map hostAttributes = new HashMap<>(); - hostAttributes.put("os_family", "redhat"); - hostAttributes.put("os_release_version", "6"); - - host.setHostAttributes(hostAttributes); - - clusters.mapHostToCluster(hostName, clusterName); - } - - // Add services - ServiceGroup serviceGroup = serviceGroupFactory.createNew(c, "service_group", STACK_ID_HDP_211, new HashSet()); - c.addService(serviceFactory.createNew(c, serviceGroup, new ArrayList(), "HDFS", "HDFS", repositoryVersion211)); - - Service s = c.getService("HDFS"); - ServiceComponent sc = s.addServiceComponent("NAMENODE"); - sc.addServiceComponentHost("h1"); - sc.addServiceComponentHost("h2"); - - List schs = c.getServiceComponentHosts("HDFS", "NAMENODE"); - assertEquals(2, schs.size()); - - setConfigMocks(); - expect(m_configHelper.getValueFromDesiredConfigurations(c, "hdfs-site", "dfs.internal.nameservices")).andReturn("ha").anyTimes(); - expect(m_configHelper.getValueFromDesiredConfigurations(c, "hdfs-site", "dfs.ha.namenodes.ha")).andReturn("nn1,nn2").anyTimes(); - expect(m_configHelper.getValueFromDesiredConfigurations(c, "hdfs-site", "dfs.http.policy")).andReturn("HTTP_ONLY").anyTimes(); - - // Notice that these names are all caps. 
- expect(m_configHelper.getValueFromDesiredConfigurations(c, "hdfs-site", "dfs.namenode.http-address.ha.nn1")).andReturn("H1:50070").anyTimes();
- expect(m_configHelper.getValueFromDesiredConfigurations(c, "hdfs-site", "dfs.namenode.http-address.ha.nn2")).andReturn("H2:50070").anyTimes();
- replay(m_configHelper);
-
- UpgradeContext context = getMockUpgradeContextNoReplay(c, Direction.UPGRADE,
- UpgradeType.NON_ROLLING, repositoryVersion211);
-
- // use a "real" master host resolver here so that we can actually test MM
- MasterHostResolver mhr = new MockMasterHostResolver(c, m_configHelper, context);
-
- expect(context.getResolver()).andReturn(mhr).anyTimes();
- replay(context);
-
- HostsType ht = mhr.getMasterAndHosts("HDFS", "NAMENODE");
- assertNotNull(ht.getMasters());
- assertNotNull(ht.getSecondaries());
- assertEquals(2, ht.getHosts().size());
-
- // Should be stored in lowercase (see the lower-casing sketch after the end of this diff).
- assertTrue(ht.getHosts().contains("h1"));
- assertTrue(ht.getHosts().contains("h2"));
- }
-
- @Test
- public void testResolverBadJmx() throws Exception {
- Clusters clusters = injector.getInstance(Clusters.class);
- ServiceFactory serviceFactory = injector.getInstance(ServiceFactory.class);
- ServiceGroupFactory serviceGroupFactory = injector.getInstance(ServiceGroupFactory.class);
-
- String clusterName = "c1";
- String version = "2.1.1.0-1234";
-
- StackId stackId = new StackId("HDP-2.1.1");
- clusters.addCluster(clusterName, stackId);
- Cluster c = clusters.getCluster(clusterName);
-
- RepositoryVersionEntity repositoryVersion211 = helper.getOrCreateRepositoryVersion(stackId, version);
-
- for (int i = 0; i < 2; i++) {
- String hostName = "h" + (i+1);
- clusters.addHost(hostName);
- Host host = clusters.getHost(hostName);
-
- Map<String, String> hostAttributes = new HashMap<>();
- hostAttributes.put("os_family", "redhat");
- hostAttributes.put("os_release_version", "6");
-
- host.setHostAttributes(hostAttributes);
-
- clusters.mapHostToCluster(hostName, clusterName);
- }
-
- // Add services
- ServiceGroup serviceGroup = serviceGroupFactory.createNew(c, "service_group", STACK_ID_HDP_211, new HashSet<>());
- c.addService(serviceFactory.createNew(c, serviceGroup, new ArrayList<>(), "HDFS", "HDFS", repositoryVersion211));
-
- Service s = c.getService("HDFS");
- ServiceComponent sc = s.addServiceComponent("NAMENODE");
- sc.addServiceComponentHost("h1");
- sc.addServiceComponentHost("h2");
-
- List<ServiceComponentHost> schs = c.getServiceComponentHosts("HDFS", "NAMENODE");
- assertEquals(2, schs.size());
-
- setConfigMocks();
- expect(m_configHelper.getValueFromDesiredConfigurations(c, "hdfs-site", "dfs.internal.nameservices")).andReturn("ha").anyTimes();
- expect(m_configHelper.getValueFromDesiredConfigurations(c, "hdfs-site", "dfs.ha.namenodes.ha")).andReturn("nn1,nn2").anyTimes();
- expect(m_configHelper.getValueFromDesiredConfigurations(c, "hdfs-site", "dfs.http.policy")).andReturn("HTTP_ONLY").anyTimes();
-
- // Notice that these names are all caps.
- expect(m_configHelper.getValueFromDesiredConfigurations(c, "hdfs-site", "dfs.namenode.http-address.ha.nn1")).andReturn("H1:50070").anyTimes(); - expect(m_configHelper.getValueFromDesiredConfigurations(c, "hdfs-site", "dfs.namenode.http-address.ha.nn2")).andReturn("H2:50070").anyTimes(); - replay(m_configHelper); - - UpgradeContext context = getMockUpgradeContextNoReplay(c, Direction.UPGRADE, - UpgradeType.NON_ROLLING, repositoryVersion211); - - // use a "real" master host resolver here so that we can actually test MM - MasterHostResolver mhr = new BadMasterHostResolver(c, m_configHelper, context); - - expect(context.getResolver()).andReturn(mhr).anyTimes(); - replay(context); - - HostsType ht = mhr.getMasterAndHosts("HDFS", "NAMENODE"); - assertNotNull(ht.getMasters()); - assertNotNull(ht.getSecondaries()); - assertEquals(2, ht.getHosts().size()); - - // Should be stored in lowercase. - assertTrue(ht.getHosts().contains("h1")); - assertTrue(ht.getHosts().contains("h2")); - } - - - /** - * Tests that advanced {@link Grouping} instances like {@link StopGrouping} - * work with rolling upgrade packs. - * - * @throws Exception - */ - @Test - public void testRollingUpgradesCanUseAdvancedGroupings() throws Exception { - final String clusterName = "c1"; - final StackId sourceStackId = new StackId("HDP", "2.1.1"); - final StackId targetStackId = new StackId("HDP", "2.2.0"); - final Direction upgradeDirection = Direction.UPGRADE; - final UpgradeType upgradeType = UpgradeType.ROLLING; - - Cluster cluster = makeCluster(); - - // grab the right pack - String preferredUpgradePackName = "upgrade_grouping_rolling"; - UpgradePack upgradePack = m_upgradeHelper.suggestUpgradePack(clusterName, sourceStackId, - targetStackId, upgradeDirection, upgradeType, preferredUpgradePackName); - - assertEquals(upgradeType, upgradePack.getType()); - - // get an upgrade - UpgradeContext context = getMockUpgradeContext(cluster, Direction.UPGRADE, UpgradeType.ROLLING, - repositoryVersion2210, RepositoryType.STANDARD, Collections.singleton("ZOOKEEPER")); - - List groupings = upgradePack.getGroups(Direction.UPGRADE); - assertEquals(2, groupings.size()); - assertEquals("STOP_ZOOKEEPER", groupings.get(0).name); - assertEquals("RESTART_ZOOKEEPER", groupings.get(1).name); - - List groups = m_upgradeHelper.createSequence(upgradePack, context); - - assertEquals(2, groups.size()); - - assertEquals("STOP_ZOOKEEPER", groups.get(0).name); - assertEquals("RESTART_ZOOKEEPER", groups.get(1).name); - - // STOP_ZOOKEEPER GROUP - UpgradeGroupHolder group = groups.get(0); - - // Check that the upgrade framework properly expanded the STOP grouping into - // STOP tasks - assertEquals("Stopping ZooKeeper Server on h1 (Batch 1 of 3)", group.items.get(0).getText()); - } - - @Test - public void testOrchestrationNoServerSideOnDowngrade() throws Exception { - Clusters clusters = injector.getInstance(Clusters.class); - ServiceFactory serviceFactory = injector.getInstance(ServiceFactory.class); - ServiceGroupFactory serviceGroupFactory = injector.getInstance(ServiceGroupFactory.class); - - String clusterName = "c1"; - - String version = "2.1.1.0-1234"; - StackId stackId = new StackId("HDP-2.1.1"); - StackId stackId2 = new StackId("HDP-2.2.0"); - clusters.addCluster(clusterName, stackId); - Cluster c = clusters.getCluster(clusterName); - - RepositoryVersionEntity repoVersion211 = helper.getOrCreateRepositoryVersion(stackId, - version); - - RepositoryVersionEntity repoVersion220 = helper.getOrCreateRepositoryVersion(stackId2, "2.2.0"); - - for (int i = 
0; i < 2; i++) { - String hostName = "h" + (i+1); - clusters.addHost(hostName); - Host host = clusters.getHost(hostName); - - Map hostAttributes = new HashMap<>(); - hostAttributes.put("os_family", "redhat"); - hostAttributes.put("os_release_version", "6"); - - host.setHostAttributes(hostAttributes); - - clusters.mapHostToCluster(hostName, clusterName); - } - - // !!! add storm - ServiceGroup serviceGroup = serviceGroupFactory.createNew(c, "service_group", STACK_ID_HDP_211, new HashSet()); - c.addService(serviceFactory.createNew(c, serviceGroup, new ArrayList(), "STORM", "STORM", repoVersion211)); - - Service s = c.getService("STORM"); - ServiceComponent sc = s.addServiceComponent("NIMBUS"); - ServiceComponentHost sch1 = sc.addServiceComponentHost("h1"); - ServiceComponentHost sch2 = sc.addServiceComponentHost("h2"); - - UpgradePack upgradePack = new UpgradePack() { - @Override - public List getGroups(Direction direction) { - - Grouping g = new Grouping(); - - OrderService orderService = new OrderService(); - orderService.serviceName = "STORM"; - orderService.components = singletonList("NIMBUS"); - - g.name = "GROUP1"; - g.title = "Nimbus Group"; - g.services.add(orderService); - - return Lists.newArrayList(g); - } - - @Override - public Map> getTasks() { - ManualTask mt = new ManualTask(); - mt.messages = Lists.newArrayList("My New Message"); - - ProcessingComponent pc = new ProcessingComponent(); - pc.name = "NIMBUS_MESSAGE"; - pc.preTasks = Lists.newArrayList(mt); - - return Collections.singletonMap("STORM", Collections.singletonMap("NIMBUS", pc)); - } - - }; - - UpgradeContext context = getMockUpgradeContextNoReplay(c, Direction.UPGRADE, - UpgradeType.NON_ROLLING, repoVersion220); - - // use a "real" master host resolver here so that we can actually test MM - MasterHostResolver masterHostResolver = new MasterHostResolver(c, m_configHelper, context); - - expect(context.getResolver()).andReturn(masterHostResolver).anyTimes(); - replay(context); - - List groups = m_upgradeHelper.createSequence(upgradePack, context); - - assertEquals(1, groups.size()); - - sch1.setVersion(repoVersion211.getVersion()); - sch2.setVersion(repoVersion211.getVersion()); - - context = getMockUpgradeContextNoReplay(c, Direction.DOWNGRADE, UpgradeType.NON_ROLLING, - repoVersion211); - - // use a "real" master host resolver here so that we can actually test MM - masterHostResolver = new MasterHostResolver(c, m_configHelper, context); - - expect(context.getResolver()).andReturn(masterHostResolver).anyTimes(); - replay(context); - - groups = m_upgradeHelper.createSequence(upgradePack, context); - - assertTrue(groups.isEmpty()); - } - - @Test - public void testMultipleServerTasks() throws Exception { - - // !!! 
make a two node cluster with just ZK - Clusters clusters = injector.getInstance(Clusters.class); - ServiceFactory serviceFactory = injector.getInstance(ServiceFactory.class); - ServiceGroupFactory serviceGroupFactory = injector.getInstance(ServiceGroupFactory.class); - - String clusterName = "c1"; - - String version = "2.1.1.0-1234"; - StackId stackId = new StackId("HDP-2.1.1"); - StackId stackId2 = new StackId("HDP-2.2.0"); - - clusters.addCluster(clusterName, stackId); - Cluster c = clusters.getCluster(clusterName); - - RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(stackId, - version); - - RepositoryVersionEntity repoVersion220 = helper.getOrCreateRepositoryVersion(stackId2, "2.2.0"); - - helper.getOrCreateRepositoryVersion(stackId2, UPGRADE_VERSION); - - for (int i = 0; i < 2; i++) { - String hostName = "h" + (i+1); - clusters.addHost(hostName); - Host host = clusters.getHost(hostName); - - Map hostAttributes = new HashMap<>(); - hostAttributes.put("os_family", "redhat"); - hostAttributes.put("os_release_version", "6"); - host.setHostAttributes(hostAttributes); - - clusters.mapHostToCluster(hostName, clusterName); - } - - // !!! add services - ServiceGroup serviceGroup = serviceGroupFactory.createNew(c, "service_group", STACK_ID_HDP_211, new HashSet()); - c.addService(serviceFactory.createNew(c, serviceGroup, new ArrayList(), "ZOOKEEPER", "ZOOKEEPER", repositoryVersion)); - - Service s = c.getService("ZOOKEEPER"); - ServiceComponent sc = s.addServiceComponent("ZOOKEEPER_SERVER"); - sc.addServiceComponentHost("h1"); - sc.addServiceComponentHost("h2"); - - sc = s.addServiceComponent("ZOOKEEPER_CLIENT"); - sc.addServiceComponentHost("h1"); - sc.addServiceComponentHost("h2"); - - EasyMock.reset(m_masterHostResolver); - - expect(m_masterHostResolver.getCluster()).andReturn(c).anyTimes(); - - HostsType type = HostsType.normal("h1", "h2"); - expect(m_masterHostResolver.getMasterAndHosts("ZOOKEEPER", "ZOOKEEPER_SERVER")).andReturn(type).anyTimes(); - - type = HostsType.normal("h1", "h2"); - expect(m_masterHostResolver.getMasterAndHosts("ZOOKEEPER", "ZOOKEEPER_CLIENT")).andReturn(type).anyTimes(); - - - replay(m_masterHostResolver); - - Map upgrades = ambariMetaInfo.getUpgradePacks("HDP", "2.1.1"); - - ServiceInfo si = ambariMetaInfo.getService("HDP", "2.1.1", "ZOOKEEPER"); - si.setDisplayName("Zk"); - ComponentInfo ci = si.getComponentByName("ZOOKEEPER_SERVER"); - ci.setDisplayName("ZooKeeper1 Server2"); - - UpgradePack upgrade = upgrades.get("upgrade_multi_server_tasks"); - assertNotNull(upgrade); - - UpgradeContext context = getMockUpgradeContext(c, Direction.UPGRADE, UpgradeType.NON_ROLLING, - repoVersion220); - - List groups = m_upgradeHelper.createSequence(upgrade, context); - - assertEquals(2, groups.size()); - - - // zk server as a colocated grouping first. XML says to run a manual, 2 configs, and an execute - UpgradeGroupHolder group1 = groups.get(0); - assertEquals(7, group1.items.size()); - - // Stage 1. 
manual, 2 configs, execute - assertEquals(4, group1.items.get(0).getTasks().size()); - TaskWrapper taskWrapper = group1.items.get(0).getTasks().get(0); - assertEquals(1, taskWrapper.getTasks().size()); - assertEquals(Task.Type.MANUAL, taskWrapper.getTasks().get(0).getType()); - - taskWrapper = group1.items.get(0).getTasks().get(1); - assertEquals(1, taskWrapper.getTasks().size()); - assertEquals(Task.Type.CONFIGURE, taskWrapper.getTasks().get(0).getType()); - - taskWrapper = group1.items.get(0).getTasks().get(2); - assertEquals(1, taskWrapper.getTasks().size()); - assertEquals(Task.Type.CONFIGURE, taskWrapper.getTasks().get(0).getType()); - - taskWrapper = group1.items.get(0).getTasks().get(3); - assertEquals(1, taskWrapper.getTasks().size()); - assertEquals(Task.Type.EXECUTE, taskWrapper.getTasks().get(0).getType()); - - // Stage 2. restart for h1 - assertEquals(1, group1.items.get(1).getTasks().size()); - taskWrapper = group1.items.get(1).getTasks().get(0); - assertEquals(1, taskWrapper.getTasks().size()); - assertEquals(Task.Type.RESTART, taskWrapper.getTasks().get(0).getType()); - assertTrue(taskWrapper.getHosts().contains("h1")); - - // Stage 3. service check - assertEquals(1, group1.items.get(2).getTasks().size()); - taskWrapper = group1.items.get(2).getTasks().get(0); - assertEquals(1, taskWrapper.getTasks().size()); - assertEquals(Task.Type.SERVICE_CHECK, taskWrapper.getTasks().get(0).getType()); - - // stage 4. manual step for validation - assertEquals(1, group1.items.get(3).getTasks().size()); - taskWrapper = group1.items.get(3).getTasks().get(0); - assertEquals(1, taskWrapper.getTasks().size()); - assertEquals(Task.Type.MANUAL, taskWrapper.getTasks().get(0).getType()); - - // Stage 5. repeat execute as it's not a server-side task. no configure or manual tasks - assertEquals(1, group1.items.get(4).getTasks().size()); - taskWrapper = group1.items.get(4).getTasks().get(0); - assertEquals(1, taskWrapper.getTasks().size()); - assertEquals(Task.Type.EXECUTE, taskWrapper.getTasks().get(0).getType()); - - // Stage 6. restart for h2. - assertEquals(1, group1.items.get(5).getTasks().size()); - taskWrapper = group1.items.get(5).getTasks().get(0); - assertEquals(1, taskWrapper.getTasks().size()); - assertEquals(Task.Type.RESTART, taskWrapper.getTasks().get(0).getType()); - assertTrue(taskWrapper.getHosts().contains("h2")); - - // Stage 7. service check - assertEquals(1, group1.items.get(6).getTasks().size()); - taskWrapper = group1.items.get(6).getTasks().get(0); - assertEquals(1, taskWrapper.getTasks().size()); - assertEquals(Task.Type.SERVICE_CHECK, taskWrapper.getTasks().get(0).getType()); - - - // zk client - UpgradeGroupHolder group2 = groups.get(1); - assertEquals(5, group2.items.size()); - - // Stage 1. Configure - assertEquals(1, group2.items.get(0).getTasks().size()); - taskWrapper = group2.items.get(0).getTasks().get(0); - assertEquals(1, taskWrapper.getTasks().size()); - assertEquals(Task.Type.CONFIGURE, taskWrapper.getTasks().get(0).getType()); - - // Stage 2. Custom class - assertEquals(1, group2.items.get(1).getTasks().size()); - taskWrapper = group2.items.get(1).getTasks().get(0); - assertEquals(1, taskWrapper.getTasks().size()); - assertEquals(Task.Type.SERVER_ACTION, taskWrapper.getTasks().get(0).getType()); - - // Stage 3. 
Restart client on h1 - assertEquals(1, group2.items.get(2).getTasks().size()); - taskWrapper = group2.items.get(2).getTasks().get(0); - assertEquals(1, taskWrapper.getTasks().size()); - assertEquals(Task.Type.RESTART, taskWrapper.getTasks().get(0).getType()); - - // Stage 4. Restart client on h2 (no configure or custom class) - assertEquals(1, group2.items.get(3).getTasks().size()); - taskWrapper = group2.items.get(3).getTasks().get(0); - assertEquals(1, taskWrapper.getTasks().size()); - assertEquals(Task.Type.RESTART, taskWrapper.getTasks().get(0).getType()); - - // Stage 5. service check - assertEquals(1, group2.items.get(4).getTasks().size()); - taskWrapper = group2.items.get(4).getTasks().get(0); - assertEquals(1, taskWrapper.getTasks().size()); - assertEquals(Task.Type.SERVICE_CHECK, taskWrapper.getTasks().get(0).getType()); - - } - - - - - /** - * Tests {@link UpgradeType#HOST_ORDERED}, specifically that the orchestration - * can properly expand the single {@link HostOrderGrouping} and create the - * correct stages based on the dependencies of the components. - * - * @throws Exception - */ - @Test - public void testHostGroupingOrchestration() throws Exception { - Clusters clusters = injector.getInstance(Clusters.class); - ServiceFactory serviceFactory = injector.getInstance(ServiceFactory.class); - ServiceGroupFactory serviceGroupFactory = injector.getInstance(ServiceGroupFactory.class); - - String clusterName = "c1"; - - String version = "2.1.1.0-1234"; - StackId stackId = new StackId("HDP-2.1.1"); - StackId stackId2 = new StackId("HDP-2.2.0"); - clusters.addCluster(clusterName, stackId); - Cluster c = clusters.getCluster(clusterName); - - RepositoryVersionEntity repoVersion211 = helper.getOrCreateRepositoryVersion(stackId, version); - - RepositoryVersionEntity repoVersion220 = helper.getOrCreateRepositoryVersion(stackId2, "2.2.0"); - - // create 2 hosts - for (int i = 0; i < 2; i++) { - String hostName = "h" + (i+1); - clusters.addHost(hostName); - Host host = clusters.getHost(hostName); - - Map hostAttributes = new HashMap<>(); - hostAttributes.put("os_family", "redhat"); - hostAttributes.put("os_release_version", "6"); - - host.setHostAttributes(hostAttributes); - - clusters.mapHostToCluster(hostName, clusterName); - } - - // add ZK Server to both hosts, and then Nimbus to only 1 - this will test - // how the HOU breaks out dependencies into stages - ServiceGroup serviceGroup = serviceGroupFactory.createNew(c, "service_group", STACK_ID_HDP_211, new HashSet()); - c.addService(serviceFactory.createNew(c, serviceGroup, new ArrayList(), "ZOOKEEPER", "ZOOKEEPER", repoVersion211)); - c.addService(serviceFactory.createNew(c, serviceGroup, new ArrayList(), "HBASE", "HBASE", repoVersion211)); - Service zookeeper = c.getService("ZOOKEEPER"); - Service hbase = c.getService("HBASE"); - ServiceComponent zookeeperServer = zookeeper.addServiceComponent("ZOOKEEPER_SERVER"); - ServiceComponentHost zookeeperServer1 = zookeeperServer.addServiceComponentHost("h1"); - ServiceComponentHost zookeeperServer2 = zookeeperServer.addServiceComponentHost("h2"); - ServiceComponent hbaseMaster = hbase.addServiceComponent("HBASE_MASTER"); - ServiceComponentHost hbaseMaster1 = hbaseMaster.addServiceComponentHost("h1"); - - // !!! 
make a custom grouping - HostOrderItem hostItem = new HostOrderItem(HostOrderActionType.HOST_UPGRADE, - Lists.newArrayList("h1", "h2")); - - HostOrderItem checkItem = new HostOrderItem(HostOrderActionType.SERVICE_CHECK, - Lists.newArrayList("ZOOKEEPER", "HBASE")); - - Grouping g = new HostOrderGrouping(); - ((HostOrderGrouping) g).setHostOrderItems(Lists.newArrayList(hostItem, checkItem)); - g.title = "Some Title"; - - UpgradePack upgradePack = new UpgradePack(); - - // !!! set the groups directly; allow the logic in getGroups(Direction) to happen - Field field = UpgradePack.class.getDeclaredField("groups"); - field.setAccessible(true); - field.set(upgradePack, Lists.newArrayList(g)); - - field = UpgradePack.class.getDeclaredField("type" ); - field.setAccessible(true); - field.set(upgradePack, UpgradeType.HOST_ORDERED); - - UpgradeContext context = getMockUpgradeContextNoReplay(c, Direction.UPGRADE, - UpgradeType.HOST_ORDERED, repoVersion220); - - MasterHostResolver resolver = new MasterHostResolver(c, m_configHelper, context); - expect(context.getResolver()).andReturn(resolver).anyTimes(); - replay(context); - - - List groups = m_upgradeHelper.createSequence(upgradePack, context); - - assertEquals(1, groups.size()); - - UpgradeGroupHolder holder = groups.get(0); - assertEquals(9, holder.items.size()); - - for (int i = 0; i < 7; i++) { - StageWrapper w = holder.items.get(i); - if (i == 0 || i == 4) { - assertEquals(StageWrapper.Type.STOP, w.getType()); - } else if (i == 1 || i == 5) { - assertEquals(StageWrapper.Type.SERVER_SIDE_ACTION, w.getType()); - assertEquals(1, w.getTasks().size()); - assertEquals(1, w.getTasks().get(0).getTasks().size()); - Task t = w.getTasks().get(0).getTasks().get(0); - assertEquals(ManualTask.class, t.getClass()); - ManualTask mt = (ManualTask) t; - assertNotNull(mt.structuredOut); - assertTrue(mt.structuredOut.contains("type")); - assertTrue(mt.structuredOut.contains(HostOrderItem.HostOrderActionType.HOST_UPGRADE.toString())); - assertTrue(mt.structuredOut.contains("host")); - assertTrue(mt.structuredOut.contains(i == 1 ? "h1" : "h2")); - } else { - assertEquals(StageWrapper.Type.RESTART, w.getType()); - } - } - - assertEquals(StageWrapper.Type.SERVICE_CHECK, holder.items.get(7).getType()); - assertEquals(StageWrapper.Type.SERVICE_CHECK, holder.items.get(8).getType()); - - // !!! test downgrade when all host components have failed - zookeeperServer1.setVersion(repoVersion211.getVersion()); - zookeeperServer2.setVersion(repoVersion211.getVersion()); - hbaseMaster1.setVersion(repoVersion211.getVersion()); - - context = getMockUpgradeContextNoReplay(c, Direction.DOWNGRADE, UpgradeType.HOST_ORDERED, - repoVersion211); - - resolver = new MasterHostResolver(c, m_configHelper, context); - expect(context.getResolver()).andReturn(resolver).anyTimes(); - replay(context); - - groups = m_upgradeHelper.createSequence(upgradePack, context); - - assertEquals(1, groups.size()); - assertEquals(2, groups.get(0).items.size()); - - // !!! 
test downgrade when one of the hosts had failed - zookeeperServer1.setVersion(repoVersion211.getVersion()); - zookeeperServer2.setVersion(repoVersion220.getVersion()); - hbaseMaster1.setVersion(repoVersion211.getVersion()); - - context = getMockUpgradeContextNoReplay(c, Direction.DOWNGRADE, UpgradeType.HOST_ORDERED, - repoVersion211); - - resolver = new MasterHostResolver(c, m_configHelper, context); - expect(context.getResolver()).andReturn(resolver).anyTimes(); - replay(context); - - groups = m_upgradeHelper.createSequence(upgradePack, context); - - assertEquals(1, groups.size()); - assertEquals(5, groups.get(0).items.size()); - } - - /** - * Tests that the {@link SecurityCondition} element correctly restricts the groups in - * an upgrade. - * - * @throws Exception - */ - @Test - public void testUpgradeConditions() throws Exception { - Map upgrades = ambariMetaInfo.getUpgradePacks("HDP", "2.2.0"); - assertTrue(upgrades.containsKey("upgrade_test_conditions")); - UpgradePack upgrade = upgrades.get("upgrade_test_conditions"); - assertNotNull(upgrade); - - Cluster cluster = makeCluster(); - - UpgradeContext context = getMockUpgradeContext(cluster, Direction.UPGRADE, UpgradeType.ROLLING); - - // initially, no conditions should be met, so only 1 group should be - // available - List groups = m_upgradeHelper.createSequence(upgrade, context); - assertEquals(1, groups.size()); - - // from that 1 group, only 1 task is condition-less - List stageWrappers = groups.get(0).items; - assertEquals(1, stageWrappers.size()); - assertEquals(1, stageWrappers.get(0).getTasks().size()); - - // set the configuration property and try again - Map fooConfigs = new HashMap<>(); - fooConfigs.put("foo-property", "foo-value"); - ConfigurationRequest configurationRequest = new ConfigurationRequest(); - configurationRequest.setClusterName(cluster.getClusterName()); - configurationRequest.setType("foo-site"); - configurationRequest.setVersionTag("version1"); - configurationRequest.setProperties(fooConfigs); - - final ClusterRequest clusterRequest = new ClusterRequest(cluster.getClusterId(), - cluster.getClusterName(), cluster.getDesiredStackVersion().getStackVersion(), null); - - clusterRequest.setDesiredConfig(singletonList(configurationRequest)); - m_managementController.updateClusters(Sets.newHashSet(clusterRequest), null); - - // the config condition should now be set - groups = m_upgradeHelper.createSequence(upgrade, context); - assertEquals(2, groups.size()); - assertEquals("ZOOKEEPER_CONFIG_CONDITION_TEST", groups.get(0).name); - - // now change the cluster security so the other conditions come back too - cluster.setSecurityType(SecurityType.KERBEROS); - - groups = m_upgradeHelper.createSequence(upgrade, context); - assertEquals(4, groups.size()); - } - - /** - * Tests merging configurations between existing and new stack values on - * upgrade. 
- * - * @throws Exception - */ - @Test - public void testMergeConfigurations() throws Exception { - RepositoryVersionEntity repoVersion211 = createNiceMock(RepositoryVersionEntity.class); - RepositoryVersionEntity repoVersion220 = createNiceMock(RepositoryVersionEntity.class); - - StackId stack211 = new StackId("HDP-2.1.1"); - StackId stack220 = new StackId("HDP-2.2.0"); - - String version211 = "2.1.1.0-1234"; - String version220 = "2.2.0.0-1234"; - - expect(repoVersion211.getStackId()).andReturn(stack211).atLeastOnce(); - expect(repoVersion211.getVersion()).andReturn(version211).atLeastOnce(); - - expect(repoVersion220.getStackId()).andReturn(stack220).atLeastOnce(); - expect(repoVersion220.getVersion()).andReturn(version220).atLeastOnce(); - - Map> stack211Configs = new HashMap<>(); - Map stack211FooType = new HashMap<>(); - Map stack211BarType = new HashMap<>(); - Map stack211BazType = new HashMap<>(); - stack211Configs.put("foo-site", stack211FooType); - stack211Configs.put("bar-site", stack211BarType); - stack211Configs.put("baz-site", stack211BazType); - stack211FooType.put("1", "one"); - stack211FooType.put("1A", "one-A"); - stack211BarType.put("2", "two"); - stack211BazType.put("3", "three"); - - Map> stack220Configs = new HashMap<>(); - Map stack220FooType = new HashMap<>(); - Map stack220BazType = new HashMap<>(); - stack220Configs.put("foo-site", stack220FooType); - stack220Configs.put("baz-site", stack220BazType); - stack220FooType.put("1", "one-new"); - stack220FooType.put("1A1", "one-A-one"); - stack220BazType.put("3", "three-new"); - - Map existingFooType = new HashMap<>(); - Map existingBarType = new HashMap<>(); - Map existingBazType = new HashMap<>(); - - ClusterConfigEntity fooConfigEntity = createNiceMock(ClusterConfigEntity.class); - ClusterConfigEntity barConfigEntity = createNiceMock(ClusterConfigEntity.class); - ClusterConfigEntity bazConfigEntity = createNiceMock(ClusterConfigEntity.class); - - expect(fooConfigEntity.getType()).andReturn("foo-site"); - expect(barConfigEntity.getType()).andReturn("bar-site"); - expect(bazConfigEntity.getType()).andReturn("baz-site"); - - Config fooConfig = createNiceMock(Config.class); - Config barConfig = createNiceMock(Config.class); - Config bazConfig = createNiceMock(Config.class); - - existingFooType.put("1", "one"); - existingFooType.put("1A", "one-A"); - existingBarType.put("2", "two"); - existingBazType.put("3", "three-changed"); - - expect(fooConfig.getType()).andReturn("foo-site").atLeastOnce(); - expect(barConfig.getType()).andReturn("bar-site").atLeastOnce(); - expect(bazConfig.getType()).andReturn("baz-site").atLeastOnce(); - expect(fooConfig.getProperties()).andReturn(existingFooType); - expect(barConfig.getProperties()).andReturn(existingBarType); - expect(bazConfig.getProperties()).andReturn(existingBazType); - - Map desiredConfigurations = new HashMap<>(); - desiredConfigurations.put("foo-site", null); - desiredConfigurations.put("bar-site", null); - desiredConfigurations.put("baz-site", null); - - Service zookeeper = createNiceMock(Service.class); - expect(zookeeper.getName()).andReturn("ZOOKEEPER").anyTimes(); - expect(zookeeper.getServiceType()).andReturn("ZOOKEEPER").anyTimes(); - expect(zookeeper.getServiceComponents()).andReturn( - new HashMap<>()).once(); - zookeeper.setDesiredRepositoryVersion(repoVersion220); - expectLastCall().once(); - - Cluster cluster = createNiceMock(Cluster.class); - expect(cluster.getCurrentStackVersion()).andReturn(stack211).atLeastOnce(); - 
expect(cluster.getDesiredStackVersion()).andReturn(stack220); - expect(cluster.getDesiredConfigs()).andReturn(desiredConfigurations); - expect(cluster.getDesiredConfigByType("foo-site")).andReturn(fooConfig); - expect(cluster.getDesiredConfigByType("bar-site")).andReturn(barConfig); - expect(cluster.getDesiredConfigByType("baz-site")).andReturn(bazConfig); - expect(cluster.getService("ZOOKEEPER")).andReturn(zookeeper).anyTimes(); - expect(cluster.getService(anyString(), eq("ZOOKEEPER"))).andReturn(zookeeper).anyTimes(); - expect(cluster.getDesiredConfigByType("foo-type")).andReturn(fooConfig); - expect(cluster.getDesiredConfigByType("bar-type")).andReturn(barConfig); - expect(cluster.getDesiredConfigByType("baz-type")).andReturn(bazConfig); - - // setup the config helper for placeholder resolution - @SuppressWarnings("unchecked") - Provider configHelperProvider = EasyMock.createNiceMock(Provider.class); - ConfigHelper configHelper = EasyMock.createNiceMock(ConfigHelper.class); - - expect(configHelperProvider.get()).andStubReturn(configHelper); - - expect(configHelper.getDefaultProperties(stack211, "ZOOKEEPER")).andReturn( - stack211Configs).anyTimes(); - - expect(configHelper.getDefaultProperties(stack220, "ZOOKEEPER")).andReturn( - stack220Configs).anyTimes(); - - Capture>> expectedConfigurationsCapture = EasyMock.newCapture(); - - configHelper.createConfigTypes(EasyMock.anyObject(Cluster.class), - EasyMock.anyObject(StackId.class), EasyMock.anyObject(AmbariManagementController.class), - EasyMock.capture(expectedConfigurationsCapture), EasyMock.anyObject(String.class), - EasyMock.anyObject(String.class)); - - expectLastCall().once(); - EasyMock.replay(configHelperProvider, configHelper); - - // mock the service config DAO and replay it - ServiceConfigEntity zookeeperServiceConfig = createNiceMock(ServiceConfigEntity.class); - expect(zookeeperServiceConfig.getClusterConfigEntities()).andReturn( - Lists.newArrayList(fooConfigEntity, barConfigEntity, bazConfigEntity)); - - ServiceConfigDAO serviceConfigDAOMock; - serviceConfigDAOMock = EasyMock.createNiceMock(ServiceConfigDAO.class); - - List latestServiceConfigs = Lists.newArrayList(zookeeperServiceConfig); - expect(serviceConfigDAOMock.getLastServiceConfigsForService(EasyMock.anyLong(), - EasyMock.anyLong())).andReturn(latestServiceConfigs).once(); - - replay(serviceConfigDAOMock); - - Map upgradePacks = ambariMetaInfo.getUpgradePacks("HDP", "2.1.1"); - UpgradePack upgradePack = upgradePacks.get("upgrade_to_new_stack"); - - UpgradeContext context = createNiceMock(UpgradeContext.class); - expect(context.getCluster()).andReturn(cluster).atLeastOnce(); - expect(context.getType()).andReturn(UpgradeType.ROLLING).atLeastOnce(); - expect(context.getDirection()).andReturn(Direction.UPGRADE).atLeastOnce(); - expect(context.getRepositoryVersion()).andReturn(repoVersion220).anyTimes(); - expect(context.getSupportedServices()).andReturn(Sets.newHashSet("ZOOKEEPER")).atLeastOnce(); - expect(context.getSourceRepositoryVersion(anyString())).andReturn(repoVersion211).atLeastOnce(); - expect(context.getTargetRepositoryVersion(anyString())).andReturn(repoVersion220).atLeastOnce(); - expect(context.getOrchestrationType()).andReturn(RepositoryType.STANDARD).anyTimes(); - expect(context.getAmbariMetaInfo()).andReturn(ambariMetaInfo).anyTimes(); - expect(context.getHostRoleCommandFactory()).andStubReturn(injector.getInstance(HostRoleCommandFactory.class)); - expect(context.getRoleGraphFactory()).andStubReturn(injector.getInstance(RoleGraphFactory.class)); - 
expect(context.getUpgradePack()).andReturn(upgradePack).atLeastOnce(); - - replayAll(); - - UpgradeHelper upgradeHelper = injector.getInstance(UpgradeHelper.class); - upgradeHelper.m_serviceConfigDAO = serviceConfigDAOMock; - upgradeHelper.m_configHelperProvider = configHelperProvider; - upgradeHelper.updateDesiredRepositoriesAndConfigs(context); - - Map> expectedConfigurations = expectedConfigurationsCapture.getValue(); - Map expectedFooType = expectedConfigurations.get("foo-site"); - Map expectedBarType = expectedConfigurations.get("bar-site"); - Map expectedBazType = expectedConfigurations.get("baz-site"); - - // As the upgrade pack did not have any Flume updates, its configs should - // not be updated. - assertEquals(3, expectedConfigurations.size()); - - // the really important values are one-new and three-changed; one-new - // indicates that the new stack value is changed since it was not customized - // while three-changed represents that the customized value was preserved - // even though the stack value changed - assertEquals("one-new", expectedFooType.get("1")); - assertEquals("one-A", expectedFooType.get("1A")); - assertEquals("two", expectedBarType.get("2")); - assertEquals("three-changed", expectedBazType.get("3")); - } - - @Test - public void testMergeConfigurationsWithClusterEnv() throws Exception { - Cluster cluster = makeCluster(true); - - StackId oldStack = cluster.getDesiredStackVersion(); - StackId newStack = new StackId("HDP-2.5.0"); - - ConfigFactory cf = injector.getInstance(ConfigFactory.class); - - Config clusterEnv = cf.createNew(cluster, "cluster-env", "version1", - ImmutableMap.builder().put("a", "b").build(), - Collections.emptyMap()); - - Config zooCfg = cf.createNew(cluster, "zoo.cfg", "version1", - ImmutableMap.builder().put("c", "d").build(), - Collections.emptyMap()); - - cluster.addDesiredConfig("admin", Sets.newHashSet(clusterEnv, zooCfg)); - - Map> stackMap = new HashMap<>(); - stackMap.put("cluster-env", new HashMap<>()); - stackMap.put("hive-site", new HashMap<>()); - - final Map clusterEnvMap = new HashMap<>(); - - Capture captureCluster = Capture.newInstance(); - Capture captureStackId = Capture.newInstance(); - Capture captureAmc = Capture.newInstance(); - - Capture>> cap = new Capture>>() { - @Override - public void setValue(Map> value) { - if (value.containsKey("cluster-env")) { - clusterEnvMap.putAll(value.get("cluster-env")); - } - } - }; - - Capture captureUsername = Capture.newInstance(); - Capture captureNote = Capture.newInstance(); - - EasyMock.reset(m_configHelper); - expect(m_configHelper.getDefaultProperties(oldStack, "HIVE")).andReturn(stackMap).atLeastOnce(); - expect(m_configHelper.getDefaultProperties(newStack, "HIVE")).andReturn(stackMap).atLeastOnce(); - expect(m_configHelper.getDefaultProperties(oldStack, "ZOOKEEPER")).andReturn(stackMap).atLeastOnce(); - expect(m_configHelper.getDefaultProperties(newStack, "ZOOKEEPER")).andReturn(stackMap).atLeastOnce(); - m_configHelper.createConfigTypes( - EasyMock.capture(captureCluster), - EasyMock.capture(captureStackId), - EasyMock.capture(captureAmc), - EasyMock.capture(cap), - - EasyMock.capture(captureUsername), - EasyMock.capture(captureNote)); - expectLastCall().atLeastOnce(); - - replay(m_configHelper); - - RepositoryVersionEntity repoVersionEntity = helper.getOrCreateRepositoryVersion(new StackId("HDP-2.5.0"), "2.5.0-1234"); - - Map upgradeRequestMap = new HashMap<>(); - upgradeRequestMap.put(UpgradeResourceProvider.UPGRADE_DIRECTION, Direction.UPGRADE.name()); - 
upgradeRequestMap.put(UpgradeResourceProvider.UPGRADE_REPO_VERSION_ID, repoVersionEntity.getId().toString()); - upgradeRequestMap.put(UpgradeResourceProvider.UPGRADE_PACK, "upgrade_test_HDP-250"); - upgradeRequestMap.put(UpgradeResourceProvider.UPGRADE_SKIP_PREREQUISITE_CHECKS, Boolean.TRUE.toString()); - - UpgradeContextFactory contextFactory = injector.getInstance(UpgradeContextFactory.class); - UpgradeContext context = contextFactory.create(cluster, upgradeRequestMap); - - UpgradeHelper upgradeHelper = injector.getInstance(UpgradeHelper.class); - upgradeHelper.updateDesiredRepositoriesAndConfigs(context); - - assertNotNull(clusterEnvMap); - assertTrue(clusterEnvMap.containsKey("a")); - - // Do stacks cleanup - stackManagerMock.invalidateCurrentPaths(); - ambariMetaInfo.init(); - } - - @Test - public void testSequentialServiceChecks() throws Exception { - Map upgrades = ambariMetaInfo.getUpgradePacks("HDP", "2.1.1"); - assertTrue(upgrades.containsKey("upgrade_test_checks")); - UpgradePack upgrade = upgrades.get("upgrade_test_checks"); - assertNotNull(upgrade); - - Cluster cluster = makeCluster(); - cluster.deleteService("HDFS"); - cluster.deleteService("YARN"); - - UpgradeContext context = getMockUpgradeContext(cluster, Direction.UPGRADE, - UpgradeType.ROLLING, repositoryVersion2110); - - List groups = m_upgradeHelper.createSequence(upgrade, context); - assertEquals(5, groups.size()); - - UpgradeGroupHolder serviceCheckGroup = groups.get(2); - assertEquals(ServiceCheckGrouping.class, serviceCheckGroup.groupClass); - assertEquals(3, serviceCheckGroup.items.size()); - - StageWrapper wrapper = serviceCheckGroup.items.get(0); - assertEquals(ServiceCheckGrouping.ServiceCheckStageWrapper.class, wrapper.getClass()); - assertTrue(wrapper.getText().contains("ZooKeeper")); - - // Do stacks cleanup - stackManagerMock.invalidateCurrentPaths(); - ambariMetaInfo.init(); - } - - @Test - public void testSequentialServiceChecksWithServiceCheckFailure() throws Exception { - Map upgrades = ambariMetaInfo.getUpgradePacks("HDP", "2.1.1"); - assertTrue(upgrades.containsKey("upgrade_test_checks")); - UpgradePack upgrade = upgrades.get("upgrade_test_checks"); - assertNotNull(upgrade); - - // !!! 
fake skippable so we don't affect other tests - for (Grouping g : upgrade.getAllGroups()) { - if (g.name.equals("SERVICE_CHECK_1") || g.name.equals("SERVICE_CHECK_2")) { - g.skippable = true; - } - } - - Cluster cluster = makeCluster(); - cluster.deleteService("HDFS"); - cluster.deleteService("YARN"); - - UpgradeContext context = getMockUpgradeContext(cluster, Direction.UPGRADE, UpgradeType.ROLLING, repositoryVersion2110, - RepositoryType.STANDARD, cluster.getServices().keySet(), m_masterHostResolver, false); - expect(context.isServiceCheckFailureAutoSkipped()).andReturn(Boolean.TRUE).atLeastOnce(); - - replay(context); - - List groups = m_upgradeHelper.createSequence(upgrade, context); - assertEquals(5, groups.size()); - - UpgradeGroupHolder serviceCheckGroup = groups.get(2); - assertEquals(ServiceCheckGrouping.class, serviceCheckGroup.groupClass); - assertEquals(4, serviceCheckGroup.items.size()); - - StageWrapper wrapper = serviceCheckGroup.items.get(0); - assertEquals(ServiceCheckGrouping.ServiceCheckStageWrapper.class, wrapper.getClass()); - assertTrue(wrapper.getText().contains("ZooKeeper")); - - wrapper = serviceCheckGroup.items.get(serviceCheckGroup.items.size()-1); - assertTrue(wrapper.getText().equals("Verifying Skipped Failures")); - - // Do stacks cleanup - stackManagerMock.invalidateCurrentPaths(); - ambariMetaInfo.init(); - } - - - @Test - public void testPrematureServiceChecks() throws Exception { - Map upgrades = ambariMetaInfo.getUpgradePacks("HDP", "2.1.1"); - assertTrue(upgrades.containsKey("upgrade_test_checks")); - UpgradePack upgrade = upgrades.get("upgrade_test_checks"); - assertNotNull(upgrade); - - Cluster cluster = makeCluster(); - cluster.deleteService("HDFS"); - cluster.deleteService("YARN"); - cluster.deleteService("ZOOKEEPER"); - - UpgradeContext context = getMockUpgradeContext(cluster, Direction.UPGRADE, - UpgradeType.ROLLING, repositoryVersion2110); - - List groups = m_upgradeHelper.createSequence(upgrade, context); - - assertEquals(3, groups.size()); - - for (UpgradeGroupHolder holder : groups) { - assertFalse(ServiceCheckGrouping.class.equals(holder.groupClass)); - } - - // Do stacks cleanup - stackManagerMock.invalidateCurrentPaths(); - ambariMetaInfo.init(); - } - - - /** - * @param cluster - * @param direction - * @param type - * @return - */ - private UpgradeContext getMockUpgradeContext(Cluster cluster, Direction direction, UpgradeType type){ - return getMockUpgradeContext(cluster, direction, type, repositoryVersion2210); - } - - /** - * @param cluster - * @param direction - * @param type - * @return - */ - private UpgradeContext getMockUpgradeContext(Cluster cluster, Direction direction, - UpgradeType type, RepositoryVersionEntity repositoryVersion) { - Set allServices = cluster.getServices().keySet(); - return getMockUpgradeContext(cluster, direction, type, repositoryVersion, - RepositoryType.STANDARD, allServices); - } - - /** - * @param cluster - * @param direction - * @param type - * @return - */ - private UpgradeContext getMockUpgradeContext(Cluster cluster, Direction direction, - UpgradeType type, RepositoryVersionEntity repositoryVersion, RepositoryType repositoryType, - Set services) { - return getMockUpgradeContext(cluster, direction, type, repositoryVersion, - repositoryType, services, m_masterHostResolver, true); - } - - /** - * @param cluster - * @param direction - * @param type - * @return - */ - private UpgradeContext getMockUpgradeContextNoReplay(Cluster cluster, Direction direction, - UpgradeType type, RepositoryVersionEntity 
repositoryVersion) {
- Set<String> allServices = cluster.getServices().keySet();
-
- return getMockUpgradeContext(cluster, direction, type, repositoryVersion,
- RepositoryType.STANDARD, allServices, null, false);
- }
-
- /**
- * @param cluster
- * @param direction
- * @param type
- * @param repositoryType
- * @param services
- * @return a mocked upgrade context for the given direction and type
- */
- private UpgradeContext getMockUpgradeContext(Cluster cluster, Direction direction,
- UpgradeType type, RepositoryVersionEntity repositoryVersion, final RepositoryType repositoryType,
- Set<String> services, MasterHostResolver resolver, boolean replay) {
- UpgradeContext context = EasyMock.createNiceMock(UpgradeContext.class);
- expect(context.getCluster()).andReturn(cluster).anyTimes();
- expect(context.getType()).andReturn(type).anyTimes();
- expect(context.getDirection()).andReturn(direction).anyTimes();
- expect(context.getRepositoryVersion()).andReturn(repositoryVersion).anyTimes();
- expect(context.getSupportedServices()).andReturn(services).anyTimes();
- expect(context.getOrchestrationType()).andReturn(repositoryType).anyTimes();
- expect(context.getAmbariMetaInfo()).andReturn(ambariMetaInfo).anyTimes();
- expect(context.getHostRoleCommandFactory()).andStubReturn(
- injector.getInstance(HostRoleCommandFactory.class));
- expect(context.getRoleGraphFactory()).andStubReturn(
- injector.getInstance(RoleGraphFactory.class));
-
- // only set this if supplied
- if (null != resolver) {
- expect(context.getResolver()).andReturn(resolver).anyTimes();
- }
-
- final Map<String, RepositoryVersionEntity> targetRepositoryVersions = new HashMap<>();
- for (String serviceName : services) {
- targetRepositoryVersions.put(serviceName, repositoryVersion);
- }
-
- final Capture<String> repoVersionServiceName = EasyMock.newCapture();
- expect(
- context.getTargetRepositoryVersion(EasyMock.capture(repoVersionServiceName))).andAnswer(
- new IAnswer<RepositoryVersionEntity>() {
- @Override
- public RepositoryVersionEntity answer() {
- return targetRepositoryVersions.get(repoVersionServiceName.getValue());
- }
- }).anyTimes();
-
- final Capture<String> serviceNameSupported = EasyMock.newCapture();
- expect(context.isServiceSupported(EasyMock.capture(serviceNameSupported))).andAnswer(
- new IAnswer<Boolean>() {
- @Override
- public Boolean answer() {
- return targetRepositoryVersions.containsKey(serviceNameSupported.getValue());
- }
- }).anyTimes();
-
- final Map<String, String> serviceNames = new HashMap<>();
- final Capture<String> serviceDisplayNameArg1 = EasyMock.newCapture();
- final Capture<String> serviceDisplayNameArg2 = EasyMock.newCapture();
-
- context.setServiceDisplay(EasyMock.capture(serviceDisplayNameArg1), EasyMock.capture(serviceDisplayNameArg2));
- expectLastCall().andAnswer(
- new IAnswer<Object>() {
- @Override
- public Object answer() {
- serviceNames.put(serviceDisplayNameArg1.getValue(), serviceDisplayNameArg2.getValue());
- return null;
- }
- }).anyTimes();
-
- final Map<String, String> componentNames = new HashMap<>();
- final Capture<String> componentDisplayNameArg1 = EasyMock.newCapture();
- final Capture<String> componentDisplayNameArg2 = EasyMock.newCapture();
- final Capture<String> componentDisplayNameArg3 = EasyMock.newCapture();
-
- context.setComponentDisplay(EasyMock.capture(componentDisplayNameArg1),
- EasyMock.capture(componentDisplayNameArg2), EasyMock.capture(componentDisplayNameArg3));
-
- expectLastCall().andAnswer(new IAnswer<Object>() {
- @Override
- public Object answer() {
- componentNames.put(
- componentDisplayNameArg1.getValue() + ":" + componentDisplayNameArg2.getValue(),
- componentDisplayNameArg3.getValue());
- return null;
- }
- }).anyTimes();
-
- final Capture<String> getServiceDisplayArgument1 = EasyMock.newCapture();
- expect(
- context.getServiceDisplay(EasyMock.capture(getServiceDisplayArgument1))).andAnswer(
- new IAnswer<String>() {
- @Override
- public String answer() {
- return serviceNames.get(getServiceDisplayArgument1.getValue());
- }
- }).anyTimes();
-
- final Capture<String> getComponentDisplayArgument1 = EasyMock.newCapture();
- final Capture<String> getComponentDisplayArgument2 = EasyMock.newCapture();
- expect(context.getComponentDisplay(EasyMock.capture(getComponentDisplayArgument1),
- EasyMock.capture(getComponentDisplayArgument2))).andAnswer(new IAnswer<String>() {
- @Override
- public String answer() {
- return componentNames.get(getComponentDisplayArgument1.getValue() + ":"
- + getComponentDisplayArgument2.getValue());
- }
- }).anyTimes();
-
- final Capture<UpgradeScope> isScopedCapture = EasyMock.newCapture();
- expect(context.isScoped(EasyMock.capture(isScopedCapture))).andStubAnswer(
- new IAnswer<Boolean>() {
- @Override
- public Boolean answer() throws Throwable {
- UpgradeScope scope = isScopedCapture.getValue();
- if (scope == UpgradeScope.ANY) {
- return true;
- }
-
- if (scope == UpgradeScope.PARTIAL) {
- return repositoryType != RepositoryType.STANDARD;
- }
-
- return repositoryType == RepositoryType.STANDARD;
- }
- });
-
- if (replay) {
- replay(context);
- }
-
- return context;
- }
-
- /**
- * Extend {@link org.apache.ambari.server.stack.MasterHostResolver} in order
- * to override the JMX methods.
- */
- private class MockMasterHostResolver extends MasterHostResolver {
-
- public MockMasterHostResolver(Cluster cluster, ConfigHelper configHelper, UpgradeContext context) {
- super(cluster, configHelper, context);
- }
-
- /**
- * Mock the call to get JMX values.
- * @param hostname host name
- * @param port port number
- * @param beanName if asQuery is false, then search for this bean name
- * @param attributeName if asQuery is false, then search for this attribute name
- * @param asQuery whether to search by bean or by query
- * @param encrypted true if using https instead of http.
- * @return the mocked NameNode status for the given host
- */
- @Override
- public String queryJmxBeanValue(String hostname, int port, String beanName, String attributeName,
- boolean asQuery, boolean encrypted) {
-
- if (beanName.equalsIgnoreCase("Hadoop:service=NameNode,name=NameNodeStatus") && attributeName.equalsIgnoreCase("State") && asQuery) {
- switch (hostname) {
- case "H1":
- return Status.ACTIVE.toString();
- case "H2":
- return Status.STANDBY.toString();
- case "H3":
- return Status.ACTIVE.toString();
- case "H4":
- return Status.STANDBY.toString();
- default:
- return "UNKNOWN_NAMENODE_STATUS_FOR_THIS_HOST";
- }
- }
- return "NOT_MOCKED";
- }
- }
-
- private class MockModule implements Module {
-
- @Override
- public void configure(Binder binder) {
- binder.install(new FactoryModuleBuilder().build(UpgradeContextFactory.class));
- binder.bind(ConfigHelper.class).toInstance(m_configHelper);
- }
- }
-
- private static class BadMasterHostResolver extends MasterHostResolver {
-
- public BadMasterHostResolver(Cluster cluster, ConfigHelper configHelper, UpgradeContext context) {
- super(cluster, configHelper, context);
- }
-
- @Override
- protected String queryJmxBeanValue(String hostname, int port, String beanName,
- String attributeName, boolean asQuery, boolean encrypted) {
- return null;
- }
-
- }
-}
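
The assertions in testUpgradeWithMultipleTasksInOwnStage above encode how tasks fan out to hosts: a pre-upgrade task scoped to "all" produces one single-host stage per Oozie Server ("Batch 1 of 2", "Batch 2 of 2"), while a task scoped to "any" produces a single stage on exactly one host. A minimal sketch of that counting rule, assuming this fan-out behavior; the class and method names below are the editor's own, not Ambari API:

import java.util.ArrayList;
import java.util.List;

public class TaskFanOutSketch {
  enum Scope { ALL, ANY }

  // Returns one batch of hosts per stage that a task with the given scope would produce.
  static List<List<String>> fanOut(Scope scope, List<String> hosts) {
    List<List<String>> stages = new ArrayList<>();
    if (scope == Scope.ALL) {
      for (String h : hosts) {
        stages.add(List.of(h)); // "all": one single-host stage per host
      }
    } else {
      stages.add(List.of(hosts.get(0))); // "any": exactly one host, one stage
    }
    return stages;
  }

  public static void main(String[] args) {
    List<String> oozieServers = List.of("h2", "h3");
    int prepareStages = fanOut(Scope.ALL, oozieServers).size()
        + fanOut(Scope.ANY, oozieServers).size();
    System.out.println(prepareStages); // 3, matching the test's expectation
  }
}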
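testResolverCaseInsensitive and testResolverBadJmx both feed the resolver upper-case addresses ("H1:50070", "H2:50070") and assert that the resulting HostsType contains "h1" and "h2". A sketch of that normalization, assuming the resolver strips the port and lower-cases the host before comparing it against cluster hostnames; the helper name is hypothetical:

import java.util.Locale;

public class HostNormalizationSketch {
  // Reduce an http-address config value such as "H1:50070" to the
  // lower-case hostname used to look up the host in the cluster.
  static String toClusterHostname(String httpAddress) {
    String host = httpAddress.split(":", 2)[0];
    return host.toLowerCase(Locale.ENGLISH);
  }

  public static void main(String[] args) {
    System.out.println(toClusterHostname("H1:50070")); // h1
    System.out.println(toClusterHostname("H2:50070")); // h2
  }
}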
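Finally, the expectations at the end of testMergeConfigurations pin down the merge rule applied when moving to a new stack: a property whose current value still equals the old stack default adopts the new default ("one" becomes "one-new"), while a customized value survives even when the stack default changed ("three-changed" is kept). A sketch of that rule using the test's sample values; the class and method names are hypothetical, not the production implementation:

import java.util.HashMap;
import java.util.Map;

public class StackConfigMergeSketch {
  static Map<String, String> merge(Map<String, String> oldDefaults,
      Map<String, String> newDefaults, Map<String, String> current) {
    Map<String, String> merged = new HashMap<>(current);
    for (Map.Entry<String, String> entry : current.entrySet()) {
      String key = entry.getKey();
      String newDefault = newDefaults.get(key);
      // not customized (still the old default) and a new default exists:
      // adopt the new stack value
      if (newDefault != null && entry.getValue().equals(oldDefaults.get(key))) {
        merged.put(key, newDefault);
      }
      // otherwise the current (possibly customized) value is preserved
    }
    return merged;
  }

  public static void main(String[] args) {
    Map<String, String> old211 = Map.of("1", "one", "1A", "one-A", "3", "three");
    Map<String, String> new220 = Map.of("1", "one-new", "1A1", "one-A-one", "3", "three-new");
    Map<String, String> current = Map.of("1", "one", "1A", "one-A", "3", "three-changed");
    // yields 1=one-new (stack change adopted), 1A=one-A (no new default),
    // 3=three-changed (customization preserved)
    System.out.println(merge(old211, new220, current));
  }
}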