From 9e68a578328e284dcdf845ce32a0c48ac8dba36c Mon Sep 17 00:00:00 2001 From: sandeep Date: Mon, 25 Jul 2016 12:06:59 +0530 Subject: [PATCH 1/8] FALCON-298. Feed update with replication delay creates holes --- .../java/org/apache/falcon/entity/EntityUtil.java | 12 ++++++++++++ .../falcon/workflow/engine/OozieWorkflowEngine.java | 2 +- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/common/src/main/java/org/apache/falcon/entity/EntityUtil.java b/common/src/main/java/org/apache/falcon/entity/EntityUtil.java index 51172f244..aef1fd5af 100644 --- a/common/src/main/java/org/apache/falcon/entity/EntityUtil.java +++ b/common/src/main/java/org/apache/falcon/entity/EntityUtil.java @@ -462,6 +462,18 @@ public static Date getNextInstanceTime(Date instanceTime, Frequency frequency, T return insCal.getTime(); } + public static Date getNextInstanceTimeWithDelay(Date instanceTime, Frequency delay, TimeZone tz) { + if (tz == null) { + tz = TimeZone.getTimeZone("UTC"); + } + Calendar insCal = Calendar.getInstance(tz); + insCal.setTime(instanceTime); + final int delayAmount = delay.getFrequencyAsInt(); + insCal.add(delay.getTimeUnit().getCalendarUnit(), delayAmount); + + return insCal.getTime(); + } + public static String md5(Entity entity) throws FalconException { return new String(Hex.encodeHex(DigestUtils.md5(stringOf(entity)))); } diff --git a/oozie/src/main/java/org/apache/falcon/workflow/engine/OozieWorkflowEngine.java b/oozie/src/main/java/org/apache/falcon/workflow/engine/OozieWorkflowEngine.java index 9a09f18aa..b879f87be 100644 --- a/oozie/src/main/java/org/apache/falcon/workflow/engine/OozieWorkflowEngine.java +++ b/oozie/src/main/java/org/apache/falcon/workflow/engine/OozieWorkflowEngine.java @@ -1393,7 +1393,7 @@ private void updateCoords(String cluster, BundleJob bundle, //calculate next start time based on delay. endTime = (delay == null) ? 
endTime - : EntityUtil.getNextStartTime(coord.getStartTime(), delay, EntityUtil.getTimeZone(entity), endTime); + : EntityUtil.getNextInstanceTimeWithDelay(endTime, delay, EntityUtil.getTimeZone(entity)); LOG.debug("Updating endtime of coord {} to {} on cluster {}", coord.getId(), SchemaHelper.formatDateUTC(endTime), cluster); From a94d4fe07f0da2606fc1c9009ba9a2d62643b728 Mon Sep 17 00:00:00 2001 From: sandeep Date: Fri, 5 Aug 2016 16:27:34 +0530 Subject: [PATCH 2/8] rebasing from master --- Installation-steps.txt | 24 +- NOTICE.txt | 2 +- .../main/META/hdfs-mirroring-properties.json | 84 ++++ .../runtime/hdfs-mirroring-workflow.xml | 28 ++ .../hdfs-snapshot-mirroring-properties.json | 78 +++- .../hdfs-snapshot-mirroring-workflow.xml | 36 +- .../main/META/hive-mirroring-properties.json | 16 +- .../hive-mirroring-secure-workflow.xml | 12 + .../runtime/hive-mirroring-workflow.xml | 12 + .../replication/HdfsSnapshotReplicator.java | 99 +++-- .../HdfsSnapshotReplicatorTest.java | 7 +- .../org/apache/falcon/hive/HiveDRArgs.java | 2 + .../org/apache/falcon/hive/HiveDRTool.java | 6 +- .../falcon/hive/mapreduce/CopyReducer.java | 3 +- .../apache/falcon/hive/util/EventUtils.java | 55 ++- .../apache/falcon/hive/util/FileUtils.java | 12 +- .../falcon/hive/util/HiveDRStatusStore.java | 24 +- .../falcon/hive/util/EventUtilsTest.java | 71 ++++ cli/pom.xml | 9 - .../org/apache/falcon/cli/FalconAdminCLI.java | 9 +- .../java/org/apache/falcon/cli/FalconCLI.java | 117 +----- .../apache/falcon/cli/FalconEntityCLI.java | 168 ++++---- .../apache/falcon/cli/FalconExtensionCLI.java | 2 +- .../apache/falcon/cli/FalconInstanceCLI.java | 240 ++++++------ .../apache/falcon/cli/FalconMetadataCLI.java | 177 ++++----- .../cli/commands/FalconInstanceCommands.java | 29 -- .../org/apache/falcon/FalconCLIConstants.java | 81 ---- .../org/apache/falcon/ValidationUtil.java | 199 ++++++++++ .../falcon/client/AbstractFalconClient.java | 32 ++ .../falcon/client/FalconCLIConstants.java | 220 +++++++++++ 
.../apache/falcon/client/FalconClient.java | 6 +- common/pom.xml | 11 +- .../falcon/metadata/GraphUpdateUtils.java | 113 ++++++ .../metadata/MetadataMappingService.java | 4 +- .../falcon/persistence/BacklogMetricBean.java | 116 ++++++ .../persistence/PersistenceConstants.java | 2 + .../falcon/tools/FalconStateStoreDBCLI.java | 1 + .../apache/falcon/util/DistCPOptionsUtil.java | 153 ++++++++ .../falcon/util/ReplicationDistCpOption.java | 8 +- .../engine/AbstractWorkflowEngine.java | 2 + .../main/resources/META-INF/persistence.xml | 9 +- common/src/main/resources/startup.properties | 4 + .../parser/ProcessEntityParserTest.java | 2 +- .../resources/config/process/process-0.1.xml | 2 +- docs/src/site/twiki/EntitySpecification.twiki | 12 +- docs/src/site/twiki/InstallationSteps.twiki | 15 +- .../site/twiki/MigrationInstructions.twiki | 29 +- .../falcon/extensions/AbstractExtension.java | 12 + .../hdfs/HdfsMirroringExtension.java | 16 +- .../HdfsMirroringExtensionProperties.java | 7 +- .../HdfsSnapshotMirrorProperties.java | 4 +- .../HdfsSnapshotMirroringExtension.java | 6 +- .../hive/HiveMirroringExtension.java | 18 +- .../HiveMirroringExtensionProperties.java | 2 + .../falcon/extensions/ExtensionTest.java | 2 +- .../feed/FSReplicationWorkflowBuilder.java | 1 + .../feed/FeedReplicationWorkflowBuilder.java | 20 +- .../workflow/engine/OozieWorkflowEngine.java | 26 +- pom.xml | 7 +- .../falcon/jdbc/BacklogMetricStore.java | 121 ++++++ .../falcon/resource/channel/HTTPChannel.java | 5 +- .../service/BacklogMetricEmitterService.java | 356 +++++++++++++++++ .../falcon/service/EntitySLAAlertService.java | 6 +- .../falcon/service/EntitySLAListener.java | 3 +- .../org/apache/falcon/util/MetricInfo.java | 79 ++++ .../BacklogMetricEmitterServiceTest.java | 133 +++++++ prism/src/test/resources/startup.properties | 338 +++++++++++++++++ release-docs/0.10/CHANGES.0.10.md | 16 +- .../falcon/replication/FeedReplicator.java | 92 ++--- .../replication/FeedReplicatorTest.java | 20 +- 
.../workflow/engine/FalconWorkflowEngine.java | 20 +- shell/pom.xml | 196 ++++++++++ .../shell}/commands/BaseFalconCommands.java | 27 +- .../shell/commands/FalconAdminCommands.java | 64 ++++ .../commands/FalconConnectionCommands.java | 6 +- .../shell}/commands/FalconEntityCommands.java | 190 +++++----- .../commands/FalconInstanceCommands.java | 358 ++++++++++++++++++ .../commands/FalconMetadataCommands.java | 162 ++++++++ .../shell/commands/FalconProfileCommands.java | 86 +++++ .../falcon/shell}/skel/FalconBanner.java | 5 +- .../skel/FalconHistoryFileProvider.java | 2 +- .../shell}/skel/FalconPromptProvider.java | 2 +- .../META-INF/spring/spring-shell-plugin.xml | 8 +- shell/src/main/resources/shell.properties | 25 ++ .../test}/FalconConnectionCommandsTest.java | 2 +- src/bin/falcon-shell | 39 ++ src/bin/graphdbutil.sh | 118 ++++++ src/build/findbugs-exclude.xml | 6 + src/conf/shell.properties | 25 ++ src/conf/startup.properties | 16 + src/main/assemblies/standalone-package.xml | 6 + titan/pom.xml | 5 - .../apache/falcon/unit/FalconUnitClient.java | 71 ++++ .../org/apache/falcon/cli/FalconCLIIT.java | 2 +- .../falcon/resource/ExtensionManagerIT.java | 5 +- .../InstanceSchedulerManagerJerseyIT.java | 13 +- .../resource/ProcessInstanceManagerIT.java | 17 +- .../apache/falcon/resource/TestContext.java | 2 +- 98 files changed, 4219 insertions(+), 892 deletions(-) create mode 100644 addons/hivedr/src/test/java/org/apache/falcon/hive/util/EventUtilsTest.java delete mode 100644 cli/src/main/java/org/apache/falcon/cli/commands/FalconInstanceCommands.java delete mode 100644 client/src/main/java/org/apache/falcon/FalconCLIConstants.java create mode 100644 client/src/main/java/org/apache/falcon/ValidationUtil.java create mode 100644 client/src/main/java/org/apache/falcon/client/FalconCLIConstants.java create mode 100644 common/src/main/java/org/apache/falcon/metadata/GraphUpdateUtils.java create mode 100644 
common/src/main/java/org/apache/falcon/persistence/BacklogMetricBean.java create mode 100644 common/src/main/java/org/apache/falcon/util/DistCPOptionsUtil.java create mode 100644 prism/src/main/java/org/apache/falcon/jdbc/BacklogMetricStore.java create mode 100644 prism/src/main/java/org/apache/falcon/service/BacklogMetricEmitterService.java create mode 100644 prism/src/main/java/org/apache/falcon/util/MetricInfo.java create mode 100644 prism/src/test/java/org/apache/falcon/service/BacklogMetricEmitterServiceTest.java create mode 100644 prism/src/test/resources/startup.properties create mode 100644 shell/pom.xml rename {cli/src/main/java/org/apache/falcon/cli => shell/src/main/java/org/apache/falcon/shell}/commands/BaseFalconCommands.java (86%) create mode 100644 shell/src/main/java/org/apache/falcon/shell/commands/FalconAdminCommands.java rename {cli/src/main/java/org/apache/falcon/cli => shell/src/main/java/org/apache/falcon/shell}/commands/FalconConnectionCommands.java (91%) rename {cli/src/main/java/org/apache/falcon/cli => shell/src/main/java/org/apache/falcon/shell}/commands/FalconEntityCommands.java (67%) create mode 100644 shell/src/main/java/org/apache/falcon/shell/commands/FalconInstanceCommands.java create mode 100644 shell/src/main/java/org/apache/falcon/shell/commands/FalconMetadataCommands.java create mode 100644 shell/src/main/java/org/apache/falcon/shell/commands/FalconProfileCommands.java rename {cli/src/main/java/org/apache/falcon/cli => shell/src/main/java/org/apache/falcon/shell}/skel/FalconBanner.java (89%) rename {cli/src/main/java/org/apache/falcon/cli => shell/src/main/java/org/apache/falcon/shell}/skel/FalconHistoryFileProvider.java (97%) rename {cli/src/main/java/org/apache/falcon/cli => shell/src/main/java/org/apache/falcon/shell}/skel/FalconPromptProvider.java (97%) rename {cli => shell}/src/main/resources/META-INF/spring/spring-shell-plugin.xml (84%) create mode 100644 shell/src/main/resources/shell.properties rename 
{cli/src/test/java/org/apache/falcon/cli/commands => shell/src/test}/FalconConnectionCommandsTest.java (97%) create mode 100644 src/bin/falcon-shell create mode 100644 src/bin/graphdbutil.sh create mode 100644 src/conf/shell.properties diff --git a/Installation-steps.txt b/Installation-steps.txt index b86d6a1c8..84f0c996c 100644 --- a/Installation-steps.txt +++ b/Installation-steps.txt @@ -41,10 +41,12 @@ a. Building falcon from the source release * export MAVEN_OPTS="-Xmx1024m -XX:MaxPermSize=256m -noverify" && mvn clean install [optionally -Dhadoop.version=<> can be appended to build for a specific version of hadoop] -*Note:* Falcon drops support for Hadoop-1 and only supports Hadoop-2 from Falcon 0.6 onwards - Falcon build with JDK 1.7 using -noverify option - To compile Falcon with Hive Replication, optionally "-P hadoop-2,hivedr" can be appended. For this - Hive >= 1.2.0 and Oozie >= 4.2.0 should be available. +*Note 1:* Falcon drops support for Hadoop-1 and only supports Hadoop-2 from Falcon 0.6 onwards + Falcon build with JDK 1.7 using -noverify option +*Note 2:* To compile Falcon with addon extensions, append additional profiles to build command using syntax -P<> + For Hive Mirroring extension, use profile"hivedr". Hive >= 1.2.0 and Oozie >= 4.2.0 is required + For HDFS Snapshot mirroring extension, use profile "hdfs-snapshot-mirroring". Hadoop >= 2.7.0 is required + For ADF integration, use profile "adf" b. Building falcon from the source repository @@ -55,10 +57,12 @@ b. Building falcon from the source repository * export MAVEN_OPTS="-Xmx1024m -XX:MaxPermSize=256m -noverify" && mvn clean install [optionally -Dhadoop.version=<> can be appended to build for a specific version of hadoop] -*Note:* Falcon drops support for Hadoop-1 and only supports Hadoop-2 from Falcon 0.6 onwards - Falcon build with JDK 1.7 using -noverify option - To compile Falcon with Hive Replication, optionally "-P hadoop-2,hivedr" can be appended. 
For this - Hive >= 1.2.0 and Oozie >= 4.2.0 should be available. +*Note 1:* Falcon drops support for Hadoop-1 and only supports Hadoop-2 from Falcon 0.6 onwards + Falcon build with JDK 1.7 using -noverify option +*Note 2:* To compile Falcon with addon extensions, append additional profiles to build command using syntax -P<> + For Hive Mirroring extension, use profile"hivedr". Hive >= 1.2.0 and Oozie >= 4.2.0 is required + For HDFS Snapshot mirroring extension, use profile "hdfs-snapshot-mirroring". Hadoop >= 2.7.0 is required + For ADF integration, use profile "adf" 2. Deploying Falcon @@ -118,8 +122,8 @@ c. Using Falcon ~~~~~~~~~~~~~~~ * bin/falcon admin -version - Falcon server build version: {Version:"0.3-SNAPSHOT-rd7e2be9afa2a5dc96acd1ec9e325f39c6b2f17f7",Mode:"embedded"} - +Falcon server build version: {"properties":[{"key":"Version","value":"0.10-rbe02edf0d5b10af27bbac694e536bef30885c00e"}, +{"key":"Mode","value":"embedded"},{"key":"authentication","value":"simple"},{"key":"safemode","value":"false"}]} * bin/falcon help (for more details about falcon cli usage) diff --git a/NOTICE.txt b/NOTICE.txt index c9259e8cc..de9847f8c 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -1,6 +1,6 @@ Apache Falcon -Copyright 2011-2015 The Apache Software Foundation +Copyright 2011-2016 The Apache Software Foundation This product includes software developed at The Apache Software Foundation (http://www.apache.org/). 
diff --git a/addons/extensions/hdfs-mirroring/src/main/META/hdfs-mirroring-properties.json b/addons/extensions/hdfs-mirroring/src/main/META/hdfs-mirroring-properties.json index f1b477559..9d4a425c0 100644 --- a/addons/extensions/hdfs-mirroring/src/main/META/hdfs-mirroring-properties.json +++ b/addons/extensions/hdfs-mirroring/src/main/META/hdfs-mirroring-properties.json @@ -121,6 +121,90 @@ "description":"Bandwidth in MB for each mapper in DistCP", "example":"100" }, + { + "propertyName":"tdeEncryptionEnabled", + "required":false, + "description":"Set this flag to true if TDE encryption is enabled on source and target. Default value is false", + "example":"true" + }, + { + "propertyName":"overwrite", + "required":false, + "description":"DitcCP overwrites target files even if they exist at the source, or have the same contents", + "example":"true" + }, + { + "propertyName":"ignoreErrors", + "required":false, + "description":"Ignore failures during DistCp", + "example":"true" + }, + { + "propertyName":"skipChecksum", + "required":false, + "description":"Skip checksum errors during DistCP", + "example":"true" + }, + { + "propertyName":"removeDeletedFiles", + "required":false, + "description":"DistCP deletes the files existing in the dst but not in src", + "example":"true" + }, + { + "propertyName":"preserveBlockSize", + "required":false, + "description":"Preserve block size during DistCP", + "example":"true" + }, + { + "propertyName":"preserveReplicationNumber", + "required":false, + "description":"Preserve replication number during DistCP", + "example":"false" + }, + { + "propertyName":"preservePermission", + "required":false, + "description":"Preserve permission during DistCP", + "example":"true" + }, + { + "propertyName":"preserveUser", + "required":false, + "description":"Preserve user during DistCP", + "example":"true" + }, + { + "propertyName":"preserveGroup", + "required":false, + "description":"Preserve group during DistCP", + "example":"true" + }, + { + 
"propertyName":"preserveChecksumType", + "required":false, + "description":"Preserve checksum type during DistCP", + "example":"true" + }, + { + "propertyName":"preserveAcl", + "required":false, + "description":"Preserve ACL during DistCP", + "example":"false" + }, + { + "propertyName":"preserveXattr", + "required":false, + "description":"Preserve Xattr during DistCP", + "example":"true" + }, + { + "propertyName":"preserveTimes", + "required":false, + "description":"Preserve access and modification times during DistCP", + "example":"true" + }, { "propertyName":"jobNotificationType", "required":false, diff --git a/addons/extensions/hdfs-mirroring/src/main/resources/runtime/hdfs-mirroring-workflow.xml b/addons/extensions/hdfs-mirroring/src/main/resources/runtime/hdfs-mirroring-workflow.xml index c0504fb73..7929dd75a 100644 --- a/addons/extensions/hdfs-mirroring/src/main/resources/runtime/hdfs-mirroring-workflow.xml +++ b/addons/extensions/hdfs-mirroring/src/main/resources/runtime/hdfs-mirroring-workflow.xml @@ -63,6 +63,32 @@ ${distcpMaxMaps} -mapBandwidth ${distcpMapBandwidth} + -overwrite + ${overwrite} + -ignoreErrors + ${ignoreErrors} + -skipChecksum + ${skipChecksum} + -removeDeletedFiles + ${removeDeletedFiles} + -preserveBlockSize + ${preserveBlockSize} + -preserveReplicationNumber + ${preserveReplicationNumber} + -preservePermission + ${preservePermission} + -preserveUser + ${preserveUser} + -preserveGroup + ${preserveGroup} + -preserveChecksumType + ${preserveChecksumType} + -preserveAcl + ${preserveAcl} + -preserveXattr + ${preserveXattr} + -preserveTimes + ${preserveTimes} -sourcePaths ${sourceDir} -targetPath @@ -71,6 +97,8 @@ FILESYSTEM -availabilityFlag ${availabilityFlag == 'NA' ? "NA" : availabilityFlag} + -tdeEncryptionEnabled + ${tdeEncryptionEnabled} -counterLogDir ${logDir}/job-${nominalTime}/${srcClusterName == 'NA' ? 
'' : srcClusterName} diff --git a/addons/extensions/hdfs-snapshot-mirroring/src/main/META/hdfs-snapshot-mirroring-properties.json b/addons/extensions/hdfs-snapshot-mirroring/src/main/META/hdfs-snapshot-mirroring-properties.json index 46554c158..430f1ec31 100644 --- a/addons/extensions/hdfs-snapshot-mirroring/src/main/META/hdfs-snapshot-mirroring-properties.json +++ b/addons/extensions/hdfs-snapshot-mirroring/src/main/META/hdfs-snapshot-mirroring-properties.json @@ -140,13 +140,13 @@ "example":"10" }, { - "propertyName":"distcpMaxMaps", + "propertyName":"maxMaps", "required":false, "description":"Maximum number of mappers for DistCP", "example":"1" }, { - "propertyName":"distcpMapBandwidth", + "propertyName":"mapBandwidth", "required":false, "description":"Bandwidth in MB for each mapper in DistCP", "example":"100" @@ -157,6 +157,78 @@ "description":"Specify if TDE based encryption is enabled on source and target dirs", "example":"false" }, + { + "propertyName":"ignoreErrors", + "required":false, + "description":"Ignore failures during DistCp", + "example":"true" + }, + { + "propertyName":"skipChecksum", + "required":false, + "description":"Skip checksum errors during DistCP", + "example":"true" + }, + { + "propertyName":"removeDeletedFiles", + "required":false, + "description":"DistCP deletes the files existing in the dst but not in src", + "example":"true" + }, + { + "propertyName":"preserveBlockSize", + "required":false, + "description":"Preserve block size during DistCP", + "example":"true" + }, + { + "propertyName":"preserveReplicationNumber", + "required":false, + "description":"Preserve replication number during DistCP", + "example":"false" + }, + { + "propertyName":"preservePermission", + "required":false, + "description":"Preserve permission during DistCP", + "example":"true" + }, + { + "propertyName":"preserveUser", + "required":false, + "description":"Preserve user during DistCP", + "example":"true" + }, + { + "propertyName":"preserveGroup", + 
"required":false, + "description":"Preserve group during DistCP", + "example":"true" + }, + { + "propertyName":"preserveChecksumType", + "required":false, + "description":"Preserve checksum type during DistCP", + "example":"true" + }, + { + "propertyName":"preserveAcl", + "required":false, + "description":"Preserve ACL during DistCP", + "example":"false" + }, + { + "propertyName":"preserveXattr", + "required":false, + "description":"Preserve Xattr during DistCP", + "example":"true" + }, + { + "propertyName":"preserveTimes", + "required":false, + "description":"Preserve access and modification times during DistCP", + "example":"true" + }, { "propertyName":"jobNotificationType", "required":false, @@ -170,4 +242,4 @@ "example":"user1@gmail.com, user2@gmail.com" } ] -} \ No newline at end of file +} diff --git a/addons/extensions/hdfs-snapshot-mirroring/src/main/resources/runtime/hdfs-snapshot-mirroring-workflow.xml b/addons/extensions/hdfs-snapshot-mirroring/src/main/resources/runtime/hdfs-snapshot-mirroring-workflow.xml index c735167b5..899f6b06e 100644 --- a/addons/extensions/hdfs-snapshot-mirroring/src/main/resources/runtime/hdfs-snapshot-mirroring-workflow.xml +++ b/addons/extensions/hdfs-snapshot-mirroring/src/main/resources/runtime/hdfs-snapshot-mirroring-workflow.xml @@ -59,10 +59,36 @@ org.apache.falcon.snapshots.replication.HdfsSnapshotReplicator -Dmapred.job.queue.name=${queueName} -Dmapred.job.priority=${jobPriority} - -distcpMaxMaps - ${distcpMaxMaps} - -distcpMapBandwidth - ${distcpMapBandwidth} + -maxMaps + ${maxMaps} + -mapBandwidth + ${mapBandwidth} + -overwrite + ${overwrite} + -ignoreErrors + ${ignoreErrors} + -skipChecksum + ${skipChecksum} + -removeDeletedFiles + ${removeDeletedFiles} + -preserveBlockSize + ${preserveBlockSize} + -preserveReplicationNumber + ${preserveReplicationNumber} + -preservePermission + ${preservePermission} + -preserveUser + ${preserveUser} + -preserveGroup + ${preserveGroup} + -preserveChecksumType + 
${preserveChecksumType} + -preserveAcl + ${preserveAcl} + -preserveXattr + ${preserveXattr} + -preserveTimes + ${preserveTimes} -sourceNN ${sourceNN} -sourceExecUrl @@ -169,4 +195,4 @@ - \ No newline at end of file + diff --git a/addons/extensions/hive-mirroring/src/main/META/hive-mirroring-properties.json b/addons/extensions/hive-mirroring/src/main/META/hive-mirroring-properties.json index e019e688f..686ce9404 100644 --- a/addons/extensions/hive-mirroring/src/main/META/hive-mirroring-properties.json +++ b/addons/extensions/hive-mirroring/src/main/META/hive-mirroring-properties.json @@ -94,9 +94,15 @@ { "propertyName":"sourceHiveServer2Uri", "required":true, - "description":"Hive2 server end point", + "description":"Hive2 server end point. If Zookeeper discovery mode is enabled zookeeper_ensemble is expected", "example":"hive2://localhost:10000" }, + { + "propertyName":"sourceHiveServer2ExtraOpts", + "required":false, + "description":"Extra opts required when SSL is enbaled, Http mode and when zookeeper discovery is used", + "example":"serviceDiscoveryMode=zooKeeper; zooKeeperNamespace=" + }, { "propertyName":"sourceDatabases", "required":true, @@ -130,9 +136,15 @@ { "propertyName":"targetHiveServer2Uri", "required":true, - "description":"Hive2 server end point", + "description":"Hive2 server end point. 
If Zookeeper discovery mode is enabled zookeeper_ensemble is expected", "example":"hive2://localhost:10000" }, + { + "propertyName":"targetHiveServer2ExtraOpts", + "required":false, + "description":"Extra opts required when SSL is enbaled, Http mode and when zookeeper discovery is used", + "example":"serviceDiscoveryMode=zooKeeper; zooKeeperNamespace=" + }, { "propertyName":"targetStagingPath", "required":false, diff --git a/addons/extensions/hive-mirroring/src/main/resources/runtime/hive-mirroring-secure-workflow.xml b/addons/extensions/hive-mirroring/src/main/resources/runtime/hive-mirroring-secure-workflow.xml index 63e9a6772..6ccea3a70 100644 --- a/addons/extensions/hive-mirroring/src/main/resources/runtime/hive-mirroring-secure-workflow.xml +++ b/addons/extensions/hive-mirroring/src/main/resources/runtime/hive-mirroring-secure-workflow.xml @@ -102,6 +102,8 @@ ${sourceMetastoreUri} -sourceHiveServer2Uri ${sourceHiveServer2Uri} + -sourceHiveServer2ExtraOpts + ${sourceHiveServer2ExtraOpts} -sourceDatabases ${sourceDatabases} -sourceTables @@ -122,6 +124,8 @@ ${targetMetastoreUri} -targetHiveServer2Uri ${targetHiveServer2Uri} + -targetHiveServer2ExtraOpts + ${targetHiveServer2ExtraOpts} -targetStagingPath ${targetStagingPath} -targetNN @@ -200,6 +204,8 @@ ${sourceMetastoreUri} -sourceHiveServer2Uri ${sourceHiveServer2Uri} + -sourceHiveServer2ExtraOpts + ${sourceHiveServer2ExtraOpts} -sourceDatabases ${sourceDatabases} -sourceTables @@ -220,6 +226,8 @@ ${targetMetastoreUri} -targetHiveServer2Uri ${targetHiveServer2Uri} + -targetHiveServer2ExtraOpts + ${targetHiveServer2ExtraOpts} -targetStagingPath ${targetStagingPath} -targetNN @@ -302,6 +310,8 @@ ${sourceMetastoreUri} -sourceHiveServer2Uri ${sourceHiveServer2Uri} + -sourceHiveServer2ExtraOpts + ${sourceHiveServer2ExtraOpts} -sourceDatabases ${sourceDatabases} -sourceTables @@ -322,6 +332,8 @@ ${targetMetastoreUri} -targetHiveServer2Uri ${targetHiveServer2Uri} + -targetHiveServer2ExtraOpts + 
${targetHiveServer2ExtraOpts} -targetStagingPath ${targetStagingPath} -targetNN diff --git a/addons/extensions/hive-mirroring/src/main/resources/runtime/hive-mirroring-workflow.xml b/addons/extensions/hive-mirroring/src/main/resources/runtime/hive-mirroring-workflow.xml index 4f6eec56d..5336bdae3 100644 --- a/addons/extensions/hive-mirroring/src/main/resources/runtime/hive-mirroring-workflow.xml +++ b/addons/extensions/hive-mirroring/src/main/resources/runtime/hive-mirroring-workflow.xml @@ -52,6 +52,8 @@ ${sourceMetastoreUri} -sourceHiveServer2Uri ${sourceHiveServer2Uri} + -sourceHiveServer2ExtraOpts + ${sourceHiveServer2ExtraOpts} -sourceDatabases ${sourceDatabases} -sourceTables @@ -66,6 +68,8 @@ ${targetMetastoreUri} -targetHiveServer2Uri ${targetHiveServer2Uri} + -targetHiveServer2ExtraOpts + ${targetHiveServer2ExtraOpts} -targetStagingPath ${targetStagingPath} -targetNN @@ -128,6 +132,8 @@ ${sourceMetastoreUri} -sourceHiveServer2Uri ${sourceHiveServer2Uri} + -sourceHiveServer2ExtraOpts + ${sourceHiveServer2ExtraOpts} -sourceDatabases ${sourceDatabases} -sourceTables @@ -142,6 +148,8 @@ ${targetMetastoreUri} -targetHiveServer2Uri ${targetHiveServer2Uri} + -targetHiveServer2ExtraOpts + ${targetHiveServer2ExtraOpts} -targetStagingPath ${targetStagingPath} -targetNN @@ -208,6 +216,8 @@ ${sourceMetastoreUri} -sourceHiveServer2Uri ${sourceHiveServer2Uri} + -sourceHiveServer2ExtraOpts + ${sourceHiveServer2ExtraOpts} -sourceDatabases ${sourceDatabases} -sourceTables @@ -222,6 +232,8 @@ ${targetMetastoreUri} -targetHiveServer2Uri ${targetHiveServer2Uri} + -targetHiveServer2ExtraOpts + ${targetHiveServer2ExtraOpts} -targetStagingPath ${targetStagingPath} -targetNN diff --git a/addons/hdfs-snapshot-mirroring/src/main/java/org/apache/falcon/snapshots/replication/HdfsSnapshotReplicator.java b/addons/hdfs-snapshot-mirroring/src/main/java/org/apache/falcon/snapshots/replication/HdfsSnapshotReplicator.java index 6f5defe2e..cb597fed1 100644 --- 
a/addons/hdfs-snapshot-mirroring/src/main/java/org/apache/falcon/snapshots/replication/HdfsSnapshotReplicator.java +++ b/addons/hdfs-snapshot-mirroring/src/main/java/org/apache/falcon/snapshots/replication/HdfsSnapshotReplicator.java @@ -28,6 +28,8 @@ import org.apache.falcon.extensions.mirroring.hdfsSnapshot.HdfsSnapshotMirrorProperties; import org.apache.falcon.hadoop.HadoopClientFactory; import org.apache.falcon.snapshots.util.HdfsSnapshotUtil; +import org.apache.falcon.util.DistCPOptionsUtil; +import org.apache.falcon.util.ReplicationDistCpOption; import org.apache.falcon.workflow.util.OozieActionConfigurationHelper; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; @@ -138,17 +140,14 @@ protected void invokeCopy(String sourceStorageUrl, String targetStorageUrl, private DistCpOptions getDistCpOptions(String sourceStorageUrl, String targetStorageUrl, DistributedFileSystem sourceFs, DistributedFileSystem targetFs, String sourceDir, String targetDir, - String currentSnapshotName) throws FalconException { + String currentSnapshotName) throws FalconException, IOException { - List sourceUris=new ArrayList(); + List sourceUris = new ArrayList<>(); sourceUris.add(new Path(getStagingUri(sourceStorageUrl, sourceDir))); - DistCpOptions distcpOptions = new DistCpOptions(sourceUris, - new Path(getStagingUri(targetStorageUrl, targetDir))); - // Settings needed for Snapshot distCp. - distcpOptions.setSyncFolder(true); - distcpOptions.setDeleteMissing(true); + DistCpOptions distcpOptions = DistCPOptionsUtil.getDistCpOptions(cmd, sourceUris, + new Path(getStagingUri(targetStorageUrl, targetDir)), true, null); // Use snapshot diff if two snapshots exist. Else treat it as simple distCp. // get latest replicated snapshot. 
@@ -157,24 +156,14 @@ private DistCpOptions getDistCpOptions(String sourceStorageUrl, String targetSto distcpOptions.setUseDiff(true, replicatedSnapshotName, currentSnapshotName); } - if (Boolean.valueOf(cmd.getOptionValue(HdfsSnapshotMirrorProperties.TDE_ENCRYPTION_ENABLED.getName()))) { - // skipCRCCheck and update enabled - distcpOptions.setSkipCRC(true); - } - - distcpOptions.setBlocking(true); - distcpOptions.setMaxMaps( - Integer.parseInt(cmd.getOptionValue(HdfsSnapshotMirrorProperties.DISTCP_MAX_MAPS.getName()))); - distcpOptions.setMapBandwidth( - Integer.parseInt(cmd.getOptionValue(HdfsSnapshotMirrorProperties.MAP_BANDWIDTH_IN_MB.getName()))); return distcpOptions; } private String findLatestReplicatedSnapshot(DistributedFileSystem sourceFs, DistributedFileSystem targetFs, - String sourceDir, String targetDir) throws FalconException { + String sourceDir, String targetDir) throws FalconException { try { FileStatus[] sourceSnapshots = sourceFs.listStatus(new Path(getSnapshotDir(sourceDir))); - Set sourceSnapshotNames = new HashSet(); + Set sourceSnapshotNames = new HashSet<>(); for (FileStatus snapshot : sourceSnapshots) { sourceSnapshotNames.add(snapshot.getPath().getName()); } @@ -190,8 +179,8 @@ public int compare(FileStatus f1, FileStatus f2) { }); // get most recent snapshot name that exists in source. 
- for (int i = 0; i < targetSnapshots.length; i++) { - String name = targetSnapshots[i].getPath().getName(); + for (FileStatus targetSnapshot : targetSnapshots) { + String name = targetSnapshot.getPath().getName(); if (sourceSnapshotNames.contains(name)) { return name; } @@ -219,7 +208,7 @@ private String getSnapshotDir(String dirName) { protected CommandLine getCommand(String[] args) throws FalconException { Options options = new Options(); - Option opt = new Option(HdfsSnapshotMirrorProperties.DISTCP_MAX_MAPS.getName(), + Option opt = new Option(HdfsSnapshotMirrorProperties.MAX_MAPS.getName(), true, "max number of maps to use for distcp"); opt.setRequired(true); options.addOption(opt); @@ -270,11 +259,75 @@ protected CommandLine getCommand(String[] args) throws FalconException { opt.setRequired(true); options.addOption(opt); + opt = new Option(ReplicationDistCpOption.DISTCP_OPTION_OVERWRITE.getName(), true, "option to force overwrite"); + opt.setRequired(false); + options.addOption(opt); + + opt = new Option(ReplicationDistCpOption.DISTCP_OPTION_IGNORE_ERRORS.getName(), true, "abort on error"); + opt.setRequired(false); + options.addOption(opt); + + opt = new Option(ReplicationDistCpOption.DISTCP_OPTION_SKIP_CHECKSUM.getName(), true, "skip checksums"); + opt.setRequired(false); + options.addOption(opt); + + opt = new Option(ReplicationDistCpOption.DISTCP_OPTION_REMOVE_DELETED_FILES.getName(), true, + "remove deleted files - should there be files in the target directory that" + + "were removed from the source directory" + ); + opt.setRequired(false); + options.addOption(opt); + + opt = new Option(ReplicationDistCpOption.DISTCP_OPTION_PRESERVE_BLOCK_SIZE.getName(), true, + "preserve block size"); + opt.setRequired(false); + options.addOption(opt); + + opt = new Option(ReplicationDistCpOption.DISTCP_OPTION_PRESERVE_REPLICATION_NUMBER.getName(), true, + "preserve replication count"); + opt.setRequired(false); + options.addOption(opt); + + opt = new 
Option(ReplicationDistCpOption.DISTCP_OPTION_PRESERVE_PERMISSIONS.getName(), true, + "preserve permissions"); + opt.setRequired(false); + options.addOption(opt); + + opt = new Option(ReplicationDistCpOption.DISTCP_OPTION_PRESERVE_USER.getName(), true, + "preserve user"); + opt.setRequired(false); + options.addOption(opt); + + opt = new Option(ReplicationDistCpOption.DISTCP_OPTION_PRESERVE_GROUP.getName(), true, + "preserve group"); + opt.setRequired(false); + options.addOption(opt); + + opt = new Option(ReplicationDistCpOption.DISTCP_OPTION_PRESERVE_CHECKSUM_TYPE.getName(), true, + "preserve checksum type"); + opt.setRequired(false); + options.addOption(opt); + + opt = new Option(ReplicationDistCpOption.DISTCP_OPTION_PRESERVE_ACL.getName(), true, + "preserve ACL"); + opt.setRequired(false); + options.addOption(opt); + + opt = new Option(ReplicationDistCpOption.DISTCP_OPTION_PRESERVE_XATTR.getName(), true, + "preserve XATTR"); + opt.setRequired(false); + options.addOption(opt); + + opt = new Option(ReplicationDistCpOption.DISTCP_OPTION_PRESERVE_TIMES.getName(), true, + "preserve access and modification times"); + opt.setRequired(false); + options.addOption(opt); + try { return new GnuParser().parse(options, args); } catch (ParseException pe) { LOG.info("Unabel to parse commad line arguments for HdfsSnapshotReplicator " + pe.getMessage()); - throw new FalconException(pe.getMessage()); + throw new FalconException(pe.getMessage()); } } diff --git a/addons/hdfs-snapshot-mirroring/src/test/java/org/apache/falcon/snapshots/replication/HdfsSnapshotReplicatorTest.java b/addons/hdfs-snapshot-mirroring/src/test/java/org/apache/falcon/snapshots/replication/HdfsSnapshotReplicatorTest.java index fe7ced59c..e66544d0c 100644 --- a/addons/hdfs-snapshot-mirroring/src/test/java/org/apache/falcon/snapshots/replication/HdfsSnapshotReplicatorTest.java +++ b/addons/hdfs-snapshot-mirroring/src/test/java/org/apache/falcon/snapshots/replication/HdfsSnapshotReplicatorTest.java @@ -25,6 +25,7 
@@ import org.apache.falcon.entity.v0.EntityType; import org.apache.falcon.entity.v0.cluster.Cluster; import org.apache.falcon.extensions.mirroring.hdfsSnapshot.HdfsSnapshotMirrorProperties; +import org.apache.falcon.util.ReplicationDistCpOption; import org.apache.falcon.snapshots.util.HdfsSnapshotUtil; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; @@ -57,7 +58,7 @@ public class HdfsSnapshotReplicatorTest extends HdfsSnapshotReplicator { private FsPermission fsPermission = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL); - private String[] args = {"--" + HdfsSnapshotMirrorProperties.DISTCP_MAX_MAPS.getName(), "1", + private String[] args = {"--" + HdfsSnapshotMirrorProperties.MAX_MAPS.getName(), "1", "--" + HdfsSnapshotMirrorProperties.MAP_BANDWIDTH_IN_MB.getName(), "100", "--" + HdfsSnapshotMirrorProperties.SOURCE_NN.getName(), "hdfs://localhost:54136", "--" + HdfsSnapshotMirrorProperties.SOURCE_EXEC_URL.getName(), "localhost:8021", @@ -67,6 +68,8 @@ public class HdfsSnapshotReplicatorTest extends HdfsSnapshotReplicator { "/apps/falcon/snapshot-replication/sourceDir/", "--" + HdfsSnapshotMirrorProperties.TARGET_SNAPSHOT_DIR.getName(), "/apps/falcon/snapshot-replication/targetDir/", + "--" + ReplicationDistCpOption.DISTCP_OPTION_IGNORE_ERRORS.getName(), "false", + "--" + ReplicationDistCpOption.DISTCP_OPTION_PRESERVE_ACL.getName(), "false", "--" + HdfsSnapshotMirrorProperties.TDE_ENCRYPTION_ENABLED.getName(), "false", "--" + HdfsSnapshotMirrorProperties.SNAPSHOT_JOB_NAME.getName(), "snapshotJobName", }; @@ -87,7 +90,7 @@ public void init() throws Exception { miniDfs.allowSnapshot(targetDir); cmd = getCommand(args); - Assert.assertEquals(cmd.getOptionValue(HdfsSnapshotMirrorProperties.DISTCP_MAX_MAPS.getName()), "1"); + Assert.assertEquals(cmd.getOptionValue(HdfsSnapshotMirrorProperties.MAX_MAPS.getName()), "1"); Assert.assertEquals(cmd.getOptionValue(HdfsSnapshotMirrorProperties.MAP_BANDWIDTH_IN_MB.getName()), "100"); } 
diff --git a/addons/hivedr/src/main/java/org/apache/falcon/hive/HiveDRArgs.java b/addons/hivedr/src/main/java/org/apache/falcon/hive/HiveDRArgs.java index d89148708..9decd30f7 100644 --- a/addons/hivedr/src/main/java/org/apache/falcon/hive/HiveDRArgs.java +++ b/addons/hivedr/src/main/java/org/apache/falcon/hive/HiveDRArgs.java @@ -30,6 +30,7 @@ public enum HiveDRArgs { SOURCE_CLUSTER("sourceCluster", "source cluster"), SOURCE_METASTORE_URI("sourceMetastoreUri", "source meta store uri"), SOURCE_HS2_URI("sourceHiveServer2Uri", "source HS2 uri"), + SOURCE_HS2_URI_EXTRA_OPTS("sourceHiveServer2ExtraOpts", "source HS2 extra opts", false), SOURCE_DATABASES("sourceDatabases", "comma source databases"), SOURCE_DATABASE("sourceDatabase", "First source database"), SOURCE_TABLES("sourceTables", "comma source tables"), @@ -47,6 +48,7 @@ public enum HiveDRArgs { // target meta store details TARGET_METASTORE_URI("targetMetastoreUri", "source meta store uri"), TARGET_HS2_URI("targetHiveServer2Uri", "source meta store uri"), + TARGET_HS2_URI_EXTRA_OPTS("targetHiveServer2ExtraOpts", "target HS2 extra opts", false), TARGET_STAGING_PATH("targetStagingPath", "source staging path for data", false), diff --git a/addons/hivedr/src/main/java/org/apache/falcon/hive/HiveDRTool.java b/addons/hivedr/src/main/java/org/apache/falcon/hive/HiveDRTool.java index e45b0d879..2701e0277 100644 --- a/addons/hivedr/src/main/java/org/apache/falcon/hive/HiveDRTool.java +++ b/addons/hivedr/src/main/java/org/apache/falcon/hive/HiveDRTool.java @@ -143,13 +143,13 @@ private void init(String[] args) throws Exception { LOG.info("srcStaginPath: {}", inputOptions.getSourceStagingPath()); LOG.info("tgtStaginPath: {}", inputOptions.getTargetStagingPath()); - Configuration sourceConf = FileUtils.getConfiguration(inputOptions.getSourceWriteEP(), + Configuration sourceConf = FileUtils.getConfiguration(getConf(), inputOptions.getSourceWriteEP(), inputOptions.getSourceNNKerberosPrincipal()); sourceClusterFS = 
FileSystem.get(sourceConf); - Configuration targetConf = FileUtils.getConfiguration(inputOptions.getTargetWriteEP(), + Configuration targetConf = FileUtils.getConfiguration(getConf(), inputOptions.getTargetWriteEP(), inputOptions.getTargetNNKerberosPrincipal()); targetClusterFS = FileSystem.get(targetConf); - jobConf = FileUtils.getConfiguration(inputOptions.getJobClusterWriteEP(), + jobConf = FileUtils.getConfiguration(getConf(), inputOptions.getJobClusterWriteEP(), inputOptions.getJobClusterNNPrincipal()); jobFS = FileSystem.get(jobConf); diff --git a/addons/hivedr/src/main/java/org/apache/falcon/hive/mapreduce/CopyReducer.java b/addons/hivedr/src/main/java/org/apache/falcon/hive/mapreduce/CopyReducer.java index f4bb31c89..7c415c364 100644 --- a/addons/hivedr/src/main/java/org/apache/falcon/hive/mapreduce/CopyReducer.java +++ b/addons/hivedr/src/main/java/org/apache/falcon/hive/mapreduce/CopyReducer.java @@ -48,7 +48,8 @@ public class CopyReducer extends Reducer { @Override protected void setup(Context context) throws IOException, InterruptedException { Configuration conf = context.getConfiguration(); - FileSystem fs= FileSystem.get(FileUtils.getConfiguration( + FileSystem fs= FileSystem.get( + FileUtils.getConfiguration(context.getConfiguration(), conf.get(HiveDRArgs.TARGET_NN.getName()), conf.get(HiveDRArgs.TARGET_NN_KERBEROS_PRINCIPAL.getName()))); hiveDRStore = new HiveDRStatusStore(fs); diff --git a/addons/hivedr/src/main/java/org/apache/falcon/hive/util/EventUtils.java b/addons/hivedr/src/main/java/org/apache/falcon/hive/util/EventUtils.java index 590a7e39a..492c70e9e 100644 --- a/addons/hivedr/src/main/java/org/apache/falcon/hive/util/EventUtils.java +++ b/addons/hivedr/src/main/java/org/apache/falcon/hive/util/EventUtils.java @@ -60,12 +60,14 @@ public class EventUtils { private Configuration conf = null; private String sourceHiveServer2Uri = null; + private String sourceHS2UriExtraOptions = null; private String sourceDatabase = null; private String 
sourceNN = null; private String sourceNNKerberosPrincipal = null; private String jobNN = null; private String jobNNKerberosPrincipal = null; private String targetHiveServer2Uri = null; + private String targetHS2UriExtraOptions = null; private String sourceStagingPath = null; private String targetStagingPath = null; private String targetNN = null; @@ -91,6 +93,7 @@ public class EventUtils { public EventUtils(Configuration conf) { this.conf = conf; sourceHiveServer2Uri = conf.get(HiveDRArgs.SOURCE_HS2_URI.getName()); + sourceHS2UriExtraOptions = conf.get(HiveDRArgs.SOURCE_HS2_URI_EXTRA_OPTS.getName()); sourceDatabase = conf.get(HiveDRArgs.SOURCE_DATABASE.getName()); sourceNN = conf.get(HiveDRArgs.SOURCE_NN.getName()); sourceNNKerberosPrincipal = conf.get(HiveDRArgs.SOURCE_NN_KERBEROS_PRINCIPAL.getName()); @@ -98,6 +101,7 @@ public EventUtils(Configuration conf) { jobNN = conf.get(HiveDRArgs.JOB_CLUSTER_NN.getName()); jobNNKerberosPrincipal = conf.get(HiveDRArgs.JOB_CLUSTER_NN_KERBEROS_PRINCIPAL.getName()); targetHiveServer2Uri = conf.get(HiveDRArgs.TARGET_HS2_URI.getName()); + targetHS2UriExtraOptions = conf.get(HiveDRArgs.TARGET_HS2_URI_EXTRA_OPTS.getName()); targetStagingPath = conf.get(HiveDRArgs.TARGET_STAGING_PATH.getName()); targetNN = conf.get(HiveDRArgs.TARGET_NN.getName()); targetNNKerberosPrincipal = conf.get(HiveDRArgs.TARGET_NN_KERBEROS_PRINCIPAL.getName()); @@ -122,29 +126,62 @@ public void setupConnection() throws Exception { if (conf.get(HiveDRArgs.EXECUTION_STAGE.getName()) .equalsIgnoreCase(HiveDRUtils.ExecutionStage.EXPORT.name())) { - String connString = JDBC_PREFIX + sourceHiveServer2Uri + "/" + sourceDatabase; + String authString = null; if (StringUtils.isNotEmpty(conf.get(HiveDRArgs.SOURCE_HIVE2_KERBEROS_PRINCIPAL.getName()))) { - connString += authTokenString; + authString = authTokenString; } + + String connString = getSourceHS2ConnectionUrl(authString); sourceConnection = DriverManager.getConnection(connString, user, 
password.getProperty("password")); sourceStatement = sourceConnection.createStatement(); } else { - String connString = JDBC_PREFIX + targetHiveServer2Uri + "/" + sourceDatabase; + String authString = null; if (StringUtils.isNotEmpty(conf.get(HiveDRArgs.TARGET_HIVE2_KERBEROS_PRINCIPAL.getName()))) { - connString += authTokenString; + authString = authTokenString; } + String connString = getTargetHS2ConnectionUrl(authString); targetConnection = DriverManager.getConnection(connString, user, password.getProperty("password")); targetStatement = targetConnection.createStatement(); } } + private String getSourceHS2ConnectionUrl(final String authTokenString) { + return getHS2ConnectionUrl(sourceHiveServer2Uri, sourceDatabase, + authTokenString, sourceHS2UriExtraOptions); + } + + private String getTargetHS2ConnectionUrl(final String authTokenString) { + return getHS2ConnectionUrl(targetHiveServer2Uri, sourceDatabase, + authTokenString, targetHS2UriExtraOptions); + } + + public static String getHS2ConnectionUrl(final String hs2Uri, final String database, + final String authTokenString, final String hs2UriExtraOpts) { + StringBuilder connString = new StringBuilder(); + connString.append(JDBC_PREFIX).append(StringUtils.removeEnd(hs2Uri, "/")).append("/").append(database); + + if (StringUtils.isNotBlank(authTokenString)) { + connString.append(authTokenString); + } + + if (StringUtils.isNotBlank(hs2UriExtraOpts) && !("NA".equalsIgnoreCase(hs2UriExtraOpts))) { + if (!hs2UriExtraOpts.startsWith(";")) { + connString.append(";"); + } + connString.append(hs2UriExtraOpts); + } + + LOG.info("getHS2ConnectionUrl connection uri: {}", connString); + return connString.toString(); + } + public void initializeFS() throws IOException { LOG.info("Initializing staging directory"); sourceStagingUri = new Path(sourceNN, sourceStagingPath).toString(); targetStagingUri = new Path(targetNN, targetStagingPath).toString(); - sourceFileSystem = FileSystem.get(FileUtils.getConfiguration(sourceNN, 
sourceNNKerberosPrincipal)); - jobFileSystem = FileSystem.get(FileUtils.getConfiguration(jobNN, jobNNKerberosPrincipal)); - targetFileSystem = FileSystem.get(FileUtils.getConfiguration(targetNN, targetNNKerberosPrincipal)); + sourceFileSystem = FileSystem.get(FileUtils.getConfiguration(conf, sourceNN, sourceNNKerberosPrincipal)); + jobFileSystem = FileSystem.get(FileUtils.getConfiguration(conf, jobNN, jobNNKerberosPrincipal)); + targetFileSystem = FileSystem.get(FileUtils.getConfiguration(conf, targetNN, targetNNKerberosPrincipal)); } private String readEvents(Path eventFileName) throws IOException { @@ -152,7 +189,7 @@ private String readEvents(Path eventFileName) throws IOException { BufferedReader in = new BufferedReader(new InputStreamReader(jobFileSystem.open(eventFileName))); try { String line; - while ((line=in.readLine())!=null) { + while ((line = in.readLine()) != null) { eventString.append(line); eventString.append(DelimiterUtils.NEWLINE_DELIM); } @@ -327,7 +364,7 @@ public void invokeCopy() throws Exception { public DistCpOptions getDistCpOptions() { // DistCpOptions expects the first argument to be a file OR a list of Paths - List sourceUris=new ArrayList<>(); + List sourceUris = new ArrayList<>(); sourceUris.add(new Path(sourceStagingUri)); DistCpOptions distcpOptions = new DistCpOptions(sourceUris, new Path(targetStagingUri)); diff --git a/addons/hivedr/src/main/java/org/apache/falcon/hive/util/FileUtils.java b/addons/hivedr/src/main/java/org/apache/falcon/hive/util/FileUtils.java index 8b5c8654a..dae4849eb 100644 --- a/addons/hivedr/src/main/java/org/apache/falcon/hive/util/FileUtils.java +++ b/addons/hivedr/src/main/java/org/apache/falcon/hive/util/FileUtils.java @@ -42,14 +42,14 @@ public final class FileUtils { private FileUtils() {} - public static Configuration getConfiguration(final String writeEP, - final String nnKerberosPrincipal) throws IOException { - Configuration conf = HiveDRUtils.getDefaultConf(); - conf.set("fs.defaultFS", writeEP); + 
public static Configuration getConfiguration(Configuration conf, + final String writeEP, final String nnKerberosPrincipal) throws IOException { + Configuration newConf = new Configuration(conf); + newConf.set("fs.defaultFS", writeEP); if (StringUtils.isNotEmpty(nnKerberosPrincipal)) { - conf.set("dfs.namenode.kerberos.principal", nnKerberosPrincipal); + newConf.set("dfs.namenode.kerberos.principal", nnKerberosPrincipal); } - return conf; + return newConf; } public static void validatePath(final FileSystem fileSystem, final Path basePath) throws IOException { diff --git a/addons/hivedr/src/main/java/org/apache/falcon/hive/util/HiveDRStatusStore.java b/addons/hivedr/src/main/java/org/apache/falcon/hive/util/HiveDRStatusStore.java index 44f098992..ee459a3c0 100644 --- a/addons/hivedr/src/main/java/org/apache/falcon/hive/util/HiveDRStatusStore.java +++ b/addons/hivedr/src/main/java/org/apache/falcon/hive/util/HiveDRStatusStore.java @@ -49,8 +49,8 @@ public class HiveDRStatusStore extends DRStatusStore { private static final Logger LOG = LoggerFactory.getLogger(DRStatusStore.class); private FileSystem fileSystem; - private static final String DEFAULT_STORE_PATH = StringUtils.removeEnd - (DRStatusStore.BASE_DEFAULT_STORE_PATH, File.separator) + File.separator + private static final String DEFAULT_STORE_PATH = StringUtils.removeEnd( + DRStatusStore.BASE_DEFAULT_STORE_PATH, File.separator) + File.separator + "hiveReplicationStatusStore" + File.separator; private static final FsPermission DEFAULT_STATUS_DIR_PERMISSION = @@ -90,10 +90,10 @@ private void init(FileSystem targetFileSystem) throws IOException { } } - /** - get all DB updated by the job. get all current table statuses for the DB merge the latest repl - status with prev table repl statuses. If all statuses are success, store the status as success - with largest eventId for the DB else store status as failure for the DB and lowest eventId. + /** + * get all DB updated by the job. 
get all current table statuses for the DB merge the latest repl + * status with prev table repl statuses. If all statuses are success, store the status as success + * with largest eventId for the DB else store status as failure for the DB and lowest eventId. */ @Override public void updateReplicationStatus(String jobName, List statusList) @@ -161,13 +161,13 @@ public void deleteReplicationStatus(String jobName, String database) throws Hive } } catch (IOException e) { throw new HiveReplicationException("Failed to delete status for Job " - + jobName + " and DB "+ database, e); + + jobName + " and DB " + database, e); } } private DBReplicationStatus getDbReplicationStatus(String source, String target, String jobName, - String database) throws HiveReplicationException{ + String database) throws HiveReplicationException { DBReplicationStatus dbReplicationStatus = null; Path statusDbDirPath = getStatusDbDirPath(database); Path statusDirPath = getStatusDirPath(database, jobName); @@ -253,7 +253,7 @@ public void rotateStatusFiles(Path statusDir, int numFiles, int maxFileAge) thro while (fileIterator.hasNext()) { fileList.add(fileIterator.next().getPath().toString()); } - if (fileList.size() > (numFiles+1)) { + if (fileList.size() > (numFiles + 1)) { // delete some files, as long as they are older than the time. 
Collections.sort(fileList); for (String file : fileList.subList(0, (fileList.size() - numFiles + 1))) { @@ -289,11 +289,11 @@ private DBReplicationStatus readStatusFile(Path statusDirPath) throws HiveReplic } public void checkForReplicationConflict(String newSource, String jobName, - String database, String table) throws HiveReplicationException { + String database, String table) throws HiveReplicationException { try { Path globPath = new Path(getStatusDbDirPath(database), "*" + File.separator + "latest.json"); FileStatus[] files = fileSystem.globStatus(globPath); - for(FileStatus file : files) { + for (FileStatus file : files) { DBReplicationStatus dbFileStatus = new DBReplicationStatus(IOUtils.toString( fileSystem.open(file.getPath()))); ReplicationStatus existingJob = dbFileStatus.getDatabaseStatus(); @@ -319,7 +319,7 @@ public void checkForReplicationConflict(String newSource, String jobName, allowed as long as the target tables are different. For example, job1 can replicate db1.table1 and job2 can replicate db1.table2. Both jobs cannot replicate to same table. */ - for(Map.Entry entry : dbFileStatus.getTableStatuses().entrySet()) { + for (Map.Entry entry : dbFileStatus.getTableStatuses().entrySet()) { if (table.equals(entry.getKey())) { throw new HiveReplicationException("Two different jobs are trying to replicate to same table " + entry.getKey() + ". New job = " + jobName diff --git a/addons/hivedr/src/test/java/org/apache/falcon/hive/util/EventUtilsTest.java b/addons/hivedr/src/test/java/org/apache/falcon/hive/util/EventUtilsTest.java new file mode 100644 index 000000000..2e785193d --- /dev/null +++ b/addons/hivedr/src/test/java/org/apache/falcon/hive/util/EventUtilsTest.java @@ -0,0 +1,71 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.falcon.hive.util; + +import org.testng.Assert; +import org.testng.annotations.Test; + +/** + * Unit tests for EventUtils. + */ +@Test +public class EventUtilsTest { + private static final String JDBC_PREFIX = "jdbc:"; + private static final String HS2_URI = "hive2://localhost:10000:"; + private static final String HS2_ZK_URI = "hive2://host1.com:2181,host2.com:2181/"; + private static final String DATABASE = "test"; + private static final String HS2_SSL_EXTRA_OPTS = "ssl=true;" + + "sslTrustStore=/var/lib/security/keystores/gateway.jks;" + + "trustStorePassword=1234?hive.server2.transport.mode=http;hive.server2.thrift.http" + + ".path=gateway/primaryCLuster/hive"; + private static final String HS2_ZK_EXTRA_OPTS = ";serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2"; + private static final String AUTHTOKEN_STRING = ";auth=delegationToken"; + public EventUtilsTest() { + } + + @Test + public void validateHs2Uri() { + final String expectedUri = JDBC_PREFIX + HS2_URI + "/" + DATABASE; + + Assert.assertEquals(EventUtils.getHS2ConnectionUrl(HS2_URI, DATABASE, null, null), expectedUri); + Assert.assertEquals(EventUtils.getHS2ConnectionUrl(HS2_URI, DATABASE, null, "NA"), expectedUri); + Assert.assertEquals(EventUtils.getHS2ConnectionUrl(HS2_URI, DATABASE, AUTHTOKEN_STRING, + null), expectedUri + AUTHTOKEN_STRING); + } + + @Test + public void 
validateHs2UriWhenSSLEnabled() { + final String expectedUri = JDBC_PREFIX + HS2_URI + "/" + DATABASE; + + Assert.assertEquals(EventUtils.getHS2ConnectionUrl(HS2_URI, DATABASE, null, HS2_SSL_EXTRA_OPTS), + expectedUri + ";" + HS2_SSL_EXTRA_OPTS); + Assert.assertEquals(EventUtils.getHS2ConnectionUrl(HS2_URI, DATABASE, AUTHTOKEN_STRING, HS2_SSL_EXTRA_OPTS), + expectedUri + AUTHTOKEN_STRING + ";" + HS2_SSL_EXTRA_OPTS); + } + + @Test + public void validateHs2UriWhenZKDiscoveryEnabled() { + final String expectedUri = JDBC_PREFIX + HS2_ZK_URI + DATABASE; + + Assert.assertEquals(EventUtils.getHS2ConnectionUrl(HS2_ZK_URI, DATABASE, null, HS2_ZK_EXTRA_OPTS), + expectedUri + HS2_ZK_EXTRA_OPTS); + Assert.assertEquals(EventUtils.getHS2ConnectionUrl(HS2_ZK_URI, DATABASE, AUTHTOKEN_STRING, HS2_ZK_EXTRA_OPTS), + expectedUri + AUTHTOKEN_STRING + HS2_ZK_EXTRA_OPTS); + } +} diff --git a/cli/pom.xml b/cli/pom.xml index e0a896845..a41e6d920 100644 --- a/cli/pom.xml +++ b/cli/pom.xml @@ -215,15 +215,6 @@ - - org.apache.rat - apache-rat-plugin - - - falcon-cli-hist.log - - - diff --git a/cli/src/main/java/org/apache/falcon/cli/FalconAdminCLI.java b/cli/src/main/java/org/apache/falcon/cli/FalconAdminCLI.java index 1b6d289ab..2c2d39200 100644 --- a/cli/src/main/java/org/apache/falcon/cli/FalconAdminCLI.java +++ b/cli/src/main/java/org/apache/falcon/cli/FalconAdminCLI.java @@ -23,20 +23,22 @@ import org.apache.commons.cli.Option; import org.apache.commons.cli.OptionGroup; import org.apache.commons.cli.Options; -import org.apache.falcon.FalconCLIConstants; +import org.apache.falcon.client.FalconCLIConstants; import org.apache.falcon.client.FalconCLIException; import org.apache.falcon.client.FalconClient; import java.io.IOException; import java.util.HashSet; import java.util.Set; +import static org.apache.falcon.client.FalconCLIConstants.STACK_OPTION; /** * Admin extension to Falcon Command Line Interface - wraps the RESTful API for admin commands. 
*/ public class FalconAdminCLI extends FalconCLI { - private static final String STACK_OPTION = "stack"; + + public FalconAdminCLI() throws Exception { super(); @@ -73,8 +75,7 @@ public Options createAdminOptions() { return adminOptions; } - public int adminCommand(CommandLine commandLine, FalconClient client, - String falconUrl) throws IOException { + public int adminCommand(CommandLine commandLine, FalconClient client, String falconUrl) throws IOException { String result; Set optionsList = new HashSet(); for (Option option : commandLine.getOptions()) { diff --git a/cli/src/main/java/org/apache/falcon/cli/FalconCLI.java b/cli/src/main/java/org/apache/falcon/cli/FalconCLI.java index bff818a20..0dd11f610 100644 --- a/cli/src/main/java/org/apache/falcon/cli/FalconCLI.java +++ b/cli/src/main/java/org/apache/falcon/cli/FalconCLI.java @@ -24,21 +24,18 @@ import org.apache.commons.cli.ParseException; import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.falcon.FalconCLIConstants; +import org.apache.falcon.client.FalconCLIConstants; import org.apache.falcon.cliParser.CLIParser; import org.apache.falcon.client.FalconCLIException; import org.apache.falcon.client.FalconClient; -import org.apache.falcon.entity.v0.EntityType; -import org.apache.falcon.resource.EntityList; -import org.apache.falcon.resource.InstancesResult; -import org.apache.falcon.resource.InstancesSummaryResult; import java.io.IOException; import java.io.InputStream; import java.io.PrintStream; -import java.util.Arrays; import java.util.Properties; import java.util.concurrent.atomic.AtomicReference; +import static org.apache.falcon.client.FalconCLIConstants.FALCON_URL; + /** * Falcon Command Line Interface - wraps the RESTful API. 
@@ -48,45 +45,6 @@ public class FalconCLI { public static final AtomicReference ERR = new AtomicReference(System.err); public static final AtomicReference OUT = new AtomicReference(System.out); - public static final String ENV_FALCON_DEBUG = "FALCON_DEBUG"; - public static final String DEBUG_OPTION = "debug"; - public static final String URL_OPTION = "url"; - public static final String FALCON_URL = "FALCON_URL"; - - public static final String ADMIN_CMD = "admin"; - public static final String HELP_CMD = "help"; - public static final String METADATA_CMD = "metadata"; - public static final String ENTITY_CMD = "entity"; - public static final String INSTANCE_CMD = "instance"; - public static final String RECIPE_CMD = "recipe"; - - public static final String TYPE_OPT = "type"; - public static final String COLO_OPT = "colo"; - public static final String CLUSTER_OPT = "cluster"; - public static final String FEED_OPT = "feed"; - public static final String PROCESS_OPT = "process"; - public static final String ENTITY_NAME_OPT = "name"; - public static final String FILE_PATH_OPT = "file"; - public static final String VERSION_OPT = "version"; - public static final String SCHEDULE_OPT = "schedule"; - public static final String SUSPEND_OPT = "suspend"; - public static final String RESUME_OPT = "resume"; - public static final String STATUS_OPT = "status"; - public static final String SUMMARY_OPT = "summary"; - public static final String DEPENDENCY_OPT = "dependency"; - public static final String LIST_OPT = "list"; - public static final String SKIPDRYRUN_OPT = "skipDryRun"; - public static final String FILTER_BY_OPT = "filterBy"; - public static final String ORDER_BY_OPT = "orderBy"; - public static final String SORT_ORDER_OPT = "sortOrder"; - public static final String OFFSET_OPT = "offset"; - public static final String NUM_RESULTS_OPT = "numResults"; - public static final String START_OPT = "start"; - public static final String END_OPT = "end"; - public static final String 
CURRENT_COLO = "current.colo"; - public static final String CLIENT_PROPERTIES = "/client.properties"; - public static final String DO_AS_OPT = "doAs"; - private final Properties clientProperties; public FalconCLI() throws Exception { @@ -206,29 +164,6 @@ protected Integer parseIntegerInput(String optionValue, Integer defaultVal, Stri return integer; } - public static void validateEntityTypeForSummary(String type) { - EntityType entityType = EntityType.getEnum(type); - if (!entityType.isSchedulable()) { - throw new FalconCLIException("Invalid entity type " + entityType - + " for EntitySummary API. Valid options are feed or process"); - } - } - - protected void validateNotEmpty(String paramVal, String paramName) { - if (StringUtils.isBlank(paramVal)) { - throw new FalconCLIException("Missing argument : " + paramName); - } - } - - protected void validateSortOrder(String sortOrder) { - if (!StringUtils.isBlank(sortOrder)) { - if (!sortOrder.equalsIgnoreCase("asc") && !sortOrder.equalsIgnoreCase("desc")) { - throw new FalconCLIException("Value for param sortOrder should be \"asc\" or \"desc\". 
It is : " - + sortOrder); - } - } - } - protected String getColo(String colo) throws IOException { if (colo == null) { Properties prop = getClientProperties(); @@ -237,52 +172,6 @@ protected String getColo(String colo) throws IOException { return colo; } - public static void validateFilterBy(String filterBy, String filterType) { - if (StringUtils.isEmpty(filterBy)) { - return; - } - String[] filterSplits = filterBy.split(","); - for (String s : filterSplits) { - String[] tempKeyVal = s.split(":", 2); - try { - if (filterType.equals("entity")) { - EntityList.EntityFilterByFields.valueOf(tempKeyVal[0].toUpperCase()); - } else if (filterType.equals("instance")) { - InstancesResult.InstanceFilterFields.valueOf(tempKeyVal[0].toUpperCase()); - }else if (filterType.equals("summary")) { - InstancesSummaryResult.InstanceSummaryFilterFields.valueOf(tempKeyVal[0].toUpperCase()); - } else { - throw new IllegalArgumentException("Invalid API call: filterType is not valid"); - } - } catch (IllegalArgumentException ie) { - throw new FalconCLIException("Invalid filterBy argument : " + tempKeyVal[0] + " in : " + s); - } - } - } - - public static void validateOrderBy(String orderBy, String action) { - if (StringUtils.isBlank(orderBy)) { - return; - } - if (action.equals("instance")) { - if (Arrays.asList(new String[]{"status", "cluster", "starttime", "endtime"}) - .contains(orderBy.toLowerCase())) { - return; - } - } else if (action.equals("entity")) { - if (Arrays.asList(new String[] {"type", "name"}).contains(orderBy.toLowerCase())) { - return; - } - } else if (action.equals("summary")) { - if (Arrays.asList(new String[]{"cluster"}) - .contains(orderBy.toLowerCase())) { - return; - } - } - throw new FalconCLIException("Invalid orderBy argument : " + orderBy); - } - - protected String getFalconEndpoint(CommandLine commandLine) throws IOException { String url = commandLine.getOptionValue(FalconCLIConstants.URL_OPTION); if (url == null) { diff --git 
a/cli/src/main/java/org/apache/falcon/cli/FalconEntityCLI.java b/cli/src/main/java/org/apache/falcon/cli/FalconEntityCLI.java index facb147e3..a8aea527d 100644 --- a/cli/src/main/java/org/apache/falcon/cli/FalconEntityCLI.java +++ b/cli/src/main/java/org/apache/falcon/cli/FalconEntityCLI.java @@ -23,7 +23,8 @@ import org.apache.commons.cli.OptionGroup; import org.apache.commons.cli.Options; import org.apache.commons.lang3.StringUtils; -import org.apache.falcon.FalconCLIConstants; +import org.apache.falcon.client.FalconCLIConstants; +import org.apache.falcon.ValidationUtil; import org.apache.falcon.ResponseHelper; import org.apache.falcon.client.FalconCLIException; import org.apache.falcon.client.FalconClient; @@ -38,71 +39,82 @@ import java.util.HashSet; import java.util.Set; +import static org.apache.falcon.client.FalconCLIConstants.SUBMIT_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.UPDATE_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.SCHEDULE_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.SUSPEND_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.RESUME_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.DELETE_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.SUBMIT_AND_SCHEDULE_OPT; +import static org.apache.falcon.client.FalconCLIConstants.VALIDATE_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.SUBMIT_AND_SCHEDULE_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.VALIDATE_OPT; +import static org.apache.falcon.client.FalconCLIConstants.STATUS_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.DEFINITION_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.DEPENDENCY_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.LIST_OPT_DESCRIPTION; +import static 
org.apache.falcon.client.FalconCLIConstants.LOOKUP_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.SLA_MISS_ALERT_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.SUMMARY_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.TOUCH_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.UPDATE_CLUSTER_DEPENDENTS_OPT; +import static org.apache.falcon.client.FalconCLIConstants.URL_OPTION_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.URL_OPTION; +import static org.apache.falcon.client.FalconCLIConstants.TYPE_OPT; +import static org.apache.falcon.client.FalconCLIConstants.FILE_PATH_OPT; +import static org.apache.falcon.client.FalconCLIConstants.FILE_PATH_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.ENTITY_NAME_OPT; +import static org.apache.falcon.client.FalconCLIConstants.ENTITY_NAME_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.START_OPT; +import static org.apache.falcon.client.FalconCLIConstants.START_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.END_OPT; +import static org.apache.falcon.client.FalconCLIConstants.END_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.COLO_OPT; +import static org.apache.falcon.client.FalconCLIConstants.COLO_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.TYPE_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.CLUSTER_OPT; +import static org.apache.falcon.client.FalconCLIConstants.FIELDS_OPT; +import static org.apache.falcon.client.FalconCLIConstants.FIELDS_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.FILTER_BY_OPT; +import static org.apache.falcon.client.FalconCLIConstants.FILTER_BY_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.TAGS_OPT; +import static 
org.apache.falcon.client.FalconCLIConstants.TAGS_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.NAMESEQ_OPT; +import static org.apache.falcon.client.FalconCLIConstants.CLUSTER_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.TAGKEYS_OPT; +import static org.apache.falcon.client.FalconCLIConstants.TAGKEYS_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.ORDER_BY_OPT; +import static org.apache.falcon.client.FalconCLIConstants.ORDER_BY_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.SORT_ORDER_OPT; +import static org.apache.falcon.client.FalconCLIConstants.SORT_ORDER_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.OFFSET_OPT; +import static org.apache.falcon.client.FalconCLIConstants.OFFSET_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.NUM_RESULTS_OPT; +import static org.apache.falcon.client.FalconCLIConstants.NUM_RESULTS_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.NUM_INSTANCES_OPT; +import static org.apache.falcon.client.FalconCLIConstants.NUM_INSTANCES_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.PATH_OPT; +import static org.apache.falcon.client.FalconCLIConstants.PATH_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.SKIPDRYRUN_OPT; +import static org.apache.falcon.client.FalconCLIConstants.SKIPDRYRUN_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.DO_AS_OPT; +import static org.apache.falcon.client.FalconCLIConstants.DO_AS_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.PROPS_OPT; +import static org.apache.falcon.client.FalconCLIConstants.PROPS_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.SHOWSCHEDULER_OPT; +import static org.apache.falcon.client.FalconCLIConstants.SHOWSCHEDULER_OPT_DESCRIPTION; +import static 
org.apache.falcon.client.FalconCLIConstants.DEBUG_OPTION; +import static org.apache.falcon.client.FalconCLIConstants.DEBUG_OPTION_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.NAMESEQ_OPT_DESCRIPTION; +import static org.apache.falcon.ValidationUtil.validateNotEmpty; +import static org.apache.falcon.ValidationUtil.validateSortOrder; + + /** * Entity extension to Falcon Command Line Interface - wraps the RESTful API for entities. */ public class FalconEntityCLI extends FalconCLI { - public static final String SUBMIT_OPT_DESCRIPTION = "Submits an entity xml to Falcon"; - public static final String UPDATE_OPT_DESCRIPTION = "Updates an existing entity"; - public static final String DELETE_OPT_DESCRIPTION = "Deletes an entity in Falcon, and kills its instance from " - + "workflow engine"; - public static final String SUBMIT_AND_SCHEDULE_OPT = "submitAndSchedule"; - public static final String SUBMIT_AND_SCHEDULE_OPT_DESCRIPTION = "Submits an entity to Falcon and " - + "schedules it immediately"; - public static final String VALIDATE_OPT = "validate"; - public static final String VALIDATE_OPT_DESCRIPTION = "Validates an entity based on the entity type"; - public static final String DEFINITION_OPT_DESCRIPTION = "Gets the Definition of entity"; - public static final String SLA_MISS_ALERT_OPT_DESCRIPTION = "Get missing feed instances which missed SLA"; - - - public static final String LOOKUP_OPT_DESCRIPTION = "Lookup a feed given its instance's path"; - public static final String PATH_OPT = "path"; - public static final String PATH_OPT_DESCRIPTION = "Path for a feed's instance"; - public static final String TOUCH_OPT_DESCRIPTION = "Force update the entity in workflow engine" - + "(even without any changes to entity)"; - public static final String PROPS_OPT = "properties"; - public static final String PROPS_OPT_DESCRIPTION = "User supplied comma separated key value properties"; - public static final String FIELDS_OPT = "fields"; - public static final 
String FIELDS_OPT_DESCRIPTION = "Entity fields to show for a request"; - public static final String TAGS_OPT = "tags"; - public static final String TAGS_OPT_DESCRIPTION = "Filter returned entities by the specified tags"; - public static final String NUM_INSTANCES_OPT = "numInstances"; - public static final String NUM_INSTANCES_OPT_DESCRIPTION = "Number of instances to return per entity " - + "summary request"; - public static final String NAMESEQ_OPT = "nameseq"; - public static final String NAMESEQ_OPT_DESCRIPTION = "Subsequence of entity name"; - public static final String TAGKEYS_OPT = "tagkeys"; - public static final String TAGKEYS_OPT_DESCRIPTION = "Keywords in tags"; - public static final String OFFSET_OPT_DESCRIPTION = "Start returning entities from this offset"; - public static final String SHOWSCHEDULER_OPT = "showScheduler"; - public static final String SHOWSCHEDULER_OPT_DESCRIPTION = "To return the scheduler " - + "on which the entity is scheduled."; - public static final String DEBUG_OPTION_DESCRIPTION = "Use debug mode to see debugging statements on stdout"; - public static final String URL_OPTION_DESCRIPTION = "Falcon URL"; - public static final String TYPE_OPT_DESCRIPTION = "Type of the entity. 
Valid entity types are: cluster, feed, " - + "process and datasource."; - public static final String COLO_OPT_DESCRIPTION = "Colo name"; - public static final String END_OPT_DESCRIPTION = "End time is optional for summary"; - public static final String CLUSTER_OPT_DESCRIPTION = "Cluster name"; - public static final String ENTITY_NAME_OPT_DESCRIPTION = "Name of the entity, recommended but not mandatory " - + "to be unique."; - public static final String FILE_PATH_OPT_DESCRIPTION = "Path to entity xml file"; - public static final String SCHEDULE_OPT_DESCRIPTION = "Schedules a submited entity in Falcon"; - public static final String SUSPEND_OPT_DESCRIPTION = "Suspends a running entity in Falcon"; - public static final String RESUME_OPT_DESCRIPTION = "Resumes a suspended entity in Falcon"; - public static final String STATUS_OPT_DESCRIPTION = "Gets the status of entity"; - public static final String SUMMARY_OPT_DESCRIPTION = "Get summary of instances for list of entities"; - public static final String DEPENDENCY_OPT_DESCRIPTION = "Gets the dependencies of entity"; - public static final String LIST_OPT_DESCRIPTION = "List entities registered for a type"; - public static final String SKIPDRYRUN_OPT_DESCRIPTION = "skip dry run in workflow engine"; - public static final String FILTER_BY_OPT_DESCRIPTION = "Filter returned entities by the specified status"; - public static final String ORDER_BY_OPT_DESCRIPTION = "Order returned entities by this field"; - public static final String SORT_ORDER_OPT_DESCRIPTION = "asc or desc order for results"; - public static final String NUM_RESULTS_OPT_DESCRIPTION = "Number of results to return per request"; - public static final String START_OPT_DESCRIPTION = "Start time is optional for summary"; - public static final String DO_AS_OPT_DESCRIPTION = "doAs user"; - public FalconEntityCLI() throws Exception { super(); } @@ -129,11 +141,10 @@ public Options createEntityOptions() { Option entitySummary = new 
Option(FalconCLIConstants.SUMMARY_OPT, false, SUMMARY_OPT_DESCRIPTION); Option touch = new Option(FalconCLIConstants.TOUCH_OPT, false, TOUCH_OPT_DESCRIPTION); - Option updateClusterDependents = new Option(FalconCLIConstants.UPDATE_CLUSTER_DEPENDENTS_OPT, false, + Option updateClusterDependents = new Option(UPDATE_CLUSTER_DEPENDENTS_OPT, false, "Updates dependent entities of a cluster in workflow engine"); OptionGroup group = new OptionGroup(); - group.addOption(submit); group.addOption(update); group.addOption(updateClusterDependents); group.addOption(schedule); @@ -150,6 +161,7 @@ public Options createEntityOptions() { group.addOption(slaAlert); group.addOption(entitySummary); group.addOption(touch); + group.addOption(submit); Option url = new Option(URL_OPTION, true, URL_OPTION_DESCRIPTION); Option entityType = new Option(TYPE_OPT, true, TYPE_OPT_DESCRIPTION); @@ -245,7 +257,7 @@ public void entityCommand(CommandLine commandLine, FalconClient client) throws I } EntityType entityTypeEnum = null; if (optionsList.contains(FalconCLIConstants.LIST_OPT) - || optionsList.contains(FalconCLIConstants.UPDATE_CLUSTER_DEPENDENTS_OPT)) { + || optionsList.contains(UPDATE_CLUSTER_DEPENDENTS_OPT)) { if (entityType == null) { entityType = ""; } @@ -328,18 +340,18 @@ public void entityCommand(CommandLine commandLine, FalconClient client) throws I result = client.getDependency(entityType, entityName, doAsUser).toString(); } else if (optionsList.contains(FalconCLIConstants.LIST_OPT)) { validateColo(optionsList); - validateEntityFields(fields); - validateOrderBy(orderBy, entityAction); - validateFilterBy(filterBy, entityAction); + ValidationUtil.validateEntityFields(fields); + ValidationUtil.validateOrderBy(orderBy, entityAction); + ValidationUtil.validateFilterBy(filterBy, entityAction); EntityList entityList = client.getEntityList(entityType, fields, nameSubsequence, tagKeywords, filterBy, filterTags, orderBy, sortOrder, offset, numResults, doAsUser); result = entityList != null ? 
entityList.toString() : "No entity of type (" + entityType + ") found."; } else if (optionsList.contains(FalconCLIConstants.SUMMARY_OPT)) { - validateEntityTypeForSummary(entityType); + ValidationUtil.validateEntityTypeForSummary(entityType); validateNotEmpty(cluster, FalconCLIConstants.CLUSTER_OPT); - validateEntityFields(fields); - validateFilterBy(filterBy, entityAction); - validateOrderBy(orderBy, entityAction); + ValidationUtil.validateEntityFields(fields); + ValidationUtil.validateFilterBy(filterBy, entityAction); + ValidationUtil.validateOrderBy(orderBy, entityAction); result = ResponseHelper.getString(client.getEntitySummary( entityType, cluster, start, end, fields, filterBy, filterTags, orderBy, sortOrder, offset, numResults, numInstances, doAsUser)); @@ -363,20 +375,6 @@ private void validateColo(Set optionsList) { } } - public static void validateEntityFields(String fields) { - if (StringUtils.isEmpty(fields)) { - return; - } - String[] fieldsList = fields.split(","); - for (String s : fieldsList) { - try { - EntityList.EntityFieldList.valueOf(s.toUpperCase()); - } catch (IllegalArgumentException ie) { - throw new FalconCLIException("Invalid fields argument : " + FalconCLIConstants.FIELDS_OPT); - } - } - } - private Date parseDateString(String time) { if (time != null && !time.isEmpty()) { try { diff --git a/cli/src/main/java/org/apache/falcon/cli/FalconExtensionCLI.java b/cli/src/main/java/org/apache/falcon/cli/FalconExtensionCLI.java index a8a30ab07..dafd7a82c 100644 --- a/cli/src/main/java/org/apache/falcon/cli/FalconExtensionCLI.java +++ b/cli/src/main/java/org/apache/falcon/cli/FalconExtensionCLI.java @@ -27,7 +27,7 @@ import org.apache.commons.cli.OptionGroup; import org.apache.commons.cli.Options; import org.apache.commons.lang3.StringUtils; -import org.apache.falcon.FalconCLIConstants; +import org.apache.falcon.client.FalconCLIConstants; import org.apache.falcon.client.FalconCLIException; import org.apache.falcon.client.FalconClient; import 
org.apache.falcon.resource.ExtensionInstanceList; diff --git a/cli/src/main/java/org/apache/falcon/cli/FalconInstanceCLI.java b/cli/src/main/java/org/apache/falcon/cli/FalconInstanceCLI.java index f882eb5eb..cd2ade072 100644 --- a/cli/src/main/java/org/apache/falcon/cli/FalconInstanceCLI.java +++ b/cli/src/main/java/org/apache/falcon/cli/FalconInstanceCLI.java @@ -22,7 +22,8 @@ import org.apache.commons.cli.Option; import org.apache.commons.cli.OptionGroup; import org.apache.commons.cli.Options; -import org.apache.falcon.FalconCLIConstants; +import org.apache.falcon.client.FalconCLIConstants; +import org.apache.falcon.ValidationUtil; import org.apache.falcon.LifeCycle; import org.apache.falcon.ResponseHelper; import org.apache.falcon.client.FalconCLIException; @@ -30,32 +31,89 @@ import org.apache.falcon.resource.InstanceDependencyResult; import java.io.IOException; -import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Set; +import static org.apache.falcon.client.FalconCLIConstants.RUNNING_OPT; +import static org.apache.falcon.client.FalconCLIConstants.RUNNING_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.LIST_OPT; +import static org.apache.falcon.client.FalconCLIConstants.LIST_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.STATUS_OPT; +import static org.apache.falcon.client.FalconCLIConstants.STATUS_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.SUMMARY_OPT; +import static org.apache.falcon.client.FalconCLIConstants.SUMMARY_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.KILL_OPT; +import static org.apache.falcon.client.FalconCLIConstants.KILL_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.SUSPEND_OPT; +import static org.apache.falcon.client.FalconCLIConstants.SUSPEND_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.RESUME_OPT; +import static 
org.apache.falcon.client.FalconCLIConstants.RESUME_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.RERUN_OPT; +import static org.apache.falcon.client.FalconCLIConstants.LOG_OPT; +import static org.apache.falcon.client.FalconCLIConstants.LOG_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.PARARMS_OPT; +import static org.apache.falcon.client.FalconCLIConstants.PARARMS_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.LISTING_OPT; +import static org.apache.falcon.client.FalconCLIConstants.LISTING_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.DEPENDENCY_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.DEPENDENCY_OPT; +import static org.apache.falcon.client.FalconCLIConstants.TRIAGE_OPT; +import static org.apache.falcon.client.FalconCLIConstants.TRIAGE_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.SEARCH_OPT; +import static org.apache.falcon.client.FalconCLIConstants.URL_OPTION; +import static org.apache.falcon.client.FalconCLIConstants.START_OPT; +import static org.apache.falcon.client.FalconCLIConstants.START_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.END_OPT; +import static org.apache.falcon.client.FalconCLIConstants.END_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.RUNID_OPT; +import static org.apache.falcon.client.FalconCLIConstants.CLUSTERS_OPT; +import static org.apache.falcon.client.FalconCLIConstants.CLUSTERS_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.SOURCECLUSTER_OPT; +import static org.apache.falcon.client.FalconCLIConstants.SOURCECLUSTER_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.FILE_PATH_OPT; +import static org.apache.falcon.client.FalconCLIConstants.FILE_PATH_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.TYPE_OPT; +import static 
org.apache.falcon.client.FalconCLIConstants.TYPE_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.ENTITY_NAME_OPT; +import static org.apache.falcon.client.FalconCLIConstants.ENTITY_NAME_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.COLO_OPT; +import static org.apache.falcon.client.FalconCLIConstants.COLO_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.RERUN_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.URL_OPTION_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.RUNID_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.LIFECYCLE_OPT; +import static org.apache.falcon.client.FalconCLIConstants.LIFECYCLE_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.FILTER_BY_OPT; +import static org.apache.falcon.client.FalconCLIConstants.FILTER_BY_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.ORDER_BY_OPT; +import static org.apache.falcon.client.FalconCLIConstants.ORDER_BY_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.SORT_ORDER_OPT; +import static org.apache.falcon.client.FalconCLIConstants.SORT_ORDER_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.OFFSET_OPT; +import static org.apache.falcon.client.FalconCLIConstants.OFFSET_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.NUM_RESULTS_OPT; +import static org.apache.falcon.client.FalconCLIConstants.NUM_RESULTS_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.FORCE_RERUN_FLAG; +import static org.apache.falcon.client.FalconCLIConstants.FORCE_RERUN_FLAG_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.DO_AS_OPT; +import static org.apache.falcon.client.FalconCLIConstants.DO_AS_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.DEBUG_OPTION; +import static 
org.apache.falcon.client.FalconCLIConstants.DEBUG_OPTION_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.INSTANCE_TIME_OPT; +import static org.apache.falcon.client.FalconCLIConstants.INSTANCE_TIME_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.ALL_ATTEMPTS; +import static org.apache.falcon.client.FalconCLIConstants.ALL_ATTEMPTS_DESCRIPTION; +import static org.apache.falcon.ValidationUtil.getLifeCycle; +import static org.apache.falcon.ValidationUtil.validateSortOrder; +import static org.apache.falcon.ValidationUtil.validateNotEmpty; + + /** * Instance extension to Falcon Command Line Interface - wraps the RESTful API for instances. */ public class FalconInstanceCLI extends FalconCLI { - private static final String FORCE_RERUN_FLAG = "force"; - private static final String INSTANCE_TIME_OPT = "instanceTime"; - private static final String RUNNING_OPT = "running"; - private static final String KILL_OPT = "kill"; - private static final String RERUN_OPT = "rerun"; - private static final String LOG_OPT = "logs"; - private static final String SEARCH_OPT = "search"; - private static final String ALL_ATTEMPTS = "allAttempts"; - private static final String RUNID_OPT = "runid"; - private static final String CLUSTERS_OPT = "clusters"; - private static final String SOURCECLUSTER_OPT = "sourceClusters"; - private static final String LIFECYCLE_OPT = "lifecycle"; - private static final String PARARMS_OPT = "params"; - private static final String LISTING_OPT = "listing"; - private static final String TRIAGE_OPT = "triage"; - public FalconInstanceCLI() throws Exception { super(); } @@ -63,40 +121,21 @@ public FalconInstanceCLI() throws Exception { public Options createInstanceOptions() { Options instanceOptions = new Options(); - - Option running = new Option(RUNNING_OPT, false, - "Gets running process instances for a given process"); - Option list = new Option(FalconCLIConstants.LIST_OPT, false, - "Gets all instances for a given 
process in the range start time and optional end time"); - Option status = new Option(FalconCLIConstants.STATUS_OPT, false, - "Gets status of process instances for a given process in the range start time and optional end time"); - Option summary = new Option(FalconCLIConstants.SUMMARY_OPT, false, - "Gets summary of instances for a given process in the range start time and optional end time"); - Option kill = new Option(KILL_OPT, false, - "Kills active process instances for a given process in the range start time and optional end time"); - Option suspend = new Option(FalconCLIConstants.SUSPEND_OPT, false, - "Suspends active process instances for a given process in the range start time and optional end time"); - Option resume = new Option(FalconCLIConstants.RESUME_OPT, false, - "Resumes suspended process instances for a given process " - + "in the range start time and optional end time"); - Option rerun = new Option(RERUN_OPT, false, - "Reruns process instances for a given process in the range start time and " - + "optional end time and overrides properties present in job.properties file"); - Option logs = new Option(LOG_OPT, false, - "Logs print the logs for process instances for a given process in " - + "the range start time and optional end time"); - Option params = new Option(PARARMS_OPT, false, - "Displays the workflow parameters for a given instance of specified nominal time" - + "start time represents nominal time and end time is not considered"); - Option listing = new Option(LISTING_OPT, false, - "Displays feed listing and their status between a start and end time range."); - Option dependency = new Option(FalconCLIConstants.DEPENDENCY_OPT, false, - "Displays dependent instances for a specified instance."); - Option triage = new Option(TRIAGE_OPT, false, - "Triage a feed or process instance and find the failures in it's lineage."); + Option running = new Option(RUNNING_OPT, false, RUNNING_OPT_DESCRIPTION); + Option list = new Option(LIST_OPT, false, 
LIST_OPT_DESCRIPTION); + Option status = new Option(STATUS_OPT, false, STATUS_OPT_DESCRIPTION); + Option summary = new Option(SUMMARY_OPT, false, SUMMARY_OPT_DESCRIPTION); + Option kill = new Option(KILL_OPT, false, KILL_OPT_DESCRIPTION); + Option suspend = new Option(SUSPEND_OPT, false, SUSPEND_OPT_DESCRIPTION); + Option resume = new Option(RESUME_OPT, false, RESUME_OPT_DESCRIPTION); + Option rerun = new Option(RERUN_OPT, false, RERUN_OPT_DESCRIPTION); + Option logs = new Option(LOG_OPT, false, LOG_OPT_DESCRIPTION); + Option params = new Option(PARARMS_OPT, false, PARARMS_OPT_DESCRIPTION); + Option listing = new Option(LISTING_OPT, false, LISTING_OPT_DESCRIPTION); + Option dependency = new Option(DEPENDENCY_OPT, false, DEPENDENCY_OPT_DESCRIPTION); + Option triage = new Option(TRIAGE_OPT, false, TRIAGE_OPT_DESCRIPTION); Option search = new Option(SEARCH_OPT, false, "Search instances with filtering criteria on the entity, instance time and status."); - OptionGroup group = new OptionGroup(); group.addOption(running); group.addOption(list); @@ -114,52 +153,32 @@ public Options createInstanceOptions() { group.addOption(triage); group.addOption(search); - Option url = new Option(FalconCLIConstants.URL_OPTION, true, "Falcon URL"); - Option start = new Option(FalconCLIConstants.START_OPT, true, - "Start time is required for commands, status, kill, suspend, resume and re-run" - + "and it is nominal time while displaying workflow params"); - Option end = new Option(FalconCLIConstants.END_OPT, true, - "End time is optional for commands, status, kill, suspend, resume and re-run; " - + "if not specified then current time is considered as end time"); - Option runid = new Option(RUNID_OPT, true, - "Instance runid is optional and user can specify the runid, defaults to 0"); - Option clusters = new Option(CLUSTERS_OPT, true, - "clusters is optional for commands kill, suspend and resume, " - + "should not be specified for other commands"); - Option sourceClusters = new 
Option(SOURCECLUSTER_OPT, true, - " source cluster is optional for commands kill, suspend and resume, " - + "should not be specified for other commands (required for only feed)"); - Option filePath = new Option(FalconCLIConstants.FILE_PATH_OPT, true, - "Path to job.properties file is required for rerun command, " - + "it should contain name=value pair for properties to override for rerun"); - Option entityType = new Option(FalconCLIConstants.TYPE_OPT, true, - "Entity type, can be feed or process xml"); - Option entityName = new Option(FalconCLIConstants.ENTITY_NAME_OPT, true, - "Entity name, can be feed or process name"); - Option colo = new Option(FalconCLIConstants.COLO_OPT, true, - "Colo on which the cmd has to be executed"); - Option lifecycle = new Option(LIFECYCLE_OPT, true, - "describes life cycle of entity , for feed it can be replication/retention " - + "and for process it can be execution"); - Option filterBy = new Option(FalconCLIConstants.FILTER_BY_OPT, true, - "Filter returned instances by the specified fields"); - Option orderBy = new Option(FalconCLIConstants.ORDER_BY_OPT, true, - "Order returned instances by this field"); - Option sortOrder = new Option(FalconCLIConstants.SORT_ORDER_OPT, true, "asc or desc order for results"); - Option offset = new Option(FalconCLIConstants.OFFSET_OPT, true, - "Start returning instances from this offset"); - Option numResults = new Option(FalconCLIConstants.NUM_RESULTS_OPT, true, - "Number of results to return per request"); - Option forceRerun = new Option(FORCE_RERUN_FLAG, false, - "Flag to forcefully rerun entire workflow of an instance"); - Option doAs = new Option(FalconCLIConstants.DO_AS_OPT, true, "doAs user"); - Option debug = new Option(FalconCLIConstants.DEBUG_OPTION, false, "Use debug mode to see" - + " debugging statements on stdout"); - Option instanceTime = new Option(INSTANCE_TIME_OPT, true, "Time for an instance"); + Option url = new Option(URL_OPTION, true, URL_OPTION_DESCRIPTION); + Option start = 
new Option(START_OPT, true, START_OPT_DESCRIPTION); + Option end = new Option(END_OPT, true, END_OPT_DESCRIPTION); + Option runid = new Option(RUNID_OPT, true, RUNID_OPT_DESCRIPTION); + Option clusters = new Option(CLUSTERS_OPT, true, CLUSTERS_OPT_DESCRIPTION); + Option sourceClusters = new Option(SOURCECLUSTER_OPT, true, SOURCECLUSTER_OPT_DESCRIPTION); + Option filePath = new Option(FILE_PATH_OPT, true, FILE_PATH_OPT_DESCRIPTION); + Option entityType = new Option(TYPE_OPT, true, TYPE_OPT_DESCRIPTION); + Option entityName = new Option(ENTITY_NAME_OPT, true, ENTITY_NAME_OPT_DESCRIPTION); + Option colo = new Option(COLO_OPT, true, COLO_OPT_DESCRIPTION); + Option lifecycle = new Option(LIFECYCLE_OPT, true, LIFECYCLE_OPT_DESCRIPTION); + Option filterBy = new Option(FILTER_BY_OPT, true, FILTER_BY_OPT_DESCRIPTION); + Option orderBy = new Option(ORDER_BY_OPT, true, ORDER_BY_OPT_DESCRIPTION); + Option sortOrder = new Option(SORT_ORDER_OPT, true, SORT_ORDER_OPT_DESCRIPTION); + Option offset = new Option(OFFSET_OPT, true, OFFSET_OPT_DESCRIPTION); + Option numResults = new Option(NUM_RESULTS_OPT, true, NUM_RESULTS_OPT_DESCRIPTION); + Option forceRerun = new Option(FORCE_RERUN_FLAG, false, FORCE_RERUN_FLAG_DESCRIPTION); + Option doAs = new Option(DO_AS_OPT, true, DO_AS_OPT_DESCRIPTION); + Option debug = new Option(DEBUG_OPTION, false, DEBUG_OPTION_DESCRIPTION); + + Option instanceTime = new Option(INSTANCE_TIME_OPT, true, INSTANCE_TIME_OPT_DESCRIPTION); + + Option allAttempts = new Option(ALL_ATTEMPTS, false, ALL_ATTEMPTS_DESCRIPTION); Option instanceStatus = new Option(FalconCLIConstants.INSTANCE_STATUS_OPT, true, "Instance status"); Option nameSubsequence = new Option(FalconCLIConstants.NAMESEQ_OPT, true, "Subsequence of entity name"); Option tagKeywords = new Option(FalconCLIConstants.TAGKEYS_OPT, true, "Keywords in tags"); - Option allAttempts = new Option(ALL_ATTEMPTS, false, "To get all attempts of corresponding instances"); instanceOptions.addOption(url); 
instanceOptions.addOptionGroup(group); @@ -230,7 +249,7 @@ public void instanceCommand(CommandLine commandLine, FalconClient client) throws validateNotEmpty(colo, FalconCLIConstants.COLO_OPT); validateNotEmpty(start, FalconCLIConstants.START_OPT); validateNotEmpty(type, FalconCLIConstants.TYPE_OPT); - validateEntityTypeForSummary(type); + ValidationUtil.validateEntityTypeForSummary(type); validateNotEmpty(entity, FalconCLIConstants.ENTITY_NAME_OPT); result = client.triage(type, entity, start, colo).toString(); } else if (optionsList.contains(FalconCLIConstants.DEPENDENCY_OPT)) { @@ -239,8 +258,8 @@ public void instanceCommand(CommandLine commandLine, FalconClient client) throws result = ResponseHelper.getString(response); } else if (optionsList.contains(RUNNING_OPT)) { - validateOrderBy(orderBy, instanceAction); - validateFilterBy(filterBy, instanceAction); + ValidationUtil.validateOrderBy(orderBy, instanceAction); + ValidationUtil.validateFilterBy(filterBy, instanceAction); result = ResponseHelper.getString(client.getRunningInstances(type, entity, colo, lifeCycles, filterBy, orderBy, sortOrder, offset, numResults, doAsUser)); } else if (optionsList.contains(FalconCLIConstants.STATUS_OPT) @@ -249,13 +268,13 @@ public void instanceCommand(CommandLine commandLine, FalconClient client) throws if (optionsList.contains(ALL_ATTEMPTS)) { allAttempts = true; } - validateOrderBy(orderBy, instanceAction); - validateFilterBy(filterBy, instanceAction); + ValidationUtil.validateOrderBy(orderBy, instanceAction); + ValidationUtil.validateFilterBy(filterBy, instanceAction); result = ResponseHelper.getString(client.getStatusOfInstances(type, entity, start, end, colo, lifeCycles, filterBy, orderBy, sortOrder, offset, numResults, doAsUser, allAttempts)); } else if (optionsList.contains(FalconCLIConstants.SUMMARY_OPT)) { - validateOrderBy(orderBy, "summary"); - validateFilterBy(filterBy, "summary"); + ValidationUtil.validateOrderBy(orderBy, "summary"); + 
ValidationUtil.validateFilterBy(filterBy, "summary"); result = ResponseHelper.getString(client.getSummaryOfInstances(type, entity, start, end, colo, lifeCycles, filterBy, orderBy, sortOrder, doAsUser)); } else if (optionsList.contains(KILL_OPT)) { @@ -283,8 +302,8 @@ public void instanceCommand(CommandLine commandLine, FalconClient client) throws result = ResponseHelper.getString(client.rerunInstances(type, entity, start, end, filePath, colo, clusters, sourceClusters, lifeCycles, isForced, doAsUser)); } else if (optionsList.contains(LOG_OPT)) { - validateOrderBy(orderBy, instanceAction); - validateFilterBy(filterBy, instanceAction); + ValidationUtil.validateOrderBy(orderBy, instanceAction); + ValidationUtil.validateFilterBy(filterBy, instanceAction); result = ResponseHelper.getString(client.getLogsOfInstances(type, entity, start, end, colo, runId, lifeCycles, filterBy, orderBy, sortOrder, offset, numResults, doAsUser), runId); } else if (optionsList.contains(PARARMS_OPT)) { @@ -338,21 +357,4 @@ private void validateInstanceCommands(Set optionsList, } } - private List getLifeCycle(String lifeCycleValue) { - - if (lifeCycleValue != null) { - String[] lifeCycleValues = lifeCycleValue.split(","); - List lifeCycles = new ArrayList(); - try { - for (String lifeCycle : lifeCycleValues) { - lifeCycles.add(LifeCycle.valueOf(lifeCycle.toUpperCase().trim())); - } - } catch (IllegalArgumentException e) { - throw new FalconCLIException("Invalid life cycle values: " + lifeCycles, e); - } - return lifeCycles; - } - return null; - } - } diff --git a/cli/src/main/java/org/apache/falcon/cli/FalconMetadataCLI.java b/cli/src/main/java/org/apache/falcon/cli/FalconMetadataCLI.java index 98a29ef4b..ec53e7c1c 100644 --- a/cli/src/main/java/org/apache/falcon/cli/FalconMetadataCLI.java +++ b/cli/src/main/java/org/apache/falcon/cli/FalconMetadataCLI.java @@ -23,7 +23,7 @@ import org.apache.commons.cli.OptionGroup; import org.apache.commons.cli.Options; import 
org.apache.commons.lang3.StringUtils; -import org.apache.falcon.FalconCLIConstants; +import org.apache.falcon.client.FalconCLIConstants; import org.apache.falcon.client.FalconCLIException; import org.apache.falcon.client.FalconClient; import org.apache.falcon.entity.v0.EntityType; @@ -34,6 +34,60 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicReference; +import static org.apache.falcon.client.FalconCLIConstants.DISCOVERY_OPT; +import static org.apache.falcon.client.FalconCLIConstants.DISCOVERY_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.LINEAGE_OPT; +import static org.apache.falcon.client.FalconCLIConstants.LINEAGE_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.PIPELINE_OPT; +import static org.apache.falcon.client.FalconCLIConstants.PIPELINE_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.LIST_OPT; +import static org.apache.falcon.client.FalconCLIConstants.LIST_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.RELATIONS_OPT; +import static org.apache.falcon.client.FalconCLIConstants.RELATIONS_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.URL_OPTION; +import static org.apache.falcon.client.FalconCLIConstants.URL_OPTION_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.TYPE_OPT; +import static org.apache.falcon.client.FalconCLIConstants.TYPE_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.NAME_OPT; +import static org.apache.falcon.client.FalconCLIConstants.NAME_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.DEBUG_OPTION; +import static org.apache.falcon.client.FalconCLIConstants.DEBUG_OPTION_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.DIRECTION_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.DIRECTION_OPT; +import static 
org.apache.falcon.client.FalconCLIConstants.VALUE_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.VALUE_OPT; +import static org.apache.falcon.client.FalconCLIConstants.KEY_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.KEY_OPT; +import static org.apache.falcon.client.FalconCLIConstants.ID_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.ID_OPT; +import static org.apache.falcon.client.FalconCLIConstants.EDGE_CMD_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.EDGE_CMD; +import static org.apache.falcon.client.FalconCLIConstants.VERTEX_EDGES_CMD_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.VERTEX_EDGES_CMD; +import static org.apache.falcon.client.FalconCLIConstants.VERTICES_CMD_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.VERTICES_CMD; +import static org.apache.falcon.client.FalconCLIConstants.VERTEX_CMD_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.VERTEX_CMD; +import static org.apache.falcon.client.FalconCLIConstants.NUM_RESULTS_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.NUM_RESULTS_OPT; +import static org.apache.falcon.client.FalconCLIConstants.PROCESS_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.PROCESS_OPT; +import static org.apache.falcon.client.FalconCLIConstants.FEED_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.FEED_OPT; +import static org.apache.falcon.client.FalconCLIConstants.CLUSTER_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.CLUSTER_OPT; +import static org.apache.falcon.client.FalconCLIConstants.DO_AS_OPT; +import static org.apache.falcon.client.FalconCLIConstants.DO_AS_DESCRIPTION; +import static org.apache.falcon.ValidationUtil.validateDimensionName; +import static org.apache.falcon.ValidationUtil.validateDimensionType; +import static 
org.apache.falcon.ValidationUtil.validateId; +import static org.apache.falcon.ValidationUtil.validateScheduleEntity; +import static org.apache.falcon.ValidationUtil.validateVertexEdgesCommand; +import static org.apache.falcon.ValidationUtil.validateVerticesCommand; +import static org.apache.falcon.ValidationUtil.validatePipelineName; + + + /** * Metadata extension to Falcon Command Line Interface - wraps the RESTful API for Metadata. */ @@ -41,21 +95,6 @@ public class FalconMetadataCLI extends FalconCLI { public static final AtomicReference OUT = new AtomicReference(System.out); - // Discovery Commands - public static final String DISCOVERY_OPT = "discovery"; - public static final String LIST_OPT = "list"; - public static final String URL_OPTION = "url"; - - // Lineage Commands - public static final String LINEAGE_OPT = "lineage"; - public static final String VERTEX_CMD = "vertex"; - public static final String VERTICES_CMD = "vertices"; - public static final String VERTEX_EDGES_CMD = "edges"; - public static final String EDGE_CMD = "edge"; - public static final String ID_OPT = "id"; - public static final String KEY_OPT = "key"; - public static final String VALUE_OPT = "value"; - public static final String DIRECTION_OPT = "direction"; public FalconMetadataCLI() throws Exception { super(); @@ -65,29 +104,27 @@ public Options createMetadataOptions() { Options metadataOptions = new Options(); OptionGroup group = new OptionGroup(); - Option discovery = new Option(DISCOVERY_OPT, false, "Discover falcon metadata relations"); - Option lineage = new Option(LINEAGE_OPT, false, "Get falcon metadata lineage information"); + Option discovery = new Option(DISCOVERY_OPT, false, DISCOVERY_OPT_DESCRIPTION); + Option lineage = new Option(LINEAGE_OPT, false, LINEAGE_OPT_DESCRIPTION); group.addOption(discovery); group.addOption(lineage); - Option pipeline = new Option(FalconCLIConstants.PIPELINE_OPT, true, - "Get lineage graph for the entities in a pipeline"); + Option pipeline = new 
Option(PIPELINE_OPT, true, PIPELINE_OPT_DESCRIPTION); metadataOptions.addOptionGroup(group); // Add discovery options - Option list = new Option(LIST_OPT, false, "List all dimensions"); - Option relations = new Option(FalconCLIConstants.RELATIONS_OPT, false, "List all relations for a dimension"); + Option list = new Option(LIST_OPT, false, LIST_OPT_DESCRIPTION); + Option relations = new Option(RELATIONS_OPT, false, RELATIONS_OPT_DESCRIPTION); metadataOptions.addOption(list); metadataOptions.addOption(relations); - Option url = new Option(URL_OPTION, true, "Falcon URL"); - Option type = new Option(FalconCLIConstants.TYPE_OPT, true, "Dimension type"); - Option name = new Option(FalconCLIConstants.NAME_OPT, true, "Dimension name"); - Option cluster = new Option(FalconCLIConstants.CLUSTER_OPT, true, "Cluster name"); - Option feed = new Option(FalconCLIConstants.FEED_OPT, true, "Feed Entity name"); - Option process = new Option(FalconCLIConstants.PROCESS_OPT, true, "Process Entity name"); - Option numResults = new Option(FalconCLIConstants.NUM_RESULTS_OPT, true, - "Number of results to return per request"); + Option url = new Option(URL_OPTION, true, URL_OPTION_DESCRIPTION); + Option type = new Option(TYPE_OPT, true, TYPE_OPT_DESCRIPTION); + Option name = new Option(NAME_OPT, true, NAME_OPT_DESCRIPTION); + Option cluster = new Option(CLUSTER_OPT, true, CLUSTER_OPT_DESCRIPTION); + Option feed = new Option(FEED_OPT, true, FEED_OPT_DESCRIPTION); + Option process = new Option(PROCESS_OPT, true, PROCESS_OPT_DESCRIPTION); + Option numResults = new Option(NUM_RESULTS_OPT, true, NUM_RESULTS_OPT_DESCRIPTION); // Add lineage options metadataOptions.addOption(pipeline); @@ -100,16 +137,15 @@ public Options createMetadataOptions() { metadataOptions.addOption(process); metadataOptions.addOption(numResults); - Option vertex = new Option(VERTEX_CMD, false, "show the vertices"); - Option vertices = new Option(VERTICES_CMD, false, "show the vertices"); - Option vertexEdges = new 
Option(VERTEX_EDGES_CMD, false, "show the edges for a given vertex"); - Option edges = new Option(EDGE_CMD, false, "show the edges"); - Option id = new Option(ID_OPT, true, "vertex or edge id"); - Option key = new Option(KEY_OPT, true, "key property"); - Option value = new Option(VALUE_OPT, true, "value property"); - Option direction = new Option(DIRECTION_OPT, true, "edge direction property"); - Option debug = new Option(FalconCLIConstants.DEBUG_OPTION, false, - "Use debug mode to see debugging statements on stdout"); + Option vertex = new Option(VERTEX_CMD, false, VERTEX_CMD_DESCRIPTION); + Option vertices = new Option(VERTICES_CMD, false, VERTICES_CMD_DESCRIPTION); + Option vertexEdges = new Option(VERTEX_EDGES_CMD, false, VERTEX_EDGES_CMD_DESCRIPTION); + Option edges = new Option(EDGE_CMD, false, EDGE_CMD_DESCRIPTION); + Option id = new Option(ID_OPT, true, ID_OPT_DESCRIPTION); + Option key = new Option(KEY_OPT, true, KEY_OPT_DESCRIPTION); + Option value = new Option(VALUE_OPT, true, VALUE_OPT_DESCRIPTION); + Option direction = new Option(DIRECTION_OPT, true, DIRECTION_OPT_DESCRIPTION); + Option debug = new Option(DEBUG_OPTION, false, DEBUG_OPTION_DESCRIPTION); metadataOptions.addOption(vertex); metadataOptions.addOption(vertices); @@ -121,7 +157,8 @@ public Options createMetadataOptions() { metadataOptions.addOption(direction); metadataOptions.addOption(debug); - Option doAs = new Option(FalconCLIConstants.DO_AS_OPT, true, "doAs user"); + Option doAs = new Option(DO_AS_OPT, true, DO_AS_DESCRIPTION); + metadataOptions.addOption(doAs); return metadataOptions; @@ -196,63 +233,5 @@ public void metadataCommand(CommandLine commandLine, FalconClient client) { OUT.get().println(result); } - private void validatePipelineName(String pipeline) { - if (StringUtils.isEmpty(pipeline)) { - throw new FalconCLIException("Invalid value for pipeline"); - } - } - - private void validateDimensionType(String dimensionType) { - if (StringUtils.isEmpty(dimensionType) - || 
dimensionType.contains("INSTANCE")) { - throw new FalconCLIException("Invalid value provided for queryParam \"type\" " + dimensionType); - } - try { - RelationshipType.valueOf(dimensionType); - } catch (IllegalArgumentException iae) { - throw new FalconCLIException("Invalid value provided for queryParam \"type\" " + dimensionType); - } - } - - private void validateDimensionName(String dimensionName, String action) { - if (StringUtils.isEmpty(dimensionName)) { - throw new FalconCLIException("Dimension ID cannot be empty or null for action " + action); - } - } - - private void validateScheduleEntity(String schedEntityType, String schedEntityName) { - if (StringUtils.isBlank(schedEntityType)) { - throw new FalconCLIException("Entity must be schedulable type : -feed/process"); - } - if (StringUtils.isBlank(schedEntityName)) { - throw new FalconCLIException("Entity name is missing"); - } - } - - private void validateId(String id) { - if (id == null || id.length() == 0) { - throw new FalconCLIException("Missing argument: id"); - } - } - - private void validateVerticesCommand(String key, String value) { - if (key == null || key.length() == 0) { - throw new FalconCLIException("Missing argument: key"); - } - - if (value == null || value.length() == 0) { - throw new FalconCLIException("Missing argument: value"); - } - } - - private void validateVertexEdgesCommand(String id, String direction) { - if (id == null || id.length() == 0) { - throw new FalconCLIException("Missing argument: id"); - } - - if (direction == null || direction.length() == 0) { - throw new FalconCLIException("Missing argument: direction"); - } - } } diff --git a/cli/src/main/java/org/apache/falcon/cli/commands/FalconInstanceCommands.java b/cli/src/main/java/org/apache/falcon/cli/commands/FalconInstanceCommands.java deleted file mode 100644 index 5b62cf085..000000000 --- a/cli/src/main/java/org/apache/falcon/cli/commands/FalconInstanceCommands.java +++ /dev/null @@ -1,29 +0,0 @@ -/** - * Licensed to the 
Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.falcon.cli.commands; - -import org.springframework.stereotype.Component; - -/** - * Instance commands. - */ -@Component -public class FalconInstanceCommands extends BaseFalconCommands { - -} diff --git a/client/src/main/java/org/apache/falcon/FalconCLIConstants.java b/client/src/main/java/org/apache/falcon/FalconCLIConstants.java deleted file mode 100644 index bcf3fe66c..000000000 --- a/client/src/main/java/org/apache/falcon/FalconCLIConstants.java +++ /dev/null @@ -1,81 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.falcon; - -/** -* FalconCLI Constants. -*/ -public final class FalconCLIConstants { - private FalconCLIConstants(){ - - } - public static final String ENV_FALCON_DEBUG = "FALCON_DEBUG"; - public static final String DEBUG_OPTION = "debug"; - public static final String URL_OPTION = "url"; - public static final String DEFINITION_OPT = "definition"; - public static final String LOOKUP_OPT = "lookup"; - public static final String SLA_MISS_ALERT_OPT = "slaAlert"; - public static final String TOUCH_OPT = "touch"; - public static final String ADMIN_CMD = "admin"; - public static final String HELP_CMD = "help"; - public static final String METADATA_CMD = "metadata"; - public static final String ENTITY_CMD = "entity"; - public static final String INSTANCE_CMD = "instance"; - public static final String EXTENSION_CMD = "extension"; - public static final String SAFE_MODE_OPT = "setsafemode"; - public static final String TYPE_OPT = "type"; - public static final String COLO_OPT = "colo"; - public static final String CLUSTER_OPT = "cluster"; - public static final String FEED_OPT = "feed"; - public static final String PROCESS_OPT = "process"; - public static final String ENTITY_NAME_OPT = "name"; - public static final String FILE_PATH_OPT = "file"; - public static final String VERSION_OPT = "version"; - public static final String SUBMIT_OPT = "submit"; - public static final String UPDATE_OPT = "update"; - public static final String UPDATE_CLUSTER_DEPENDENTS_OPT = "updateClusterDependents"; - public static final String DELETE_OPT = "delete"; - 
public static final String SUBMIT_AND_SCHEDULE_OPT = "submitAndSchedule"; - public static final String VALIDATE_OPT = "validate"; - public static final String SCHEDULE_OPT = "schedule"; - public static final String SUSPEND_OPT = "suspend"; - public static final String RESUME_OPT = "resume"; - public static final String STATUS_OPT = "status"; - public static final String SUMMARY_OPT = "summary"; - public static final String DEPENDENCY_OPT = "dependency"; - public static final String LIST_OPT = "list"; - public static final String SKIPDRYRUN_OPT = "skipDryRun"; - public static final String FIELDS_OPT = "fields"; - public static final String INSTANCE_STATUS_OPT = "instanceStatus"; - public static final String NAMESEQ_OPT = "nameseq"; - public static final String TAGKEYS_OPT = "tagkeys"; - public static final String FILTER_BY_OPT = "filterBy"; - public static final String ORDER_BY_OPT = "orderBy"; - public static final String SORT_ORDER_OPT = "sortOrder"; - public static final String OFFSET_OPT = "offset"; - public static final String NUM_RESULTS_OPT = "numResults"; - public static final String START_OPT = "start"; - public static final String END_OPT = "end"; - public static final String CURRENT_COLO = "current.colo"; - public static final String CLIENT_PROPERTIES = "/client.properties"; - public static final String DO_AS_OPT = "doAs"; - public static final String RELATIONS_OPT = "relations"; - public static final String PIPELINE_OPT = "pipeline"; - public static final String NAME_OPT = "name"; -} diff --git a/client/src/main/java/org/apache/falcon/ValidationUtil.java b/client/src/main/java/org/apache/falcon/ValidationUtil.java new file mode 100644 index 000000000..cdf1f0594 --- /dev/null +++ b/client/src/main/java/org/apache/falcon/ValidationUtil.java @@ -0,0 +1,199 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.falcon; + +import org.apache.commons.lang3.StringUtils; +import org.apache.falcon.client.FalconCLIConstants; +import org.apache.falcon.client.FalconCLIException; +import org.apache.falcon.entity.v0.EntityType; +import org.apache.falcon.metadata.RelationshipType; +import org.apache.falcon.resource.EntityList; +import org.apache.falcon.resource.InstancesResult; +import org.apache.falcon.resource.InstancesSummaryResult; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +/** + * All the validation methods to check the params from CLI and Shell. 
+ */ +public final class ValidationUtil { + + private ValidationUtil(){} + + public static void validateEntityFields(String fields) { + if (StringUtils.isEmpty(fields)) { + return; + } + String[] fieldsList = fields.split(","); + for (String s : fieldsList) { + try { + EntityList.EntityFieldList.valueOf(s.toUpperCase()); + } catch (IllegalArgumentException ie) { + throw new FalconCLIException("Invalid fields argument : " + FalconCLIConstants.FIELDS_OPT); + } + } + } + + public static void validateOrderBy(String orderBy, String action) { + if (StringUtils.isBlank(orderBy)) { + return; + } + if (action.equals("instance")) { + if (Arrays.asList(new String[]{"status", "cluster", "starttime", "endtime"}) + .contains(orderBy.toLowerCase())) { + return; + } + } else if (action.equals("entity")) { + if (Arrays.asList(new String[] {"type", "name"}).contains(orderBy.toLowerCase())) { + return; + } + } else if (action.equals("summary")) { + if (Arrays.asList(new String[]{"cluster"}) + .contains(orderBy.toLowerCase())) { + return; + } + } + throw new FalconCLIException("Invalid orderBy argument : " + orderBy); + } + + public static void validateFilterBy(String filterBy, String filterType) { + if (StringUtils.isBlank(filterBy)) { + return; + } + String[] filterSplits = filterBy.split(","); + for (String s : filterSplits) { + String[] tempKeyVal = s.split(":", 2); + try { + if (filterType.equals("entity")) { + EntityList.EntityFilterByFields.valueOf(tempKeyVal[0].toUpperCase()); + } else if (filterType.equals("instance")) { + InstancesResult.InstanceFilterFields.valueOf(tempKeyVal[0].toUpperCase()); + }else if (filterType.equals("summary")) { + InstancesSummaryResult.InstanceSummaryFilterFields.valueOf(tempKeyVal[0].toUpperCase()); + } else { + throw new IllegalArgumentException("Invalid API call: filterType is not valid"); + } + } catch (IllegalArgumentException ie) { + throw new FalconCLIException("Invalid filterBy argument : " + tempKeyVal[0] + " in : " + s); + } + } + } + + 
public static void validateEntityTypeForSummary(String type) { + EntityType entityType = EntityType.getEnum(type); + if (!entityType.isSchedulable()) { + throw new FalconCLIException("Invalid entity type " + entityType + + " for EntitySummary API. Valid options are feed or process"); + } + } + + public static List getLifeCycle(String lifeCycleValue) { + if (lifeCycleValue != null) { + String[] lifeCycleValues = lifeCycleValue.split(","); + List lifeCycles = new ArrayList(); + try { + for (String lifeCycle : lifeCycleValues) { + lifeCycles.add(LifeCycle.valueOf(lifeCycle.toUpperCase().trim())); + } + } catch (IllegalArgumentException e) { + throw new FalconCLIException("Invalid life cycle values: " + lifeCycles, e); + } + return lifeCycles; + } + return null; + } + + public static void validateDimensionName(String dimensionName, String action) { + if (StringUtils.isBlank(dimensionName)) { + throw new FalconCLIException("Dimension ID cannot be empty or null for action " + action); + } + } + + public static void validateDimensionType(String dimensionType) { + if (StringUtils.isBlank(dimensionType) + || dimensionType.contains("INSTANCE")) { + throw new FalconCLIException("Invalid value provided for queryParam \"type\" " + dimensionType); + } + try { + RelationshipType.valueOf(dimensionType); + } catch (IllegalArgumentException iae) { + throw new FalconCLIException("Invalid value provided for queryParam \"type\" " + dimensionType); + } + } + + public static void validateId(String id) { + if (id == null || id.length() == 0) { + throw new FalconCLIException("Missing argument: id"); + } + } + + public static void validateScheduleEntity(String schedEntityType, String schedEntityName) { + if (StringUtils.isBlank(schedEntityType)) { + throw new FalconCLIException("Entity must be schedulable type : -feed/process"); + } + + if (StringUtils.isBlank(schedEntityName)) { + throw new FalconCLIException("Entity name is missing"); + } + } + + public static void 
validateVertexEdgesCommand(String id, String direction) { + if (id == null || id.length() == 0) { + throw new FalconCLIException("Missing argument: id"); + } + + if (direction == null || direction.length() == 0) { + throw new FalconCLIException("Missing argument: direction"); + } + } + + public static void validateVerticesCommand(String key, String value) { + if (key == null || key.length() == 0) { + throw new FalconCLIException("Missing argument: key"); + } + + if (value == null || value.length() == 0) { + throw new FalconCLIException("Missing argument: value"); + } + } + + public static void validatePipelineName(String pipeline) { + if (StringUtils.isBlank(pipeline)) { + throw new FalconCLIException("Invalid value for pipeline"); + } + } + + public static void validateNotEmpty(String paramVal, String paramName) { + if (StringUtils.isBlank(paramVal)) { + throw new FalconCLIException("Missing argument : " + paramName); + } + } + + public static void validateSortOrder(String sortOrder) { + if (!StringUtils.isBlank(sortOrder)) { + if (!sortOrder.equalsIgnoreCase("asc") && !sortOrder.equalsIgnoreCase("desc")) { + throw new FalconCLIException("Value for param sortOrder should be \"asc\" or \"desc\". 
It is : " + + sortOrder); + } + } + } + +} diff --git a/client/src/main/java/org/apache/falcon/client/AbstractFalconClient.java b/client/src/main/java/org/apache/falcon/client/AbstractFalconClient.java index b3fd3ae65..5d6eff5f2 100644 --- a/client/src/main/java/org/apache/falcon/client/AbstractFalconClient.java +++ b/client/src/main/java/org/apache/falcon/client/AbstractFalconClient.java @@ -28,7 +28,9 @@ import org.apache.falcon.resource.InstanceDependencyResult; import org.apache.falcon.resource.InstancesResult; import org.apache.falcon.resource.InstancesSummaryResult; +import org.apache.falcon.resource.LineageGraphResult; import org.apache.falcon.resource.SchedulableEntityInstanceResult; +import org.apache.falcon.resource.TriageResult; import java.io.ByteArrayInputStream; import java.io.IOException; @@ -460,4 +462,34 @@ public abstract SchedulableEntityInstanceResult getFeedSlaMissPendingAlerts(Stri public abstract FeedLookupResult reverseLookUp(String entityType, String path, String doAs); public abstract EntityList getDependency(String entityType, String entityName, String doAs); + + public abstract TriageResult triage(String name, String entityName, String start, String colo); + // SUSPEND CHECKSTYLE CHECK ParameterNumberCheck + public abstract InstancesResult getRunningInstances(String type, String entity, String colo, + List lifeCycles, + String filterBy, String orderBy, String sortOrder, + Integer offset, Integer numResults, String doAsUser); + // RESUME CHECKSTYLE CHECK ParameterNumberCheck + public abstract FeedInstanceResult getFeedInstanceListing(String type, String entity, String start, String end, + String colo, String doAsUser); + public abstract int getStatus(String doAsUser); + + public abstract String getThreadDump(String doAs); + + public abstract LineageGraphResult getEntityLineageGraph(String pipeline, String doAs); + + public abstract String getDimensionList(String dimensionType, String cluster, String doAs); + + public abstract String 
getReplicationMetricsDimensionList(String schedEntityType, String schedEntityName, + Integer numResults, String doAs); + + public abstract String getDimensionRelations(String dimensionType, String dimensionName, String doAs); + + public abstract String getVertex(String id, String doAs); + + public abstract String getVertices(String key, String value, String doAs); + + public abstract String getVertexEdges(String id, String direction, String doAs); + + public abstract String getEdge(String id, String doAs); } diff --git a/client/src/main/java/org/apache/falcon/client/FalconCLIConstants.java b/client/src/main/java/org/apache/falcon/client/FalconCLIConstants.java new file mode 100644 index 000000000..04f15992c --- /dev/null +++ b/client/src/main/java/org/apache/falcon/client/FalconCLIConstants.java @@ -0,0 +1,220 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.falcon.client; + +/** +* FalconCLI Constants. 
+*/ +public final class FalconCLIConstants { + private FalconCLIConstants(){ + + } + public static final String ENV_FALCON_DEBUG = "FALCON_DEBUG"; + public static final String DEFINITION_OPT = "definition"; + public static final String LOOKUP_OPT = "lookup"; + public static final String SLA_MISS_ALERT_OPT = "slaAlert"; + public static final String TOUCH_OPT = "touch"; + public static final String ADMIN_CMD = "admin"; + public static final String HELP_CMD = "help"; + public static final String METADATA_CMD = "metadata"; + public static final String ENTITY_CMD = "entity"; + public static final String INSTANCE_CMD = "instance"; + public static final String EXTENSION_CMD = "extension"; + public static final String SAFE_MODE_OPT = "setsafemode"; + public static final String VERSION_OPT = "version"; + public static final String SUBMIT_OPT = "submit"; + public static final String SUBMIT_ONLY_OPT = "submitOnly"; + public static final String UPDATE_OPT = "update"; + public static final String UPDATE_CLUSTER_DEPENDENTS_OPT = "updateClusterDependents"; + public static final String DELETE_OPT = "delete"; + public static final String SCHEDULE_OPT = "schedule"; + public static final String CURRENT_COLO = "current.colo"; + public static final String CLIENT_PROPERTIES = "/client.properties"; + public static final String RELATIONS_OPT = "relations"; + public static final String PIPELINE_OPT = "pipeline"; + public static final String NAME_OPT = "name"; + public static final String VERSION_OPT_DESCRIPTION = "show Falcon server build version"; + public static final String STACK_OPTION_DESCRIPTION = "show the thread stack dump"; + public static final String FALCON_URL = "FALCON_URL"; + public static final String STACK_OPTION = "stack"; + public static final String SUBMIT_OPT_DESCRIPTION = "Submits an entity xml to Falcon"; + public static final String UPDATE_OPT_DESCRIPTION = "Updates an existing entity"; + public static final String DELETE_OPT_DESCRIPTION = "Deletes an entity in 
Falcon, and kills its instance from " + + "workflow engine"; + public static final String SUBMIT_AND_SCHEDULE_OPT = "submitAndSchedule"; + public static final String SUBMIT_AND_SCHEDULE_OPT_DESCRIPTION = "Submits an entity to Falcon and " + + "schedules it immediately"; + public static final String VALIDATE_OPT = "validate"; + public static final String VALIDATE_OPT_DESCRIPTION = "Validates an entity based on the entity type"; + public static final String DEFINITION_OPT_DESCRIPTION = "Gets the Definition of entity"; + public static final String SLA_MISS_ALERT_OPT_DESCRIPTION = "Get missing feed instances which missed SLA"; + + + public static final String LOOKUP_OPT_DESCRIPTION = "Lookup a feed given its instance's path"; + public static final String PATH_OPT = "path"; + public static final String PATH_OPT_DESCRIPTION = "Path for a feed's instance"; + public static final String TOUCH_OPT_DESCRIPTION = "Force update the entity in workflow engine" + + "(even without any changes to entity)"; + public static final String PROPS_OPT = "properties"; + public static final String PROPS_OPT_DESCRIPTION = "User supplied comma separated key value properties"; + public static final String FIELDS_OPT = "fields"; + public static final String FIELDS_OPT_DESCRIPTION = "Entity fields to show for a request"; + public static final String TAGS_OPT = "tags"; + public static final String TAGS_OPT_DESCRIPTION = "Filter returned entities by the specified tags"; + public static final String NUM_INSTANCES_OPT = "numInstances"; + public static final String NUM_INSTANCES_OPT_DESCRIPTION = "Number of instances to return per entity " + + "summary request"; + public static final String NAMESEQ_OPT = "nameseq"; + public static final String NAMESEQ_OPT_DESCRIPTION = "Subsequence of entity name"; + public static final String TAGKEYS_OPT = "tagkeys"; + public static final String TAGKEYS_OPT_DESCRIPTION = "Keywords in tags"; + public static final String SHOWSCHEDULER_OPT = "showScheduler"; + public 
static final String SHOWSCHEDULER_OPT_DESCRIPTION = "To return the scheduler " + + "on which the entity is scheduled."; + + public static final String DEBUG_OPTION = "debug"; + public static final String URL_OPTION = "url"; + public static final String TYPE_OPT = "type"; + public static final String COLO_OPT = "colo"; + public static final String CLUSTER_OPT = "cluster"; + public static final String FEED_OPT = "feed"; + public static final String PROCESS_OPT = "process"; + public static final String ENTITY_NAME_OPT = "name"; + public static final String FILE_PATH_OPT = "file"; + public static final String SUSPEND_OPT = "suspend"; + public static final String RESUME_OPT = "resume"; + public static final String STATUS_OPT = "status"; + public static final String SUMMARY_OPT = "summary"; + public static final String DEPENDENCY_OPT = "dependency"; + public static final String SKIPDRYRUN_OPT = "skipDryRun"; + public static final String FILTER_BY_OPT = "filterBy"; + public static final String ORDER_BY_OPT = "orderBy"; + public static final String SORT_ORDER_OPT = "sortOrder"; + public static final String OFFSET_OPT = "offset"; + public static final String NUM_RESULTS_OPT = "numResults"; + public static final String START_OPT = "start"; + public static final String END_OPT = "end"; + public static final String DO_AS_OPT = "doAs"; + public static final String RUNNING_OPT_DESCRIPTION = "Gets running process instances for a given process"; + public static final String LIST_OPT_DESCRIPTION = "Gets all instances for a given entity in the range start " + + "time and optional end time"; + public static final String STATUS_OPT_DESCRIPTION = "Gets status of process instances for a given process in" + + " the range start time and optional end time"; + public static final String SUMMARY_OPT_DESCRIPTION = "Gets summary of instances for a given process in the" + + " range start time and optional end time"; + public static final String KILL_OPT_DESCRIPTION = "Kills active process 
instances for a given process in the" + + " range start time and optional end time"; + public static final String SUSPEND_OPT_DESCRIPTION = "Suspends active process instances for a given process in" + + " the range start time and optional end time"; + public static final String RESUME_OPT_DESCRIPTION = "Resumes suspended process instances for a given" + + " process in the range start time and optional end time"; + public static final String RERUN_OPT_DESCRIPTION = "Reruns process instances for a given process in the" + + " range start time and optional end time and overrides properties present in job.properties file"; + public static final String LOG_OPT_DESCRIPTION = "Logs print the logs for process instances for a given" + + " process in the range start time and optional end time"; + public static final String PARARMS_OPT_DESCRIPTION = "Displays the workflow parameters for a given instance" + + " of specified nominal time start time represents nominal time and end time is not considered"; + public static final String LISTING_OPT_DESCRIPTION = "Displays feed listing and their status between a" + + " start and end time range."; + public static final String DEPENDENCY_OPT_DESCRIPTION = "Displays dependent instances for a specified" + + " instance."; + public static final String TRIAGE_OPT_DESCRIPTION = "Triage a feed or process instance and find the failures" + + " in its lineage."; + public static final String URL_OPTION_DESCRIPTION = "Falcon URL"; + public static final String START_OPT_DESCRIPTION = "Start time is required for commands, status, kill, " + + "suspend, resume and re-run and it is nominal time while displaying workflow params"; + public static final String END_OPT_DESCRIPTION = "End time is optional for commands, status, kill, suspend, " + + "resume and re-run; if not specified then current time is considered as end time"; + public static final String RUNID_OPT_DESCRIPTION = "Instance runid is optional and user can specify the " + + "runid, defaults 
to 0"; + public static final String CLUSTERS_OPT_DESCRIPTION = "clusters is optional for commands kill, suspend and " + + "resume, should not be specified for other commands"; + public static final String SOURCECLUSTER_OPT_DESCRIPTION = " source cluster is optional for commands kill, " + + "suspend and resume, should not be specified for other commands (required for only feed)"; + public static final String FILE_PATH_OPT_DESCRIPTION = "Path to job.properties file is required for rerun " + + "command, it should contain name=value pair for properties to override for rerun"; + public static final String TYPE_OPT_DESCRIPTION = "Entity type, can be feed or process xml"; + public static final String ENTITY_NAME_OPT_DESCRIPTION = "Entity name, can be feed or process name"; + public static final String COLO_OPT_DESCRIPTION = "Colo on which the cmd has to be executed"; + public static final String LIFECYCLE_OPT_DESCRIPTION = "describes life cycle of entity , for feed it can be " + + "replication/retention and for process it can be execution"; + public static final String FILTER_BY_OPT_DESCRIPTION = "Filter returned instances by the specified fields"; + public static final String ORDER_BY_OPT_DESCRIPTION = "Order returned instances by this field"; + public static final String SORT_ORDER_OPT_DESCRIPTION = "asc or desc order for results"; + public static final String OFFSET_OPT_DESCRIPTION = "Start returning instances from this offset"; + public static final String FORCE_RERUN_FLAG_DESCRIPTION = "Flag to forcefully rerun entire workflow " + + "of an instance"; + public static final String DO_AS_OPT_DESCRIPTION = "doAs user"; + public static final String INSTANCE_TIME_OPT_DESCRIPTION = "Time for an instance"; + public static final String ALL_ATTEMPTS_DESCRIPTION = "To get all attempts of corresponding instances"; + public static final String FORCE_RERUN_FLAG = "force"; + public static final String INSTANCE_TIME_OPT = "instanceTime"; + public static final String RUNNING_OPT = 
"running"; + public static final String KILL_OPT = "kill"; + public static final String RERUN_OPT = "rerun"; + public static final String LOG_OPT = "logs"; + public static final String CLUSTERS_OPT = "clusters"; + public static final String SOURCECLUSTER_OPT = "sourceClusters"; + public static final String LIFECYCLE_OPT = "lifecycle"; + public static final String PARARMS_OPT = "params"; + public static final String LISTING_OPT = "listing"; + public static final String TRIAGE_OPT = "triage"; + public static final String SKIPDRYRUN_OPT_DESCRIPTION = "skip dry run in workflow engine"; + public static final String SCHEDULE_OPT_DESCRIPTION = "Schedules a submitted entity in Falcon"; + public static final String ALL_ATTEMPTS = "allAttempts"; + public static final String RUNID_OPT = "runid"; + public static final String INSTANCE_STATUS_OPT = "instanceStatus"; + public static final String SEARCH_OPT = "search"; + + + // Discovery Commands + public static final String DISCOVERY_OPT = "discovery"; + public static final String LIST_OPT = "list"; + + // Lineage Commands + public static final String LINEAGE_OPT = "lineage"; + public static final String VERTEX_CMD = "vertex"; + public static final String VERTICES_CMD = "vertices"; + public static final String VERTEX_EDGES_CMD = "edges"; + public static final String EDGE_CMD = "edge"; + public static final String ID_OPT = "id"; + public static final String KEY_OPT = "key"; + public static final String VALUE_OPT = "value"; + public static final String DIRECTION_OPT = "direction"; + + public static final String DISCOVERY_OPT_DESCRIPTION = "Discover falcon metadata relations"; + public static final String LINEAGE_OPT_DESCRIPTION = "Get falcon metadata lineage information"; + public static final String PIPELINE_OPT_DESCRIPTION = "Get lineage graph for the entities in a pipeline"; + public static final String RELATIONS_OPT_DESCRIPTION = "List all relations for a dimension"; + public static final String NAME_OPT_DESCRIPTION = "Dimension 
name"; + public static final String CLUSTER_OPT_DESCRIPTION = "Cluster name"; + public static final String FEED_OPT_DESCRIPTION = "Feed Entity name"; + public static final String PROCESS_OPT_DESCRIPTION = "Process Entity name"; + public static final String NUM_RESULTS_OPT_DESCRIPTION = "Number of results to return per request"; + public static final String VERTEX_CMD_DESCRIPTION = "show the vertices"; + public static final String VERTICES_CMD_DESCRIPTION = "show the vertices"; + public static final String VERTEX_EDGES_CMD_DESCRIPTION = "show the edges for a given vertex"; + public static final String EDGE_CMD_DESCRIPTION = "show the edges"; + public static final String ID_OPT_DESCRIPTION = "vertex or edge id"; + public static final String KEY_OPT_DESCRIPTION = "key property"; + public static final String VALUE_OPT_DESCRIPTION = "value property"; + public static final String DIRECTION_OPT_DESCRIPTION = "edge direction property"; + public static final String DEBUG_OPTION_DESCRIPTION = "Use debug mode to see debugging statements on stdout"; + public static final String DO_AS_DESCRIPTION = "doAs user"; +} diff --git a/client/src/main/java/org/apache/falcon/client/FalconClient.java b/client/src/main/java/org/apache/falcon/client/FalconClient.java index 4716019b8..8f77faddb 100644 --- a/client/src/main/java/org/apache/falcon/client/FalconClient.java +++ b/client/src/main/java/org/apache/falcon/client/FalconClient.java @@ -26,7 +26,6 @@ import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.StringUtils; import org.apache.commons.net.util.TrustManagerUtils; -import org.apache.falcon.FalconCLIConstants; import org.apache.falcon.LifeCycle; import org.apache.falcon.entity.v0.DateValidator; import org.apache.falcon.entity.v0.Entity; @@ -540,7 +539,6 @@ public APIResult touch(String entityType, String entityName, String colo, .addQueryParam(DO_AS_OPT, doAsUser).call(operation); return getResponse(APIResult.class, clientResponse); } - public InstancesResult 
getRunningInstances(String type, String entity, String colo, List lifeCycles, String filterBy, String orderBy, String sortOrder, Integer offset, Integer numResults, String doAsUser) { @@ -551,7 +549,6 @@ public InstancesResult getRunningInstances(String type, String entity, String co .addQueryParam(LIFECYCLE, lifeCycles, type).addQueryParam(USER, doAsUser).call(Instances.RUNNING); return getResponse(InstancesResult.class, clientResponse); } - @Override public InstancesResult getStatusOfInstances(String type, String entity, String start, String end, String colo, List lifeCycles, String filterBy, String orderBy, @@ -759,8 +756,7 @@ private InputStream getServletInputStream(String filePath) { return stream; } - private T getResponse(Class clazz, - ClientResponse clientResponse) { + private T getResponse(Class clazz, ClientResponse clientResponse) { printClientResponse(clientResponse); checkIfSuccessful(clientResponse); return clientResponse.getEntity(clazz); diff --git a/common/pom.xml b/common/pom.xml index 5b0188fda..96cb7f571 100644 --- a/common/pom.xml +++ b/common/pom.xml @@ -210,12 +210,6 @@ compile - - javax.validation - validation-api - ${javax-validation.version} - - org.apache.spark spark-core_2.10 @@ -241,6 +235,11 @@ 0.9.3 test + + javax.validation + validation-api + ${javax-validation.version} + diff --git a/common/src/main/java/org/apache/falcon/metadata/GraphUpdateUtils.java b/common/src/main/java/org/apache/falcon/metadata/GraphUpdateUtils.java new file mode 100644 index 000000000..a256e4652 --- /dev/null +++ b/common/src/main/java/org/apache/falcon/metadata/GraphUpdateUtils.java @@ -0,0 +1,113 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.falcon.metadata; + +import com.tinkerpop.blueprints.Graph; +import com.tinkerpop.blueprints.util.io.graphson.GraphSONReader; +import com.tinkerpop.blueprints.util.io.graphson.GraphSONWriter; +import org.apache.commons.configuration.Configuration; +import org.apache.commons.io.FileUtils; +import org.apache.falcon.FalconException; + +import java.io.File; + +/** + * Utility class for graph operations. 
+ */ +public final class GraphUpdateUtils { + + private static final String BANNER_MSG = + "Before running this utility please make sure that Falcon startup properties " + + "has the right configuration settings for the graph database, " + + "Falcon server is stopped and no other access to the graph database is being performed."; + + private static final String IMPORT = "import"; + private static final String EXPORT = "export"; + private static final String INSTANCE_JSON_FILE = "instanceMetadata.json"; + + private GraphUpdateUtils() { + } + + public static void main(String[] args) { + if (args.length != 2) { + usage(); + System.exit(1); + } + System.out.println(BANNER_MSG); + String operation = args[0].toLowerCase(); + if (!(operation.equals(EXPORT) || operation.equals(IMPORT))) { + usage(); + System.exit(1); + } + String utilsDir = args[1]; + File utilsDirFile = new File(utilsDir); + if (!utilsDirFile.isDirectory()) { + System.err.println(utilsDir + " is not a valid directory"); + System.exit(1); + } + String jsonFile = new File(utilsDirFile, INSTANCE_JSON_FILE).getAbsolutePath(); + try { + Graph graph; + if (operation.equals(EXPORT)) { + graph = MetadataMappingService.initializeGraphDB(); + GraphSONWriter.outputGraph(graph, jsonFile); + System.out.println("Exported instance metadata to " + jsonFile); + } else { + // Backup existing graphDB dir + Configuration graphConfig = MetadataMappingService.getConfiguration(); + String graphStore = (String) graphConfig.getProperty("storage.directory"); + File graphStoreFile = new File(graphStore); + File graphDirBackup = new File(graphStore + "_backup"); + if (graphDirBackup.exists()) { + FileUtils.deleteDirectory(graphDirBackup); + } + FileUtils.copyDirectory(graphStoreFile, graphDirBackup); + + // delete graph dir first and then init graphDB to ensure IMPORT happens into empty DB. 
+ FileUtils.deleteDirectory(graphStoreFile); + graph = MetadataMappingService.initializeGraphDB(); + + // Import, if there is an exception restore backup. + try { + GraphSONReader.inputGraph(graph, jsonFile); + System.out.println("Imported instance metadata to " + jsonFile); + } catch (Exception ex) { + String errorMsg = ex.getMessage(); + if (graphStoreFile.exists()) { + FileUtils.deleteDirectory(graphStoreFile); + } + FileUtils.copyDirectory(graphDirBackup, graphStoreFile); + throw new FalconException(errorMsg); + } + } + } catch (Exception e) { + System.err.println("Error " + operation + "ing JSON data to " + jsonFile + ", " + e.getMessage()); + e.printStackTrace(System.out); + System.exit(1); + } + System.exit(0); + } + + public static void usage() { + StringBuilder usageMessage = new StringBuilder(1024); + usageMessage.append("usage: java ").append(GraphUpdateUtils.class.getName()) + .append(" {").append(EXPORT).append('|').append(IMPORT).append("} "); + System.err.println(usageMessage); + } +} diff --git a/common/src/main/java/org/apache/falcon/metadata/MetadataMappingService.java b/common/src/main/java/org/apache/falcon/metadata/MetadataMappingService.java index 727be5606..225e44a7e 100644 --- a/common/src/main/java/org/apache/falcon/metadata/MetadataMappingService.java +++ b/common/src/main/java/org/apache/falcon/metadata/MetadataMappingService.java @@ -139,14 +139,14 @@ public void init() throws FalconException { } } - protected Graph initializeGraphDB() { + public static Graph initializeGraphDB() { LOG.info("Initializing graph db"); Configuration graphConfig = getConfiguration(); validateConfiguration(graphConfig); return GraphFactory.open(graphConfig); } - private void validateConfiguration(Configuration graphConfig) { + private static void validateConfiguration(Configuration graphConfig) { // check if storage backend if configured if (!graphConfig.containsKey(PROPERTY_KEY_STORAGE_BACKEND)) { throw new FalconRuntimException("Titan GraphDB storage backend 
is not configured. " diff --git a/common/src/main/java/org/apache/falcon/persistence/BacklogMetricBean.java b/common/src/main/java/org/apache/falcon/persistence/BacklogMetricBean.java new file mode 100644 index 000000000..b563da7b7 --- /dev/null +++ b/common/src/main/java/org/apache/falcon/persistence/BacklogMetricBean.java @@ -0,0 +1,116 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.falcon.persistence; + +import org.apache.openjpa.persistence.jdbc.Index; + +import javax.persistence.Basic; +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.NamedQueries; +import javax.persistence.NamedQuery; +import javax.persistence.Table; +import javax.validation.constraints.NotNull; +import java.util.Date; + +//SUSPEND CHECKSTYLE CHECK LineLengthCheck +/** + * Backlog Metric Object stored in DB. 
+ */ +@Entity +@NamedQueries({ + @NamedQuery(name = PersistenceConstants.GET_ALL_BACKLOG_INSTANCES, query = "select OBJECT(a) from BacklogMetricBean a "), + @NamedQuery(name = PersistenceConstants.DELETE_BACKLOG_METRIC_INSTANCE, query = "delete from BacklogMetricBean a where a.entityName = :entityName and a.clusterName = :clusterName and a.nominalTime = :nominalTime and a.entityType = :entityType") +}) +//RESUME CHECKSTYLE CHECK LineLengthCheck + +@Table(name = "BACKLOG_METRIC") +public class BacklogMetricBean { + + @NotNull + @GeneratedValue(strategy = GenerationType.AUTO) + @Id + private String id; + + @Basic + @NotNull + @Index + @Column(name = "entity_name") + private String entityName; + + @Basic + @NotNull + @Column(name = "cluster_name") + private String clusterName; + + @Basic + @NotNull + @Index + @Column(name = "nominal_time") + private Date nominalTime; + + @Basic + @NotNull + @Index + @Column(name = "entity_type") + private String entityType; + + + public String getId() { + return id; + } + + public String getEntityName() { + return entityName; + } + + public String getClusterName() { + return clusterName; + } + + public Date getNominalTime() { + return nominalTime; + } + + public void setId(String id) { + this.id = id; + } + + public void setEntityName(String entityName) { + this.entityName = entityName; + } + + public void setClusterName(String clusterName) { + this.clusterName = clusterName; + } + + public void setNominalTime(Date nominalTime) { + this.nominalTime = nominalTime; + } + + public String getEntityType() { + return entityType; + } + + public void setEntityType(String entityType) { + this.entityType = entityType; + } +} diff --git a/common/src/main/java/org/apache/falcon/persistence/PersistenceConstants.java b/common/src/main/java/org/apache/falcon/persistence/PersistenceConstants.java index 7c2479de3..5c3de51c3 100644 --- a/common/src/main/java/org/apache/falcon/persistence/PersistenceConstants.java +++ 
b/common/src/main/java/org/apache/falcon/persistence/PersistenceConstants.java @@ -61,5 +61,7 @@ private PersistenceConstants(){ public static final String UPDATE_SLA_HIGH = "UPDATE_SLA_HIGH"; public static final String GET_ENTITY_ALERT_INSTANCE = "GET_ENTITY_ALERT_INSTANCE"; public static final String DELETE_ENTITY_ALERT_INSTANCE = "DELETE_ENTITY_ALERT_INSTANCE"; + public static final String DELETE_BACKLOG_METRIC_INSTANCE = "DELETE_BACKLOG_METRIC_INSTANCE"; + public static final String GET_ALL_BACKLOG_INSTANCES = "GET_ALL_BACKLOG_INSTANCES"; public static final String GET_ALL_MONITORING_ENTITY = "GET_ALL_MONITORING_ENTITY"; } diff --git a/common/src/main/java/org/apache/falcon/tools/FalconStateStoreDBCLI.java b/common/src/main/java/org/apache/falcon/tools/FalconStateStoreDBCLI.java index 102b9864a..9c6e8b370 100644 --- a/common/src/main/java/org/apache/falcon/tools/FalconStateStoreDBCLI.java +++ b/common/src/main/java/org/apache/falcon/tools/FalconStateStoreDBCLI.java @@ -246,6 +246,7 @@ private String[] createMappingToolArguments(String sqlFile) throws Exception { args.add("org.apache.falcon.persistence.PendingInstanceBean"); args.add("org.apache.falcon.persistence.MonitoredEntityBean"); args.add("org.apache.falcon.persistence.EntitySLAAlertBean"); + args.add("org.apache.falcon.persistence.BacklogMetricBean"); return args.toArray(new String[args.size()]); } diff --git a/common/src/main/java/org/apache/falcon/util/DistCPOptionsUtil.java b/common/src/main/java/org/apache/falcon/util/DistCPOptionsUtil.java new file mode 100644 index 000000000..bbeb3e909 --- /dev/null +++ b/common/src/main/java/org/apache/falcon/util/DistCPOptionsUtil.java @@ -0,0 +1,153 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.falcon.util; + +import org.apache.commons.cli.CommandLine; +import org.apache.commons.lang3.StringUtils; +import org.apache.falcon.FalconException; +import org.apache.falcon.hadoop.HadoopClientFactory; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.tools.DistCpOptions; +import org.apache.hadoop.conf.Configuration; + +import java.io.IOException; +import java.util.List; + +/** + * Utility to set DistCp options. 
+ */ +public final class DistCPOptionsUtil { + private static final String TDE_ENCRYPTION_ENABLED = "tdeEncryptionEnabled"; + + private DistCPOptionsUtil() {} + + public static DistCpOptions getDistCpOptions(CommandLine cmd, + List sourcePaths, + Path targetPath, + boolean isSnapshot, + Configuration conf) throws FalconException, IOException { + DistCpOptions distcpOptions = new DistCpOptions(sourcePaths, targetPath); + distcpOptions.setBlocking(true); + + distcpOptions.setMaxMaps(Integer.parseInt(cmd.getOptionValue("maxMaps"))); + distcpOptions.setMapBandwidth(Integer.parseInt(cmd.getOptionValue("mapBandwidth"))); + + String tdeEncryptionEnabled = cmd.getOptionValue(TDE_ENCRYPTION_ENABLED); + if (StringUtils.isNotBlank(tdeEncryptionEnabled) && tdeEncryptionEnabled.equalsIgnoreCase(Boolean.TRUE.toString())) { + distcpOptions.setSyncFolder(true); + distcpOptions.setSkipCRC(true); + } else { + if (!isSnapshot) { + String overwrite = cmd.getOptionValue(ReplicationDistCpOption.DISTCP_OPTION_OVERWRITE.getName()); + if (StringUtils.isNotEmpty(overwrite) && overwrite.equalsIgnoreCase(Boolean.TRUE.toString())) { + distcpOptions.setOverwrite(Boolean.parseBoolean(overwrite)); + } else { + distcpOptions.setSyncFolder(true); + } + } + + String skipChecksum = cmd.getOptionValue(ReplicationDistCpOption.DISTCP_OPTION_SKIP_CHECKSUM.getName()); + if (StringUtils.isNotEmpty(skipChecksum)) { + distcpOptions.setSkipCRC(Boolean.parseBoolean(skipChecksum)); + } + } + + if (isSnapshot) { + // Settings needed for Snapshot distCp. 
+ distcpOptions.setSyncFolder(true); + distcpOptions.setDeleteMissing(true); + } else { + // Removing deleted files by default - FALCON-1844 + String removeDeletedFiles = cmd.getOptionValue( + ReplicationDistCpOption.DISTCP_OPTION_REMOVE_DELETED_FILES.getName(), "true"); + boolean deleteMissing = Boolean.parseBoolean(removeDeletedFiles); + distcpOptions.setDeleteMissing(deleteMissing); + if (deleteMissing) { + // DistCP will fail with InvalidInputException if deleteMissing is set to true and + // if targetPath does not exist. Create targetPath to avoid failures. + FileSystem fs = HadoopClientFactory.get().createProxiedFileSystem(targetPath.toUri(), conf); + if (!fs.exists(targetPath)) { + fs.mkdirs(targetPath); + } + } + } + + String ignoreErrors = cmd.getOptionValue(ReplicationDistCpOption.DISTCP_OPTION_IGNORE_ERRORS.getName()); + if (StringUtils.isNotBlank(ignoreErrors)) { + distcpOptions.setIgnoreFailures(Boolean.parseBoolean(ignoreErrors)); + } + + String preserveBlockSize = cmd.getOptionValue( + ReplicationDistCpOption.DISTCP_OPTION_PRESERVE_BLOCK_SIZE.getName()); + if (StringUtils.isNotBlank(preserveBlockSize) && Boolean.parseBoolean(preserveBlockSize)) { + distcpOptions.preserve(DistCpOptions.FileAttribute.BLOCKSIZE); + } + + String preserveReplicationCount = cmd.getOptionValue(ReplicationDistCpOption + .DISTCP_OPTION_PRESERVE_REPLICATION_NUMBER.getName()); + if (StringUtils.isNotBlank(preserveReplicationCount) && Boolean.parseBoolean(preserveReplicationCount)) { + distcpOptions.preserve(DistCpOptions.FileAttribute.REPLICATION); + } + + String preservePermission = cmd.getOptionValue( + ReplicationDistCpOption.DISTCP_OPTION_PRESERVE_PERMISSIONS.getName()); + if (StringUtils.isNotBlank(preservePermission) && Boolean.parseBoolean(preservePermission)) { + distcpOptions.preserve(DistCpOptions.FileAttribute.PERMISSION); + } + + String preserveUser = cmd.getOptionValue( + ReplicationDistCpOption.DISTCP_OPTION_PRESERVE_USER.getName()); + if 
(StringUtils.isNotBlank(preserveUser) && Boolean.parseBoolean(preserveUser)) { + distcpOptions.preserve(DistCpOptions.FileAttribute.USER); + } + + String preserveGroup = cmd.getOptionValue( + ReplicationDistCpOption.DISTCP_OPTION_PRESERVE_GROUP.getName()); + if (StringUtils.isNotBlank(preserveGroup) && Boolean.parseBoolean(preserveGroup)) { + distcpOptions.preserve(DistCpOptions.FileAttribute.GROUP); + } + + String preserveChecksumType = cmd.getOptionValue( + ReplicationDistCpOption.DISTCP_OPTION_PRESERVE_CHECKSUM_TYPE.getName()); + if (StringUtils.isNotBlank(preserveChecksumType) && Boolean.parseBoolean(preserveChecksumType)) { + distcpOptions.preserve(DistCpOptions.FileAttribute.CHECKSUMTYPE); + } + + String preserveAcl = cmd.getOptionValue( + ReplicationDistCpOption.DISTCP_OPTION_PRESERVE_ACL.getName()); + if (StringUtils.isNotBlank(preserveAcl) && Boolean.parseBoolean(preserveAcl)) { + distcpOptions.preserve(DistCpOptions.FileAttribute.ACL); + } + + String preserveXattr = cmd.getOptionValue( + ReplicationDistCpOption.DISTCP_OPTION_PRESERVE_XATTR.getName()); + if (StringUtils.isNotBlank(preserveXattr) && Boolean.parseBoolean(preserveXattr)) { + distcpOptions.preserve(DistCpOptions.FileAttribute.XATTR); + } + + String preserveTimes = cmd.getOptionValue( + ReplicationDistCpOption.DISTCP_OPTION_PRESERVE_TIMES.getName()); + if (StringUtils.isNotBlank(preserveTimes) && Boolean.parseBoolean(preserveTimes)) { + distcpOptions.preserve(DistCpOptions.FileAttribute.TIMES); + } + + return distcpOptions; + } +} diff --git a/common/src/main/java/org/apache/falcon/util/ReplicationDistCpOption.java b/common/src/main/java/org/apache/falcon/util/ReplicationDistCpOption.java index a8b99bb88..65f371c0a 100644 --- a/common/src/main/java/org/apache/falcon/util/ReplicationDistCpOption.java +++ b/common/src/main/java/org/apache/falcon/util/ReplicationDistCpOption.java @@ -29,7 +29,13 @@ public enum ReplicationDistCpOption { DISTCP_OPTION_REMOVE_DELETED_FILES("removeDeletedFiles"), 
DISTCP_OPTION_PRESERVE_BLOCK_SIZE("preserveBlockSize"), DISTCP_OPTION_PRESERVE_REPLICATION_NUMBER("preserveReplicationNumber"), - DISTCP_OPTION_PRESERVE_PERMISSIONS("preservePermission"); + DISTCP_OPTION_PRESERVE_PERMISSIONS("preservePermission"), + DISTCP_OPTION_PRESERVE_USER("preserveUser"), + DISTCP_OPTION_PRESERVE_GROUP("preserveGroup"), + DISTCP_OPTION_PRESERVE_CHECKSUM_TYPE("preserveChecksumType"), + DISTCP_OPTION_PRESERVE_ACL("preserveAcl"), + DISTCP_OPTION_PRESERVE_XATTR("preserveXattr"), + DISTCP_OPTION_PRESERVE_TIMES("preserveTimes"); private final String name; diff --git a/common/src/main/java/org/apache/falcon/workflow/engine/AbstractWorkflowEngine.java b/common/src/main/java/org/apache/falcon/workflow/engine/AbstractWorkflowEngine.java index 4d8402a75..0db7e9b18 100644 --- a/common/src/main/java/org/apache/falcon/workflow/engine/AbstractWorkflowEngine.java +++ b/common/src/main/java/org/apache/falcon/workflow/engine/AbstractWorkflowEngine.java @@ -72,6 +72,8 @@ public abstract String reRun(String cluster, String wfId, Properties props, bool public abstract boolean isCompleted(Entity entity) throws FalconException; + public abstract boolean isMissing(Entity entity) throws FalconException; + public abstract InstancesResult getRunningInstances(Entity entity, List lifeCycles) throws FalconException; diff --git a/common/src/main/resources/META-INF/persistence.xml b/common/src/main/resources/META-INF/persistence.xml index ac2f397cc..d58e21c01 100644 --- a/common/src/main/resources/META-INF/persistence.xml +++ b/common/src/main/resources/META-INF/persistence.xml @@ -28,6 +28,7 @@ org.apache.falcon.persistence.PendingInstanceBean org.apache.falcon.persistence.MonitoredEntityBean org.apache.falcon.persistence.EntitySLAAlertBean + org.apache.falcon.persistence.BacklogMetricBean @@ -60,7 +61,7 @@ org.apache.falcon.persistence.PendingInstanceBean org.apache.falcon.persistence.MonitoredEntityBean org.apache.falcon.persistence.EntitySLAAlertBean - + 
org.apache.falcon.persistence.BacklogMetricBean @@ -91,7 +92,7 @@ org.apache.falcon.persistence.MonitoredEntityBean org.apache.falcon.persistence.PendingInstanceBean org.apache.falcon.persistence.EntitySLAAlertBean - + org.apache.falcon.persistence.BacklogMetricBean @@ -100,8 +101,8 @@ - + org.apache.falcon.persistence.MonitoredEntityBean;org.apache.falcon.persistence.EntitySLAAlertBean; + org.apache.falcon.persistence.BacklogMetricBean)"/> diff --git a/common/src/main/resources/startup.properties b/common/src/main/resources/startup.properties index de246215e..4b692a237 100644 --- a/common/src/main/resources/startup.properties +++ b/common/src/main/resources/startup.properties @@ -332,3 +332,7 @@ it.workflow.execution.listeners=org.apache.falcon.catalog.CatalogPartitionHandle #*.falcon.graphite.port=2003 #*.falcon.graphite.frequency=1 #*.falcon.graphite.prefix=falcon + +# Backlog Metric Properties +#*.falcon.backlog.metricservice.emit.interval.millisecs=60000 +#*.falcon.backlog.metricservice.recheck.interval.millisecs=600000 diff --git a/common/src/test/java/org/apache/falcon/entity/parser/ProcessEntityParserTest.java b/common/src/test/java/org/apache/falcon/entity/parser/ProcessEntityParserTest.java index c4bfff645..3398c2625 100644 --- a/common/src/test/java/org/apache/falcon/entity/parser/ProcessEntityParserTest.java +++ b/common/src/test/java/org/apache/falcon/entity/parser/ProcessEntityParserTest.java @@ -103,7 +103,7 @@ public void testParse() throws FalconException, JAXBException { Assert.assertEquals(process.getTags(), "consumer=consumer@xyz.com, owner=producer@xyz.com, _department_type=forecasting"); - Assert.assertEquals(process.getPipelines(), "testPipeline,dataReplication_Pipeline"); + Assert.assertEquals(process.getPipelines(), "testPipeline"); Assert.assertEquals(process.getInputs().getInputs().get(0).getName(), "impression"); Assert.assertEquals(process.getInputs().getInputs().get(0).getFeed(), "impressionFeed"); diff --git 
a/common/src/test/resources/config/process/process-0.1.xml b/common/src/test/resources/config/process/process-0.1.xml index 4ce7ad1d1..155010176 100644 --- a/common/src/test/resources/config/process/process-0.1.xml +++ b/common/src/test/resources/config/process/process-0.1.xml @@ -18,7 +18,7 @@ --> consumer=consumer@xyz.com, owner=producer@xyz.com, _department_type=forecasting - testPipeline,dataReplication_Pipeline + testPipeline diff --git a/docs/src/site/twiki/EntitySpecification.twiki b/docs/src/site/twiki/EntitySpecification.twiki index 2615e5dd4..11d1e1b0b 100644 --- a/docs/src/site/twiki/EntitySpecification.twiki +++ b/docs/src/site/twiki/EntitySpecification.twiki @@ -399,6 +399,13 @@ permission indicates the permission. + + + + + + + @@ -414,7 +421,10 @@ used by each mapper during replication. "overwrite" represents overwrite destina bypassing checksum verification during replication. "removeDeletedFiles" represents deleting the files existing in the destination but not in source during replication. "preserveBlockSize" represents preserving block size during replication. "preserveReplicationNumber" represents preserving replication number during replication. -"preservePermission" represents preserving permission during +"preservePermission" represents preserving permission during replication. "preserveUser" represents preserving user during replication. +"preserveGroup" represents preserving group during replication. "preserveChecksumType" represents preserving checksum type during replication. +"preserveAcl" represents preserving ACL during replication. "preserveXattr" represents preserving Xattr during replication. +"preserveTimes" represents preserving access and modification times during replication. "tdeEncryptionEnabled" if TDE is enabled. 
---+++ Lifecycle diff --git a/docs/src/site/twiki/InstallationSteps.twiki b/docs/src/site/twiki/InstallationSteps.twiki index 93b1eab25..297d88e88 100644 --- a/docs/src/site/twiki/InstallationSteps.twiki +++ b/docs/src/site/twiki/InstallationSteps.twiki @@ -27,16 +27,15 @@ $ mvn clean install It builds and installs the package into the local repository, for use as a dependency in other projects locally. -[optionally -Dhadoop.version=<> can be appended to build for a specific version of Hadoop] - -*NOTE:* Falcon drops support for Hadoop-1 and only supports Hadoop-2 from Falcon 0.6 onwards -[optionally -Doozie.version=<> can be appended to build with a specific version of Oozie. Oozie versions ->= 4 are supported] -NOTE: Falcon builds with JDK 1.7/1.8 using -noverify option - To compile Falcon with Hive Replication, optionally "-P hadoop-2,hivedr" can be appended. For this Hive >= 1.2.0 - and Oozie >= 4.2.0 should be available. +[optionally -Dhadoop.version=<> can be appended to build for a specific version of hadoop] +*Note 1:* Falcon drops support for Hadoop-1 and only supports Hadoop-2 from Falcon 0.6 onwards + Falcon build with JDK 1.7 using -noverify option +*Note 2:* To compile Falcon with addon extensions, append additional profiles to build command using syntax -P<> + For Hive Mirroring extension, use profile"hivedr". Hive >= 1.2.0 and Oozie >= 4.2.0 is required + For HDFS Snapshot mirroring extension, use profile "hdfs-snapshot-mirroring". 
Hadoop >= 2.7.0 is required + For ADF integration, use profile "adf" ---+++Step 3 - Package and Deploy Falcon diff --git a/docs/src/site/twiki/MigrationInstructions.twiki b/docs/src/site/twiki/MigrationInstructions.twiki index 7c0e02728..a11dbc47e 100644 --- a/docs/src/site/twiki/MigrationInstructions.twiki +++ b/docs/src/site/twiki/MigrationInstructions.twiki @@ -1,15 +1,32 @@ ---+ Migration Instructions ----++ Migrate from 0.5-incubating to 0.6-incubating +---++ Migrate from 0.9 to 0.10 -This is a placeholder wiki for migration instructions from falcon 0.5-incubating to 0.6-incubating. +FALCON-1333 (Instance Search feature) requires Falcon to use titan-berkeleyje version 0.5.4 to support indexing. +Up until version 0.9 - Falcon used titan-berkeleyje-jre6 version 0.4.2. GraphDB created by version 0.4.2 cannot be +read by version 0.5.4. The solution is to migrate the GraphDB to be compatible with Falcon 0.10 release. Please make +sure that no falcon server is running while performing the migration. ----+++ Update Entities +---+++ 1. Install Falcon 0.10 +Install Falcon 0.10 by following the [[InstallationSteps][Installation Steps]]. Do not start the falcon server yet. +The tool to migrate graphDB is packaged with 0.10 Falcon server in falcon-common-0.10.jar. ----+++ Change cluster dir permissions +---+++ 2. Export GraphDB to JSON file using Falcon 0.9 +Please run the following command to generate the JSON file. ----+++ Enable/Disable TLS + + $FALCON_HOME/bin/graphdbutil.sh export < <> <> <> /jsonFile/dir/ + ----+++ Authorization +This command will create /jsonFile/dir/instanceMetadata.json +---+++ 3. Import GraphDB from JSON file using Falcon 0.10 +Please run the following command to import graphDB the JSON file. The location of graphDB will be based on property +"*.falcon.graph.storage.directory" set in startup.properties file. 
+ + + $FALCON_HOME/bin/graphdbutil.sh export < <> <> <> /jsonFile/dir/ + + +This command will import from /jsonFile/dir/instanceMetadata.json, now start the Falcon 0.10 server. diff --git a/extensions/src/main/java/org/apache/falcon/extensions/AbstractExtension.java b/extensions/src/main/java/org/apache/falcon/extensions/AbstractExtension.java index 24bbb8741..b160bb5ca 100644 --- a/extensions/src/main/java/org/apache/falcon/extensions/AbstractExtension.java +++ b/extensions/src/main/java/org/apache/falcon/extensions/AbstractExtension.java @@ -18,10 +18,12 @@ package org.apache.falcon.extensions; +import org.apache.commons.lang3.StringUtils; import org.apache.falcon.FalconException; import org.apache.falcon.extensions.mirroring.hdfs.HdfsMirroringExtension; import org.apache.falcon.extensions.mirroring.hdfsSnapshot.HdfsSnapshotMirroringExtension; import org.apache.falcon.extensions.mirroring.hive.HiveMirroringExtension; +import org.apache.falcon.util.ReplicationDistCpOption; import java.util.ArrayList; import java.util.Arrays; @@ -57,5 +59,15 @@ public static boolean isExtensionTrusted(final String extensionName) { public abstract void validate(final Properties extensionProperties) throws FalconException; public abstract Properties getAdditionalProperties(final Properties extensionProperties) throws FalconException; + + public static void addAdditionalDistCPProperties(final Properties extensionProperties, + final Properties additionalProperties) { + for (ReplicationDistCpOption distcpOption : ReplicationDistCpOption.values()) { + if (StringUtils.isBlank( + extensionProperties.getProperty(distcpOption.getName()))) { + additionalProperties.put(distcpOption.getName(), "false"); + } + } + } } diff --git a/extensions/src/main/java/org/apache/falcon/extensions/mirroring/hdfs/HdfsMirroringExtension.java b/extensions/src/main/java/org/apache/falcon/extensions/mirroring/hdfs/HdfsMirroringExtension.java index f1acae26c..ef26d81f8 100644 --- 
a/extensions/src/main/java/org/apache/falcon/extensions/mirroring/hdfs/HdfsMirroringExtension.java +++ b/extensions/src/main/java/org/apache/falcon/extensions/mirroring/hdfs/HdfsMirroringExtension.java @@ -53,15 +53,16 @@ public Properties getAdditionalProperties(final Properties extensionProperties) Properties additionalProperties = new Properties(); // Add default properties if not passed - String distcpMaxMaps = extensionProperties.getProperty(HdfsMirroringExtensionProperties.MAX_MAPS.getName()); + String distcpMaxMaps = extensionProperties.getProperty( + HdfsMirroringExtensionProperties.DISTCP_MAX_MAPS.getName()); if (StringUtils.isBlank(distcpMaxMaps)) { - additionalProperties.put(HdfsMirroringExtensionProperties.MAX_MAPS.getName(), "1"); + additionalProperties.put(HdfsMirroringExtensionProperties.DISTCP_MAX_MAPS.getName(), "1"); } String distcpMapBandwidth = extensionProperties.getProperty( - HdfsMirroringExtensionProperties.MAP_BANDWIDTH_IN_MB.getName()); + HdfsMirroringExtensionProperties.DISTCP_MAP_BANDWIDTH_IN_MB.getName()); if (StringUtils.isBlank(distcpMapBandwidth)) { - additionalProperties.put(HdfsMirroringExtensionProperties.MAP_BANDWIDTH_IN_MB.getName(), "100"); + additionalProperties.put(HdfsMirroringExtensionProperties.DISTCP_MAP_BANDWIDTH_IN_MB.getName(), "100"); } // Construct fully qualified hdfs src path @@ -105,6 +106,13 @@ public Properties getAdditionalProperties(final Properties extensionProperties) } additionalProperties.put(HdfsMirroringExtensionProperties.TARGET_CLUSTER_FS_WRITE_ENDPOINT.getName(), ClusterHelper.getStorageUrl(targetCluster)); + + if (StringUtils.isBlank( + extensionProperties.getProperty(HdfsMirroringExtensionProperties.TDE_ENCRYPTION_ENABLED.getName()))) { + additionalProperties.put(HdfsMirroringExtensionProperties.TDE_ENCRYPTION_ENABLED.getName(), "false"); + } + + addAdditionalDistCPProperties(extensionProperties, additionalProperties); return additionalProperties; } diff --git 
a/extensions/src/main/java/org/apache/falcon/extensions/mirroring/hdfs/HdfsMirroringExtensionProperties.java b/extensions/src/main/java/org/apache/falcon/extensions/mirroring/hdfs/HdfsMirroringExtensionProperties.java index 7d24b456e..52ae0c005 100644 --- a/extensions/src/main/java/org/apache/falcon/extensions/mirroring/hdfs/HdfsMirroringExtensionProperties.java +++ b/extensions/src/main/java/org/apache/falcon/extensions/mirroring/hdfs/HdfsMirroringExtensionProperties.java @@ -28,9 +28,10 @@ public enum HdfsMirroringExtensionProperties { TARGET_DIR("targetDir", "Location on target cluster for replication"), TARGET_CLUSTER("targetCluster", "Target cluster"), TARGET_CLUSTER_FS_WRITE_ENDPOINT("targetClusterFS", "Target cluster end point", false), - MAX_MAPS("distcpMaxMaps", "Maximum number of maps used during replication", false), - MAP_BANDWIDTH_IN_MB("distcpMapBandwidth", "Bandwidth in MB/s used by each mapper during replication", - false); + DISTCP_MAX_MAPS("distcpMaxMaps", "Maximum number of maps used during replication", false), + DISTCP_MAP_BANDWIDTH_IN_MB("distcpMapBandwidth", "Bandwidth in MB/s used by each mapper during replication", + false), + TDE_ENCRYPTION_ENABLED("tdeEncryptionEnabled", "Set to true if TDE encryption is enabled", false); private final String name; private final String description; diff --git a/extensions/src/main/java/org/apache/falcon/extensions/mirroring/hdfsSnapshot/HdfsSnapshotMirrorProperties.java b/extensions/src/main/java/org/apache/falcon/extensions/mirroring/hdfsSnapshot/HdfsSnapshotMirrorProperties.java index f17989632..ad707c83b 100644 --- a/extensions/src/main/java/org/apache/falcon/extensions/mirroring/hdfsSnapshot/HdfsSnapshotMirrorProperties.java +++ b/extensions/src/main/java/org/apache/falcon/extensions/mirroring/hdfsSnapshot/HdfsSnapshotMirrorProperties.java @@ -48,8 +48,8 @@ public enum HdfsSnapshotMirrorProperties { TARGET_SNAPSHOT_RETENTION_NUMBER("targetSnapshotRetentionNumber", "Number of latest target snapshots to 
retain on source", true), - DISTCP_MAX_MAPS("distcpMaxMaps", "Maximum number of maps used during distcp", false), - MAP_BANDWIDTH_IN_MB("distcpMapBandwidth", "Bandwidth in MB/s used by each mapper during replication", false), + MAX_MAPS("maxMaps", "Maximum number of maps used during distcp", false), + MAP_BANDWIDTH_IN_MB("mapBandwidth", "Bandwidth in MB/s used by each mapper during replication", false), TDE_ENCRYPTION_ENABLED("tdeEncryptionEnabled", "Is TDE encryption enabled on source and target", false), SNAPSHOT_JOB_NAME("snapshotJobName", "Name of snapshot based mirror job", false); diff --git a/extensions/src/main/java/org/apache/falcon/extensions/mirroring/hdfsSnapshot/HdfsSnapshotMirroringExtension.java b/extensions/src/main/java/org/apache/falcon/extensions/mirroring/hdfsSnapshot/HdfsSnapshotMirroringExtension.java index 09cce3b91..16b087d92 100644 --- a/extensions/src/main/java/org/apache/falcon/extensions/mirroring/hdfsSnapshot/HdfsSnapshotMirroringExtension.java +++ b/extensions/src/main/java/org/apache/falcon/extensions/mirroring/hdfsSnapshot/HdfsSnapshotMirroringExtension.java @@ -133,9 +133,9 @@ public Properties getAdditionalProperties(final Properties extensionProperties) Properties additionalProperties = new Properties(); // Add default properties if not passed - String distcpMaxMaps = extensionProperties.getProperty(HdfsSnapshotMirrorProperties.DISTCP_MAX_MAPS.getName()); + String distcpMaxMaps = extensionProperties.getProperty(HdfsSnapshotMirrorProperties.MAX_MAPS.getName()); if (StringUtils.isBlank(distcpMaxMaps)) { - additionalProperties.put(HdfsSnapshotMirrorProperties.DISTCP_MAX_MAPS.getName(), "1"); + additionalProperties.put(HdfsSnapshotMirrorProperties.MAX_MAPS.getName(), "1"); } String distcpMapBandwidth = extensionProperties.getProperty( @@ -223,6 +223,8 @@ public Properties getAdditionalProperties(final Properties extensionProperties) throw new FalconException("Cluster entity " + ExtensionProperties.CLUSTER_NAME.getName() + " not 
found"); } + + addAdditionalDistCPProperties(extensionProperties, additionalProperties); return additionalProperties; } diff --git a/extensions/src/main/java/org/apache/falcon/extensions/mirroring/hive/HiveMirroringExtension.java b/extensions/src/main/java/org/apache/falcon/extensions/mirroring/hive/HiveMirroringExtension.java index c3bd7a741..6c7e5da84 100644 --- a/extensions/src/main/java/org/apache/falcon/extensions/mirroring/hive/HiveMirroringExtension.java +++ b/extensions/src/main/java/org/apache/falcon/extensions/mirroring/hive/HiveMirroringExtension.java @@ -115,14 +115,13 @@ public void validate(final Properties extensionProperties) throws FalconExceptio } @Override - public Properties getAdditionalProperties(final Properties extensionProperties) throws FalconException { + public Properties getAdditionalProperties(final Properties extensionProperties) + throws FalconException { Properties additionalProperties = new Properties(); - String jobName = extensionProperties.getProperty(ExtensionProperties.JOB_NAME.getName()); // Add job name as Hive DR job additionalProperties.put(HiveMirroringExtensionProperties.HIVE_MIRRORING_JOB_NAME.getName(), jobName); - // Get the first source DB additionalProperties.put(HiveMirroringExtensionProperties.SOURCE_DATABASE.getName(), extensionProperties.getProperty(HiveMirroringExtensionProperties.SOURCE_DATABASES @@ -235,28 +234,31 @@ public Properties getAdditionalProperties(final Properties extensionProperties) if (StringUtils.isBlank(distcpMaxMaps)) { additionalProperties.put(HiveMirroringExtensionProperties.DISTCP_MAX_MAPS.getName(), "1"); } - String distcpMapBandwidth = extensionProperties.getProperty( HiveMirroringExtensionProperties.MAP_BANDWIDTH_IN_MB.getName()); if (StringUtils.isBlank(distcpMapBandwidth)) { additionalProperties.put(HiveMirroringExtensionProperties.MAP_BANDWIDTH_IN_MB.getName(), "100"); } - if (StringUtils.isBlank( 
extensionProperties.getProperty(HiveMirroringExtensionProperties.TDE_ENCRYPTION_ENABLED.getName()))) { additionalProperties.put(HiveMirroringExtensionProperties.TDE_ENCRYPTION_ENABLED.getName(), "false"); } - if (StringUtils.isBlank( extensionProperties.getProperty(HiveMirroringExtensionProperties.SOURCE_STAGING_PATH.getName()))) { additionalProperties.put(HiveMirroringExtensionProperties.SOURCE_STAGING_PATH.getName(), NOT_APPLICABLE); } - if (StringUtils.isBlank( extensionProperties.getProperty(HiveMirroringExtensionProperties.TARGET_STAGING_PATH.getName()))) { additionalProperties.put(HiveMirroringExtensionProperties.TARGET_STAGING_PATH.getName(), NOT_APPLICABLE); } - + if (StringUtils.isBlank( + extensionProperties.getProperty(HiveMirroringExtensionProperties.SOURCE_HS2_EXTRA_OPTS.getName()))) { + additionalProperties.put(HiveMirroringExtensionProperties.SOURCE_HS2_EXTRA_OPTS.getName(), NOT_APPLICABLE); + } + if (StringUtils.isBlank( + extensionProperties.getProperty(HiveMirroringExtensionProperties.TARGET_HS2_EXTRA_OPTS.getName()))) { + additionalProperties.put(HiveMirroringExtensionProperties.TARGET_HS2_EXTRA_OPTS.getName(), NOT_APPLICABLE); + } return additionalProperties; } } diff --git a/extensions/src/main/java/org/apache/falcon/extensions/mirroring/hive/HiveMirroringExtensionProperties.java b/extensions/src/main/java/org/apache/falcon/extensions/mirroring/hive/HiveMirroringExtensionProperties.java index 828817bc9..2276d1ca0 100644 --- a/extensions/src/main/java/org/apache/falcon/extensions/mirroring/hive/HiveMirroringExtensionProperties.java +++ b/extensions/src/main/java/org/apache/falcon/extensions/mirroring/hive/HiveMirroringExtensionProperties.java @@ -26,6 +26,7 @@ public enum HiveMirroringExtensionProperties { SOURCE_CLUSTER("sourceCluster", "Replication source cluster name"), SOURCE_METASTORE_URI("sourceMetastoreUri", "Source Hive metastore uri", false), SOURCE_HS2_URI("sourceHiveServer2Uri", "Source HS2 uri"), + 
SOURCE_HS2_EXTRA_OPTS("sourceHiveServer2ExtraOpts", "Source HS2 extra opts", false), SOURCE_DATABASES("sourceDatabases", "List of databases to replicate"), SOURCE_DATABASE("sourceDatabase", "Database to verify the setup connection", false), SOURCE_TABLES("sourceTables", "List of tables to replicate", false), @@ -40,6 +41,7 @@ public enum HiveMirroringExtensionProperties { TARGET_CLUSTER("targetCluster", "Target cluster name"), TARGET_METASTORE_URI("targetMetastoreUri", "Target Hive metastore uri", false), TARGET_HS2_URI("targetHiveServer2Uri", "Target HS2 uri"), + TARGET_HS2_EXTRA_OPTS("targetHiveServer2ExtraOpts", "Target HS2 extra opts", false), TARGET_STAGING_PATH("targetStagingPath", "Location of target staging path", false), TARGET_NN("targetNN", "Target name node", false), TARGET_NN_KERBEROS_PRINCIPAL("targetNNKerberosPrincipal", "Target name node kerberos principal", false), diff --git a/extensions/src/test/java/org/apache/falcon/extensions/ExtensionTest.java b/extensions/src/test/java/org/apache/falcon/extensions/ExtensionTest.java index 21e78d8e6..3386a313a 100644 --- a/extensions/src/test/java/org/apache/falcon/extensions/ExtensionTest.java +++ b/extensions/src/test/java/org/apache/falcon/extensions/ExtensionTest.java @@ -136,7 +136,7 @@ private static Properties getHdfsSnapshotExtensionProperties() { RETENTION_NUM); properties.setProperty(HdfsSnapshotMirrorProperties.TARGET_NN.getName(), NN_URI); - properties.setProperty(HdfsSnapshotMirrorProperties.DISTCP_MAX_MAPS.getName(), + properties.setProperty(HdfsSnapshotMirrorProperties.MAX_MAPS.getName(), "5"); properties.setProperty(HdfsSnapshotMirrorProperties.MAP_BANDWIDTH_IN_MB.getName(), "100"); diff --git a/oozie/src/main/java/org/apache/falcon/oozie/feed/FSReplicationWorkflowBuilder.java b/oozie/src/main/java/org/apache/falcon/oozie/feed/FSReplicationWorkflowBuilder.java index e45dfc525..cfcc698fb 100644 --- a/oozie/src/main/java/org/apache/falcon/oozie/feed/FSReplicationWorkflowBuilder.java +++ 
b/oozie/src/main/java/org/apache/falcon/oozie/feed/FSReplicationWorkflowBuilder.java @@ -58,6 +58,7 @@ public FSReplicationWorkflowBuilder(Feed entity) { addHDFSServersConfig(replication, src, target); addAdditionalReplicationProperties(replication); enableCounters(replication); + enableTDE(replication); addTransition(replication, SUCCESS_POSTPROCESS_ACTION_NAME, FAIL_POSTPROCESS_ACTION_NAME); workflow.getDecisionOrForkOrJoin().add(replication); diff --git a/oozie/src/main/java/org/apache/falcon/oozie/feed/FeedReplicationWorkflowBuilder.java b/oozie/src/main/java/org/apache/falcon/oozie/feed/FeedReplicationWorkflowBuilder.java index 010446b2b..db647aa6f 100644 --- a/oozie/src/main/java/org/apache/falcon/oozie/feed/FeedReplicationWorkflowBuilder.java +++ b/oozie/src/main/java/org/apache/falcon/oozie/feed/FeedReplicationWorkflowBuilder.java @@ -49,6 +49,7 @@ public abstract class FeedReplicationWorkflowBuilder extends OozieOrchestrationW private static final String MR_MAX_MAPS = "maxMaps"; private static final String MR_MAP_BANDWIDTH = "mapBandwidth"; private static final String REPLICATION_JOB_COUNTER = "job.counter"; + private static final String TDE_ENCRYPTION_ENABLED = "tdeEncryptionEnabled"; public FeedReplicationWorkflowBuilder(Feed entity) { super(entity, LifeCycle.REPLICATION); @@ -58,7 +59,7 @@ public boolean isCounterEnabled() throws FalconException { if (entity.getProperties() != null) { List propertyList = entity.getProperties().getProperties(); for (Property prop : propertyList) { - if (prop.getName().equals(REPLICATION_JOB_COUNTER) && "true".equalsIgnoreCase(prop.getValue())) { + if (prop.getName().equals(REPLICATION_JOB_COUNTER) && "true" .equalsIgnoreCase(prop.getValue())) { return true; } } @@ -66,7 +67,8 @@ public boolean isCounterEnabled() throws FalconException { return false; } - @Override public Properties build(Cluster cluster, Path buildPath) throws FalconException { + @Override + public Properties build(Cluster cluster, Path buildPath) throws 
FalconException { Cluster srcCluster = ConfigurationStore.get().get(EntityType.CLUSTER, buildPath.getName()); WORKFLOWAPP workflow = getWorkflow(srcCluster, cluster); @@ -119,6 +121,15 @@ protected ACTION enableCounters(ACTION action) throws FalconException { return action; } + protected ACTION enableTDE(ACTION action) throws FalconException { + if (isTDEEnabled()) { + List args = action.getJava().getArg(); + args.add("-tdeEncryptionEnabled"); + args.add("true"); + } + return action; + } + protected abstract WORKFLOWAPP getWorkflow(Cluster src, Cluster target) throws FalconException; @Override @@ -133,4 +144,9 @@ private String getDefaultMaxMaps() { private String getDefaultMapBandwidth() { return RuntimeProperties.get().getProperty("falcon.replication.workflow.mapbandwidth", "100"); } + + private boolean isTDEEnabled() { + String tdeEncryptionEnabled = FeedHelper.getPropertyValue(entity, TDE_ENCRYPTION_ENABLED); + return "true" .equalsIgnoreCase(tdeEncryptionEnabled); + } } diff --git a/oozie/src/main/java/org/apache/falcon/workflow/engine/OozieWorkflowEngine.java b/oozie/src/main/java/org/apache/falcon/workflow/engine/OozieWorkflowEngine.java index b879f87be..06d01421a 100644 --- a/oozie/src/main/java/org/apache/falcon/workflow/engine/OozieWorkflowEngine.java +++ b/oozie/src/main/java/org/apache/falcon/workflow/engine/OozieWorkflowEngine.java @@ -272,6 +272,24 @@ public boolean isCompleted(Entity entity) throws FalconException { || isBundleInState(bundles, BundleStatus.KILLED)); } + @Override + public boolean isMissing(Entity entity) throws FalconException { + List bundlesToRemove = new ArrayList<>(); + Map bundles = findLatestBundle(entity); + for (Map.Entry clusterBundle : bundles.entrySet()) { + if (clusterBundle.getValue() == MISSING) { // There is no active bundle for this cluster + bundlesToRemove.add(clusterBundle.getKey()); + } + } + for (String bundleToRemove : bundlesToRemove) { + bundles.remove(bundleToRemove); + } + if (bundles.size() == 0) { + return 
true; + } + return false; + } + private enum BundleStatus { ACTIVE, RUNNING, SUSPENDED, FAILED, KILLED, SUCCEEDED } @@ -1216,9 +1234,11 @@ private List getApplicableCoords(OozieClient client, Date start, } private boolean isCoordApplicable(String appName, List lifeCycles) { - for (LifeCycle lifeCycle : lifeCycles) { - if (appName.contains(lifeCycle.getTag().name())) { - return true; + if (lifeCycles != null && !lifeCycles.isEmpty()) { + for (LifeCycle lifeCycle : lifeCycles) { + if (appName.contains(lifeCycle.getTag().name())) { + return true; + } } } return false; diff --git a/pom.xml b/pom.xml index 9d33625aa..fe9426123 100644 --- a/pom.xml +++ b/pom.xml @@ -370,6 +370,7 @@ falcon-ui build-tools client + shell cli metrics titan @@ -755,12 +756,6 @@ ${titan.version} - - com.vividsolutions - jts - 1.13 - - org.apache.falcon falcon-hadoop-dependencies diff --git a/prism/src/main/java/org/apache/falcon/jdbc/BacklogMetricStore.java b/prism/src/main/java/org/apache/falcon/jdbc/BacklogMetricStore.java new file mode 100644 index 000000000..ef9a396eb --- /dev/null +++ b/prism/src/main/java/org/apache/falcon/jdbc/BacklogMetricStore.java @@ -0,0 +1,121 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.falcon.jdbc; + +import org.apache.commons.collections.CollectionUtils; +import org.apache.falcon.FalconException; +import org.apache.falcon.entity.EntityUtil; +import org.apache.falcon.entity.v0.Entity; +import org.apache.falcon.entity.v0.EntityType; +import org.apache.falcon.persistence.BacklogMetricBean; +import org.apache.falcon.persistence.PersistenceConstants; +import org.apache.falcon.service.BacklogMetricEmitterService; +import org.apache.falcon.service.FalconJPAService; +import org.apache.falcon.util.MetricInfo; + +import javax.persistence.EntityManager; +import javax.persistence.Query; +import java.util.ArrayList; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Backlog Metric Store for entitties. + */ +public class BacklogMetricStore { + + private EntityManager getEntityManager() { + return FalconJPAService.get().getEntityManager(); + } + + + public void addInstance(String entityName, String cluster, Date nominalTime, EntityType entityType) { + BacklogMetricBean backlogMetricBean = new BacklogMetricBean(); + backlogMetricBean.setClusterName(cluster); + backlogMetricBean.setEntityName(entityName); + backlogMetricBean.setNominalTime(nominalTime); + backlogMetricBean.setEntityType(entityType.name()); + EntityManager entityManager = getEntityManager(); + try { + beginTransaction(entityManager); + entityManager.persist(backlogMetricBean); + } finally { + commitAndCloseTransaction(entityManager); + } + } + + public synchronized void deleteMetricInstance(String entityName, String cluster, Date nominalTime, + EntityType entityType) { + EntityManager entityManager = getEntityManager(); + beginTransaction(entityManager); + Query q = entityManager.createNamedQuery(PersistenceConstants.DELETE_BACKLOG_METRIC_INSTANCE); + q.setParameter("entityName", entityName); + q.setParameter("clusterName", cluster); + q.setParameter("nominalTime", nominalTime); + q.setParameter("entityType", 
entityType.name()); + try{ + q.executeUpdate(); + } finally { + commitAndCloseTransaction(entityManager); + } + } + + + private void beginTransaction(EntityManager entityManager) { + entityManager.getTransaction().begin(); + } + + private void commitAndCloseTransaction(EntityManager entityManager) { + if (entityManager != null) { + entityManager.getTransaction().commit(); + entityManager.close(); + } + } + + public Map> getAllInstances() throws FalconException { + EntityManager entityManager = getEntityManager(); + Query q = entityManager.createNamedQuery(PersistenceConstants.GET_ALL_BACKLOG_INSTANCES); + List result = q.getResultList(); + + try { + if (CollectionUtils.isEmpty(result)) { + return null; + } + } finally{ + entityManager.close(); + } + + Map> backlogMetrics = new HashMap<>(); + for (BacklogMetricBean backlogMetricBean : result) { + Entity entity = EntityUtil.getEntity(backlogMetricBean.getEntityType(), + backlogMetricBean.getEntityName()); + if (!backlogMetrics.containsKey(entity)) { + backlogMetrics.put(entity, new ArrayList()); + } + List metrics = backlogMetrics.get(entity); + MetricInfo metricInfo = new MetricInfo(BacklogMetricEmitterService.DATE_FORMAT.get() + .format(backlogMetricBean.getNominalTime()), + backlogMetricBean.getClusterName()); + metrics.add(metricInfo); + backlogMetrics.put(entity, metrics); + } + return backlogMetrics; + } +} diff --git a/prism/src/main/java/org/apache/falcon/resource/channel/HTTPChannel.java b/prism/src/main/java/org/apache/falcon/resource/channel/HTTPChannel.java index a63ae6374..187d6c790 100644 --- a/prism/src/main/java/org/apache/falcon/resource/channel/HTTPChannel.java +++ b/prism/src/main/java/org/apache/falcon/resource/channel/HTTPChannel.java @@ -46,6 +46,7 @@ import javax.ws.rs.core.Response; import javax.ws.rs.core.Response.Status.Family; import javax.ws.rs.core.UriBuilder; +import java.io.IOException; import java.lang.annotation.Annotation; import java.lang.reflect.Method; import java.util.List; @@ 
-138,8 +139,8 @@ public T invoke(String methodName, Object... args) throws FalconException { if (incomingRequest != null) { incomingRequest.getInputStream().reset(); } - } catch (Exception ignore) { - // nothing to be done; + } catch (IOException e) { + LOG.error("Error in HTTPChannel", e); } } } diff --git a/prism/src/main/java/org/apache/falcon/service/BacklogMetricEmitterService.java b/prism/src/main/java/org/apache/falcon/service/BacklogMetricEmitterService.java new file mode 100644 index 000000000..801ab36fe --- /dev/null +++ b/prism/src/main/java/org/apache/falcon/service/BacklogMetricEmitterService.java @@ -0,0 +1,356 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.falcon.service; + +import org.apache.falcon.FalconException; +import org.apache.falcon.LifeCycle; +import org.apache.falcon.entity.EntityUtil; +import org.apache.falcon.entity.v0.Entity; +import org.apache.falcon.entity.v0.EntityType; +import org.apache.falcon.entity.v0.SchemaHelper; +import org.apache.falcon.entity.v0.process.Process; +import org.apache.falcon.jdbc.BacklogMetricStore; +import org.apache.falcon.metrics.MetricNotificationService; +import org.apache.falcon.resource.InstancesResult; +import org.apache.falcon.security.CurrentUser; +import org.apache.falcon.util.MetricInfo; +import org.apache.falcon.util.StartupProperties; +import org.apache.falcon.workflow.WorkflowExecutionContext; +import org.apache.falcon.workflow.WorkflowExecutionListener; +import org.apache.falcon.workflow.engine.AbstractWorkflowEngine; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.text.ParseException; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Date; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.TimeZone; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.ScheduledThreadPoolExecutor; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; + +import static org.apache.falcon.workflow.WorkflowEngineFactory.getWorkflowEngine; + +/** + * Backlog Metric Emitter Service to publish metrics to Graphite. 
+ */ +public final class BacklogMetricEmitterService implements FalconService, + EntitySLAListener, WorkflowExecutionListener { + + private static final String METRIC_PREFIX = "falcon"; + private static final String METRIC_SEPARATOR = "."; + private static final String BACKLOG_METRIC_EMIT_INTERVAL = "falcon.backlog.metricservice.emit.interval.millisecs"; + private static final String BACKLOG_METRIC_RECHECK_INTERVAL = "falcon.backlog.metricservice." + + "recheck.interval.millisecs"; + private static final String DEFAULT_PIPELINE = "DEFAULT"; + + private static final Logger LOG = LoggerFactory.getLogger(BacklogMetricEmitterService.class); + + private static BacklogMetricStore backlogMetricStore = new BacklogMetricStore(); + + private static final BacklogMetricEmitterService SERVICE = new BacklogMetricEmitterService(); + + private static MetricNotificationService metricNotificationService = + Services.get().getService(MetricNotificationService.SERVICE_NAME); + + public static BacklogMetricEmitterService get() { + return SERVICE; + } + + private BacklogMetricEmitterService() { + } + + private ScheduledThreadPoolExecutor scheduledThreadPoolExecutor1 = new ScheduledThreadPoolExecutor(1); + private ScheduledThreadPoolExecutor scheduledThreadPoolExecutor2 = new ScheduledThreadPoolExecutor(1); + + + public static final ThreadLocal DATE_FORMAT = new ThreadLocal() { + @Override + protected SimpleDateFormat initialValue() { + SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd'T'HH-mm'Z'"); + format.setTimeZone(TimeZone.getTimeZone("UTC")); + return format; + } + }; + + private static ConcurrentHashMap> entityBacklogs = new ConcurrentHashMap<>(); + + @Override + public void highSLAMissed(String entityName, String clusterName, EntityType entityType, Date nominalTime) + throws FalconException { + + if (entityType != EntityType.PROCESS) { + return; + } + Entity entity = EntityUtil.getEntity(entityType, entityName); + entityBacklogs.putIfAbsent(entity, 
Collections.synchronizedList(new ArrayList())); + List metricInfoList = entityBacklogs.get(entity); + String nominalTimeStr = DATE_FORMAT.get().format(nominalTime); + MetricInfo metricInfo = new MetricInfo(nominalTimeStr, clusterName); + if (!metricInfoList.contains(metricInfo)) { + synchronized (metricInfoList) { + backlogMetricStore.addInstance(entityName, clusterName, nominalTime, entityType); + metricInfoList.add(metricInfo); + } + } + } + + @Override + public String getName() { + return this.getClass().getSimpleName(); + } + + @Override + public void init() throws FalconException { + initInstances(); + int emitInterval = Integer.parseInt(StartupProperties.get().getProperty(BACKLOG_METRIC_EMIT_INTERVAL, + "60000")); + int recheckInterval = Integer.parseInt(StartupProperties.get().getProperty(BACKLOG_METRIC_RECHECK_INTERVAL, + "60000")); + scheduledThreadPoolExecutor1.scheduleAtFixedRate(new BacklogMetricEmitter(), + 1, emitInterval, TimeUnit.MILLISECONDS); + scheduledThreadPoolExecutor2.scheduleAtFixedRate(new BacklogCheckService(), + 1, recheckInterval, TimeUnit.MILLISECONDS); + } + + private void initInstances() throws FalconException { + LOG.info("Initializing backlog instances from state store"); + Map> backlogInstances = backlogMetricStore.getAllInstances(); + if (backlogInstances != null && !backlogInstances.isEmpty()) { + for (Map.Entry> entry : backlogInstances.entrySet()) { + List metricsInDB = entry.getValue(); + List metricInfoList = Collections.synchronizedList(metricsInDB); + entityBacklogs.put(entry.getKey(), metricInfoList); + LOG.debug("Backlog of entity " + entry.getKey().getName() + " for instances " + metricInfoList); + } + } + } + + @Override + public void destroy() throws FalconException { + scheduledThreadPoolExecutor1.shutdown(); + scheduledThreadPoolExecutor2.shutdown(); + } + + @Override + public synchronized void onSuccess(WorkflowExecutionContext context) throws FalconException { + Entity entity = 
EntityUtil.getEntity(context.getEntityType(), context.getEntityName()); + if (entity.getEntityType() != EntityType.PROCESS) { + return; + } + if (entityBacklogs.containsKey(entity)) { + List metrics = entityBacklogs.get(entity); + synchronized (metrics) { + Date date = SchemaHelper.parseDateUTC(context.getNominalTimeAsISO8601()); + backlogMetricStore.deleteMetricInstance(entity.getName(), context.getClusterName(), + date, entity.getEntityType()); + metrics.remove(new MetricInfo(DATE_FORMAT.get().format(date), context.getClusterName())); + if (metrics.isEmpty()) { + entityBacklogs.remove(entity); + } + } + } + } + + @Override + public void onFailure(WorkflowExecutionContext context) throws FalconException { + // Do Nothing + } + + @Override + public void onStart(WorkflowExecutionContext context) throws FalconException { + // Do Nothing + } + + @Override + public void onSuspend(WorkflowExecutionContext context) throws FalconException { + // Do Nothing + } + + @Override + public void onWait(WorkflowExecutionContext context) throws FalconException { + // Do Nothing + } + + /** + * Service which executes backlog evaluation and publishing metrics to Graphite parallel for entities. 
+ */ + public static class BacklogMetricEmitter implements Runnable { + private ThreadPoolExecutor executor; + + @Override + public void run() { + LOG.debug("BacklogMetricEmitter running for entities"); + executor = new ScheduledThreadPoolExecutor(10); + List futures = new ArrayList<>(); + try { + for (Entity entity : entityBacklogs.keySet()) { + futures.add(executor.submit(new BacklogCalcService(entity, entityBacklogs.get(entity)))); + } + waitForFuturesToComplete(futures); + } finally { + executor.shutdown(); + } + } + + private void waitForFuturesToComplete(List futures) { + try { + for (Future future : futures) { + future.get(); + } + } catch (InterruptedException e) { + LOG.error("Interruption while executing tasks " + e); + } catch (ExecutionException e) { + LOG.error("Error in executing threads " + e); + } + } + } + + /** + * Service which calculates backlog for given entity and publish to graphite. + */ + public static class BacklogCalcService implements Runnable { + + private Entity entityObj; + private List metrics; + + BacklogCalcService(Entity entity, List metricInfoList) { + this.entityObj = entity; + this.metrics = metricInfoList; + } + + @Override + public void run() { + + MetricInfo metricInfo = null; + HashMap backLogsCluster = new HashMap<>(); + synchronized (metrics) { + long currentTime = System.currentTimeMillis(); + Iterator iter = metrics.iterator(); + while (iter.hasNext()) { + try { + metricInfo = (MetricInfo) iter.next(); + long time = DATE_FORMAT.get().parse(metricInfo.getNominalTime()).getTime(); + long backlog = backLogsCluster.containsKey(metricInfo.getCluster()) + ? 
backLogsCluster.get(metricInfo.getCluster()) : 0; + backlog += (currentTime - time); + backLogsCluster.put(metricInfo.getCluster(), backlog); + } catch (ParseException e) { + LOG.error("Unable to parse nominal time" + metricInfo.getNominalTime()); + } + } + + } + org.apache.falcon.entity.v0.process.Process process = (Process) entityObj; + + if (backLogsCluster != null && !backLogsCluster.isEmpty()) { + for (Map.Entry entry : backLogsCluster.entrySet()) { + String clusterName = entry.getKey(); + String pipelinesStr = process.getPipelines(); + String metricName; + Long backlog = entry.getValue() / (60 * 1000L); // Converting to minutes + if (pipelinesStr != null && !pipelinesStr.isEmpty()) { + String[] pipelines = pipelinesStr.split(","); + for (String pipeline : pipelines) { + metricName = METRIC_PREFIX + METRIC_SEPARATOR + clusterName + METRIC_SEPARATOR + + pipeline + METRIC_SEPARATOR + LifeCycle.EXECUTION.name() + + METRIC_SEPARATOR + entityObj.getName() + METRIC_SEPARATOR + + "backlogInMins"; + metricNotificationService.publish(metricName, backlog); + } + } else { + metricName = METRIC_PREFIX + METRIC_SEPARATOR + clusterName + METRIC_SEPARATOR + + DEFAULT_PIPELINE + METRIC_SEPARATOR + LifeCycle.EXECUTION.name() + + METRIC_SEPARATOR + entityObj.getName() + METRIC_SEPARATOR + + "backlogInMins"; + metricNotificationService.publish(metricName, backlog); + } + } + } + } + } + + + /** + * Service runs periodically and removes succeeded instances from backlog list. 
+ */ + public static class BacklogCheckService implements Runnable { + + @Override + public void run() { + LOG.debug("BacklogCheckService running for entities"); + try { + AbstractWorkflowEngine wfEngine = getWorkflowEngine(); + for (Entity entity : entityBacklogs.keySet()) { + List metrics = entityBacklogs.get(entity); + if (!metrics.isEmpty()) { + synchronized (metrics) { + Iterator iterator = metrics.iterator(); + while (iterator.hasNext()) { + MetricInfo metricInfo = (MetricInfo) iterator.next(); + String nominalTimeStr = metricInfo.getNominalTime(); + Date nominalTime; + try { + nominalTime = DATE_FORMAT.get().parse(nominalTimeStr); + if (entity.getACL().getOwner() != null && !entity.getACL().getOwner().isEmpty()) { + CurrentUser.authenticate(entity.getACL().getOwner()); + } else { + CurrentUser.authenticate(System.getProperty("user.name")); + } + if (wfEngine.isMissing(entity)) { + LOG.info("Entity of name {} was deleted so removing instance of " + + "nominaltime {} ", entity.getName(), nominalTimeStr); + backlogMetricStore.deleteMetricInstance(entity.getName(), + metricInfo.getCluster(), nominalTime, entity.getEntityType()); + iterator.remove(); + continue; + } + InstancesResult status = wfEngine.getStatus(entity, nominalTime, + nominalTime, null, null); + if (status.getInstances().length > 0 + && status.getInstances()[0].status == InstancesResult. 
+ WorkflowStatus.SUCCEEDED) { + LOG.debug("Instance of nominaltime {} of entity {} was succeeded, removing " + + "from backlog entries", nominalTimeStr, entity.getName()); + backlogMetricStore.deleteMetricInstance(entity.getName(), + metricInfo.getCluster(), nominalTime, entity.getEntityType()); + iterator.remove(); + } + } catch (ParseException e) { + LOG.error("Unable to parse date " + nominalTimeStr); + } + } + } + } + } + } catch (Throwable e) { + LOG.error("Error while checking backlog metrics" + e); + } + } + } + +} diff --git a/prism/src/main/java/org/apache/falcon/service/EntitySLAAlertService.java b/prism/src/main/java/org/apache/falcon/service/EntitySLAAlertService.java index 57e46b7f1..a7cafeb23 100644 --- a/prism/src/main/java/org/apache/falcon/service/EntitySLAAlertService.java +++ b/prism/src/main/java/org/apache/falcon/service/EntitySLAAlertService.java @@ -150,7 +150,7 @@ void processSLACandidates(){ LOG.info("Entity :"+ entityName + "Cluster:" + clusterName + "Nominal Time:" + nominalTime + "EntityType:"+ entityType + "missed SLAHigh"); - highSLAMissed(entityName, clusterName, entityType, nominalTime); + highSLAMissed(entityName, clusterName, EntityType.valueOf(entityType), nominalTime); } } } catch (FalconException e){ @@ -160,12 +160,12 @@ void processSLACandidates(){ } @Override - public void highSLAMissed(String entityName, String clusterName, String entityType , Date nominalTime + public void highSLAMissed(String entityName, String clusterName, EntityType entityType , Date nominalTime ) throws FalconException { LOG.debug("Listners called..."); for (EntitySLAListener listener : listeners) { listener.highSLAMissed(entityName, clusterName, entityType, nominalTime); - store.deleteEntityAlertInstance(entityName, clusterName, nominalTime, entityType); + store.deleteEntityAlertInstance(entityName, clusterName, nominalTime, entityType.name()); } } } diff --git a/prism/src/main/java/org/apache/falcon/service/EntitySLAListener.java 
b/prism/src/main/java/org/apache/falcon/service/EntitySLAListener.java index 421ea38dd..73d383b37 100644 --- a/prism/src/main/java/org/apache/falcon/service/EntitySLAListener.java +++ b/prism/src/main/java/org/apache/falcon/service/EntitySLAListener.java @@ -18,6 +18,7 @@ package org.apache.falcon.service; import org.apache.falcon.FalconException; +import org.apache.falcon.entity.v0.EntityType; import java.util.Date; @@ -25,6 +26,6 @@ * Interface for FeedSLAAlert to be used by Listeners. */ public interface EntitySLAListener { - void highSLAMissed(String entityName, String clusterName, String entityType, Date nominalTime) + void highSLAMissed(String entityName, String clusterName, EntityType entityType, Date nominalTime) throws FalconException; } diff --git a/prism/src/main/java/org/apache/falcon/util/MetricInfo.java b/prism/src/main/java/org/apache/falcon/util/MetricInfo.java new file mode 100644 index 000000000..694bb8783 --- /dev/null +++ b/prism/src/main/java/org/apache/falcon/util/MetricInfo.java @@ -0,0 +1,79 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.falcon.util; + +/** + * Storage for Backlog Metrics. 
+ */ +public class MetricInfo { + + private String nominalTime; + private String cluster; + + public MetricInfo(String nominalTimeStr, String clusterName) { + this.nominalTime = nominalTimeStr; + this.cluster = clusterName; + } + + public String getNominalTime() { + return nominalTime; + } + + public String getCluster() { + return cluster; + } + + public void setCluster(String cluster) { + this.cluster = cluster; + } + + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || !o.getClass().equals(this.getClass())) { + return false; + } + + MetricInfo other = (MetricInfo) o; + + boolean nominalTimeEqual = this.getNominalTime() != null + ? this.getNominalTime().equals(other.getNominalTime()) : other.getNominalTime() == null; + + boolean clusterEqual = this.getCluster() != null + ? this.getCluster().equals(other.getCluster()) : other.getCluster() == null; + + return this == other + || (nominalTimeEqual && clusterEqual); + } + + @Override + public int hashCode() { + int result = nominalTime != null ? nominalTime.hashCode() : 0; + result = 31 * result + (cluster != null ? cluster.hashCode() : 0); + return result; + } + + public String toString() { + return "Nominaltime: " + this.getNominalTime() + " cluster: " + this.getCluster(); + } + + +} diff --git a/prism/src/test/java/org/apache/falcon/service/BacklogMetricEmitterServiceTest.java b/prism/src/test/java/org/apache/falcon/service/BacklogMetricEmitterServiceTest.java new file mode 100644 index 000000000..67d256e98 --- /dev/null +++ b/prism/src/test/java/org/apache/falcon/service/BacklogMetricEmitterServiceTest.java @@ -0,0 +1,133 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.falcon.service; + +import org.apache.falcon.cluster.util.EmbeddedCluster; +import org.apache.falcon.entity.AbstractTestBase; +import org.apache.falcon.entity.v0.EntityType; +import org.apache.falcon.jdbc.BacklogMetricStore; +import org.apache.falcon.metrics.MetricNotificationService; +import org.apache.falcon.tools.FalconStateStoreDBCLI; +import org.apache.falcon.util.StateStoreProperties; +import org.apache.falcon.workflow.WorkflowExecutionArgs; +import org.apache.falcon.workflow.WorkflowExecutionContext; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.LocalFileSystem; +import org.apache.hadoop.fs.Path; +import org.mockito.ArgumentCaptor; +import org.mockito.Mockito; +import org.testng.Assert; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; + +import java.io.File; +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +/** + * Test cases for Backlog Metric Store. 
+ */ +public class BacklogMetricEmitterServiceTest extends AbstractTestBase{ + private static final String DB_BASE_DIR = "target/test-data/backlogmetricdb"; + protected static String dbLocation = DB_BASE_DIR + File.separator + "data.db"; + protected static String url = "jdbc:derby:"+ dbLocation +";create=true"; + protected static final String DB_SQL_FILE = DB_BASE_DIR + File.separator + "out.sql"; + protected LocalFileSystem fs = new LocalFileSystem(); + + private static BacklogMetricStore backlogMetricStore; + private static FalconJPAService falconJPAService = FalconJPAService.get(); + private static BacklogMetricEmitterService backlogMetricEmitterService; + private MetricNotificationService mockMetricNotificationService; + + protected int execDBCLICommands(String[] args) { + return new FalconStateStoreDBCLI().run(args); + } + + public void createDB(String file) { + File sqlFile = new File(file); + String[] argsCreate = { "create", "-sqlfile", sqlFile.getAbsolutePath(), "-run" }; + int result = execDBCLICommands(argsCreate); + Assert.assertEquals(0, result); + Assert.assertTrue(sqlFile.exists()); + + } + + @AfterClass + public void cleanup() throws IOException { + cleanupDB(); + } + + private void cleanupDB() throws IOException { + fs.delete(new Path(DB_BASE_DIR), true); + } + + @BeforeClass + public void setup() throws Exception{ + StateStoreProperties.get().setProperty(FalconJPAService.URL, url); + Configuration localConf = new Configuration(); + fs.initialize(LocalFileSystem.getDefaultUri(localConf), localConf); + fs.mkdirs(new Path(DB_BASE_DIR)); + createDB(DB_SQL_FILE); + falconJPAService.init(); + this.dfsCluster = EmbeddedCluster.newCluster("testCluster"); + this.conf = dfsCluster.getConf(); + backlogMetricStore = new BacklogMetricStore(); + mockMetricNotificationService = Mockito.mock(MetricNotificationService.class); + Mockito.when(mockMetricNotificationService.getName()).thenReturn("MetricNotificationService"); + 
Services.get().register(mockMetricNotificationService); + Services.get().register(BacklogMetricEmitterService.get()); + backlogMetricEmitterService = BacklogMetricEmitterService.get(); + + } + + + @Test + public void testBacklogEmitter() throws Exception { + backlogMetricEmitterService.init(); + storeEntity(EntityType.PROCESS, "entity1"); + backlogMetricEmitterService.highSLAMissed("entity1", "cluster1", EntityType.PROCESS, + BacklogMetricEmitterService.DATE_FORMAT.get().parse("2016-06-30T00-00Z")); + Thread.sleep(10); + ArgumentCaptor captor = ArgumentCaptor.forClass(String.class); + ArgumentCaptor valueCaptor = ArgumentCaptor.forClass(Long.class); + Mockito.verify(mockMetricNotificationService, Mockito.atLeastOnce()).publish(captor.capture(), + valueCaptor.capture()); + Assert.assertEquals(captor.getValue(), "falcon.cluster1.testPipeline.EXECUTION.entity1.backlogInMins"); + WorkflowExecutionContext workflowExecutionContext = getWorkflowExecutionContext(); + backlogMetricEmitterService.onSuccess(workflowExecutionContext); + Thread.sleep(100); + Mockito.reset(mockMetricNotificationService); + Mockito.verify(mockMetricNotificationService, Mockito.times(0)).publish(Mockito.any(String.class), + Mockito.any(Long.class)); + + } + + private WorkflowExecutionContext getWorkflowExecutionContext() { + Map args = new HashMap<>(); + args.put(WorkflowExecutionArgs.ENTITY_TYPE, "process"); + args.put(WorkflowExecutionArgs.CLUSTER_NAME, "cluster1"); + args.put(WorkflowExecutionArgs.ENTITY_NAME, "entity1"); + args.put(WorkflowExecutionArgs.NOMINAL_TIME, "2016-06-30-00-00"); + args.put(WorkflowExecutionArgs.OPERATION, "GENERATE"); + WorkflowExecutionContext workflowExecutionContext = new WorkflowExecutionContext(args); + return workflowExecutionContext; + + } +} diff --git a/prism/src/test/resources/startup.properties b/prism/src/test/resources/startup.properties new file mode 100644 index 000000000..d72dbba6d --- /dev/null +++ b/prism/src/test/resources/startup.properties @@ -0,0 
+1,338 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +*.domain=debug + +######### Implementation classes ######### +## DONT MODIFY UNLESS SURE ABOUT CHANGE ## + +*.workflow.engine.impl=org.apache.falcon.workflow.engine.OozieWorkflowEngine +*.lifecycle.engine.impl=org.apache.falcon.lifecycle.engine.oozie.OoziePolicyBuilderFactory +*.oozie.process.workflow.builder=org.apache.falcon.workflow.OozieProcessWorkflowBuilder +*.oozie.feed.workflow.builder=org.apache.falcon.workflow.OozieFeedWorkflowBuilder +*.SchedulableEntityManager.impl=org.apache.falcon.resource.SchedulableEntityManager +*.ConfigSyncService.impl=org.apache.falcon.resource.ConfigSyncService +*.ProcessInstanceManager.impl=org.apache.falcon.resource.InstanceManager +*.catalog.service.impl=org.apache.falcon.catalog.HiveCatalogService + +##### Falcon Services ##### +*.application.services=org.apache.falcon.security.AuthenticationInitializationService,\ + org.apache.falcon.workflow.WorkflowJobEndNotificationService, \ + org.apache.falcon.service.ProcessSubscriberService,\ + org.apache.falcon.extensions.ExtensionService,\ + org.apache.falcon.service.EntitySLAMonitoringService,\ + org.apache.falcon.service.LifecyclePolicyMap,\ + 
org.apache.falcon.entity.store.ConfigurationStore,\ + org.apache.falcon.rerun.service.RetryService,\ + org.apache.falcon.rerun.service.LateRunService,\ + org.apache.falcon.metadata.MetadataMappingService,\ + org.apache.falcon.service.LogCleanupService,\ + org.apache.falcon.service.GroupsService,\ + org.apache.falcon.service.ProxyUserService,\ + org.apache.falcon.service.FalconJPAService +##Add if you want to send data to graphite +# org.apache.falcon.metrics.MetricNotificationService\ +## Add if you want to use Falcon Azure integration ## +# org.apache.falcon.adfservice.ADFProviderService +## If you wish to use Falcon native scheduler add the commented out services below to application.services ## +# org.apache.falcon.notification.service.impl.JobCompletionService,\ +# org.apache.falcon.notification.service.impl.SchedulerService,\ +# org.apache.falcon.notification.service.impl.AlarmService,\ +# org.apache.falcon.notification.service.impl.DataAvailabilityService,\ +# org.apache.falcon.execution.FalconExecutionService,\ + + + +# List of Lifecycle policies configured. +*.falcon.feed.lifecycle.policies=org.apache.falcon.lifecycle.retention.AgeBasedDelete +# List of builders for the policies. +*.falcon.feed.lifecycle.policy.builders=org.apache.falcon.lifecycle.engine.oozie.retention.AgeBasedDeleteBuilder +##### Falcon Configuration Store Change listeners ##### +*.configstore.listeners=org.apache.falcon.entity.v0.EntityGraph,\ + org.apache.falcon.entity.ColoClusterRelation,\ + org.apache.falcon.group.FeedGroupMap,\ + org.apache.falcon.entity.store.FeedLocationStore,\ + org.apache.falcon.service.EntitySLAMonitoringService,\ + org.apache.falcon.service.SharedLibraryHostingService +## If you wish to use Falcon native scheduler, add the State store as a configstore listener. 
## +# org.apache.falcon.state.store.jdbc.JdbcStateStore + +## If you wish to use Feed Alert to know when a feed misses a high SLA register your class here +*.feedAlert.listeners= + +##### JMS MQ Broker Implementation class ##### +*.broker.impl.class=org.apache.activemq.ActiveMQConnectionFactory + +##### List of shared libraries for Falcon workflows ##### +*.shared.libs=activemq-all,geronimo-j2ee-management,jms,json-simple,oozie-client,spring-jms,commons-lang3,commons-el + +##### Workflow Job Execution Completion listeners ##### +*.workflow.execution.listeners= + +######### Implementation classes ######### + + +######### System startup parameters ######### + +# Location of libraries that is shipped to Hadoop +*.system.lib.location=${FALCON_HOME}/sharedlibs + +# Location to store user entity configurations + +#Configurations used in UTs +debug.config.store.uri=file://${user.dir}/target/store +#Location to store state of Feed SLA monitoring service +debug.feed.sla.service.store.uri= file://${user.dir}/target/data/sla/pendingfeedinstances +debug.config.oozie.conf.uri=${user.dir}/target/oozie +debug.system.lib.location=${system.lib.location} +debug.broker.url=vm://localhost +debug.retry.recorder.path=${user.dir}/target/retry +debug.libext.feed.retention.paths=${falcon.libext} +debug.libext.feed.replication.paths=${falcon.libext} +debug.libext.process.paths=${falcon.libext} + +debug.extension.store.uri=file://${user.dir}/target/extension/store + +#Configurations used in ITs +it.config.store.uri=file://${user.dir}/target/store +it.config.oozie.conf.uri=${user.dir}/target/oozie +it.system.lib.location=${system.lib.location} +it.broker.url=tcp://localhost:61616 +it.retry.recorder.path=${user.dir}/target/retry +it.libext.feed.retention.paths=${falcon.libext} +it.libext.feed.replication.paths=${falcon.libext} +it.libext.process.paths=${falcon.libext} +it.workflow.execution.listeners=org.apache.falcon.catalog.CatalogPartitionHandler + 
+*.falcon.cleanup.service.frequency=minutes(5) + +######### Properties for Feed SLA Monitoring ######### +# frequency of serialization for the state of FeedSLAMonitoringService - 1 hour +*.feed.sla.serialization.frequency.millis=3600000 + +# Maximum number of pending instances per feed that will be recorded. After this older instances will be removed in +# a FIFO fashion. +*.feed.sla.queue.size=288 + +# Do not change unless really sure +# Frequency in seconds of "status check" for pending feed instances, default is 10 mins = 10 * 60 +*.feed.sla.statusCheck.frequency.seconds=600 + +# Do not change unless really sure +# Time Duration (in milliseconds) in future for generating pending feed instances. +# In every cycle pending feed instances are added for monitoring, till this time in future. +# It must be more than statusCheck frequency, default is 15 mins = 15 * 60 * 1000 +*.feed.sla.lookAheadWindow.millis=900000 + + +######### Properties for configuring JMS provider - activemq ######### +# Default Active MQ url +*.broker.url=tcp://localhost:61616 + +# default time-to-live for a JMS message 3 days (time in minutes) +*.broker.ttlInMins=4320 +*.entity.topic=FALCON.ENTITY.TOPIC +*.max.retry.failure.count=1 +*.retry.recorder.path=${user.dir}/logs/retry + +######### Properties for configuring iMon client and metric ######### +*.internal.queue.size=1000 + + +######### Graph Database Properties ######### +# Graph implementation +*.falcon.graph.blueprints.graph=com.thinkaurelius.titan.core.TitanFactory + +# Graph Storage +# IMPORTANT: Please enable one of the graph db backend: hbase or berkeleydb, per instructions below. + +# Enable the following for Berkeley DB. Make sure je-5.0.73.jar is downloaded and available +# under Falcon webapp directory or under falcon server classpath. 
+#*.falcon.graph.storage.backend=berkeleyje +#*.falcon.graph.storage.directory=/${falcon.home}/data/graphdb +#*.falcon.graph.serialize.path=${user.dir}/target/graphdb + +# Enable the following for HBase +#*.falcon.graph.storage.backend=hbase +# For standalone mode , set hostname to localhost; for distributed mode, set to the zookeeper quorum +# @see http://s3.thinkaurelius.com/docs/titan/current/hbase.html#_remote_server_mode_2 +#*.falcon.graph.storage.hostname=localhost +#*.falcon.graph.storage.hbase.table=falcon_titan + +# Avoid acquiring read lock when iterating over large graphs +# See http://s3.thinkaurelius.com/docs/titan/0.5.4/bdb.html +*.falcon.graph.storage.transactions=false + +# Uncomment and override the following properties for enabling metrics for titan db and pushing them to graphite. You +# can use other reporters like ganglia also. +# Refer (http://thinkaurelius.github.io/titan/wikidoc/0.4.2/Titan-Performance-and-Monitoring)for finding the +# relevant configurations for your use case. NOTE: you have to prefix all the properties with "*.falcon.graph." +# *.falcon.graph.storage.enable-basic-metrics = true +# Required; IP or hostname string +# *.falcon.graph.metrics.graphite.hostname = 192.168.0.1 +# Required; specify logging interval in milliseconds +# *.falcon.graph.metrics.graphite.interval = 60000 + +######### Authentication Properties ######### + +# Authentication type must be specified: simple|kerberos +*.falcon.authentication.type=simple + +##### Service Configuration + +# Indicates the Kerberos principal to be used in Falcon Service. +*.falcon.service.authentication.kerberos.principal= + +# Location of the keytab file with the credentials for the Service principal. 
+*.falcon.service.authentication.kerberos.keytab= + +# name node principal to talk to config store +*.dfs.namenode.kerberos.principal= + +##### SPNEGO Configuration + +# Authentication type must be specified: simple|kerberos| +# org.apache.falcon.security.RemoteUserInHeaderBasedAuthenticationHandler can be used for backwards compatibility +*.falcon.http.authentication.type=simple + +# Indicates how long (in seconds) an authentication token is valid before it has to be renewed. +*.falcon.http.authentication.token.validity=36000 + +# The signature secret for signing the authentication tokens. +*.falcon.http.authentication.signature.secret=falcon + +# The domain to use for the HTTP cookie that stores the authentication token. +*.falcon.http.authentication.cookie.domain= + +# Indicates if anonymous requests are allowed when using 'simple' authentication. +*.falcon.http.authentication.simple.anonymous.allowed=false + +# Indicates the Kerberos principal to be used for HTTP endpoint. +# The principal MUST start with 'HTTP/' as per Kerberos HTTP SPNEGO specification. +*.falcon.http.authentication.kerberos.principal= + +# Location of the keytab file with the credentials for the HTTP principal. +*.falcon.http.authentication.kerberos.keytab= + +# The kerberos names rules is to resolve kerberos principal names, refer to Hadoop's KerberosName for more details. 
+*.falcon.http.authentication.kerberos.name.rules=DEFAULT + +# Comma separated list of black listed users +*.falcon.http.authentication.blacklisted.users= + +######### Authentication Properties ######### + + +######### Authorization Properties ######### + +# Authorization Enabled flag: false (default)|true +*.falcon.security.authorization.enabled=false + +# The name of the group of super-users +*.falcon.security.authorization.superusergroup=falcon + +# Admin Users, comma separated users +*.falcon.security.authorization.admin.users=falcon,ambari-qa + +# Admin Group Membership, comma separated users +*.falcon.security.authorization.admin.groups=falcon,staff + +# Authorization Provider Implementation Fully Qualified Class Name +*.falcon.security.authorization.provider=org.apache.falcon.security.DefaultAuthorizationProvider + +######### Authorization Properties ######### + +######### ADF Configurations start ######### + +# A String object that represents the namespace +*.microsoft.windowsazure.services.servicebus.namespace= + +# Request and status queues on the namespace +*.microsoft.windowsazure.services.servicebus.requestqueuename= +*.microsoft.windowsazure.services.servicebus.statusqueuename= + +# A String object that contains the SAS key name +*.microsoft.windowsazure.services.servicebus.sasKeyName= + +# A String object that contains the SAS key +*.microsoft.windowsazure.services.servicebus.sasKey= + +# A String object containing the base URI that is added to your Service Bus namespace to form the URI to connect +# to the Service Bus service. 
To access the default public Azure service, pass ".servicebus.windows.net" +*.microsoft.windowsazure.services.servicebus.serviceBusRootUri= + +# Service bus polling frequency +*.microsoft.windowsazure.services.servicebus.polling.frequency= + +# Super user +*.microsoft.windowsazure.services.servicebus.superuser= + +######### ADF Configurations end ########### + +######### SMTP Properties ######## + +# Setting SMTP hostname +#*.falcon.email.smtp.host=localhost + +# Setting SMTP port number +#*.falcon.email.smtp.port=25 + +# Setting email from address +#*.falcon.email.from.address=falcon@localhost + +# Setting email Auth +#*.falcon.email.smtp.auth=false + +#Setting user name +#*.falcon.email.smtp.user="" + +#Setting password +#*.falcon.email.smtp.password="" + +# Setting monitoring plugin, if SMTP parameters is defined +#*.monitoring.plugins=org.apache.falcon.plugin.DefaultMonitoringPlugin,\ +# org.apache.falcon.plugin.EmailNotificationPlugin + +######### StateStore Properties ##### +#*.falcon.state.store.impl=org.apache.falcon.state.store.jdbc.JDBCStateStore +#*.falcon.statestore.jdbc.driver=org.apache.derby.jdbc.EmbeddedDriver +#*.falcon.statestore.jdbc.url=jdbc:derby:data/statestore.db;create=true +#*.falcon.statestore.jdbc.username=sa +#*.falcon.statestore.jdbc.password= +#*.falcon.statestore.connection.data.source=org.apache.commons.dbcp.BasicDataSource +## Maximum number of active connections that can be allocated from this pool at the same time. +#*.falcon.statestore.pool.max.active.conn=10 +#*.falcon.statestore.connection.properties= +## Indicates the interval (in milliseconds) between eviction runs. +#*.falcon.statestore.validate.db.connection.eviction.interval=300000 +## The number of objects to examine during each run of the idle object evictor thread. +#*.falcon.statestore.validate.db.connection.eviction.num=10 +## Creates Falcon DB. +## If set to true, it creates the DB schema if it does not exist. If the DB schema exists is a NOP. 
+## If set to false, it does not create the DB schema. If the DB schema does not exist it fails start up. +#*.falcon.statestore.create.db.schema=true + +# Graphite properties +*.falcon.graphite.hostname=localhost +*.falcon.graphite.port=2003 +*.falcon.graphite.frequency=1 +*.falcon.graphite.prefix=falcon + +# Backlog Metric Properties +*.falcon.backlog.metricservice.emit.interval.millisecs=10 +*.falcon.backlog.metricservice.recheck.interval.millisecs=1000 diff --git a/release-docs/0.10/CHANGES.0.10.md b/release-docs/0.10/CHANGES.0.10.md index d3a6ef32b..bfe3a27ee 100644 --- a/release-docs/0.10/CHANGES.0.10.md +++ b/release-docs/0.10/CHANGES.0.10.md @@ -1,12 +1,13 @@ # Apache Falcon Changelog -## Release 0.10 - 2016-07-19 +## Release 0.10 - 2016-07-26 ### INCOMPATIBLE CHANGES: | JIRA | Summary | Priority | Component | Reporter | Contributor | |:---- |:---- | :--- |:---- |:---- |:---- | | [FALCON-1926](https://issues.apache.org/jira/browse/FALCON-1926) | Filter out effectively non-falcon related JMS messages from Oozie | Major | messaging | Venkatesan Ramachandran | Venkatesan Ramachandran | +| [FALCON-1858](https://issues.apache.org/jira/browse/FALCON-1858) | Support HBase as a storage backend for Falcon Titan graphDB | Major | . | Ying Zheng | Venkat Ranganathan | | [FALCON-1852](https://issues.apache.org/jira/browse/FALCON-1852) | Optional Input for a process not truly optional | Major | . | Pallavi Rao | Pallavi Rao | | [FALCON-1844](https://issues.apache.org/jira/browse/FALCON-1844) | Falcon feed replication leaves behind old files when a feed instance is re-run | Major | . | Pallavi Rao | Pallavi Rao | | [FALCON-1835](https://issues.apache.org/jira/browse/FALCON-1835) | Falcon should do coord rerun rather than workflow rerun to ensure concurrency | Major | . 
| Pallavi Rao | Pallavi Rao | @@ -20,7 +21,6 @@ | [FALCON-1919](https://issues.apache.org/jira/browse/FALCON-1919) | Provide user the option to store sensitive information with Hadoop credential provider | Major | . | Ying Zheng | Ying Zheng | | [FALCON-1865](https://issues.apache.org/jira/browse/FALCON-1865) | Persist Feed sla data to database | Major | . | Ajay Yadava | Praveen Adlakha | | [FALCON-1861](https://issues.apache.org/jira/browse/FALCON-1861) | Support HDFS Snapshot based replication in Falcon | Major | replication | Balu Vellanki | Balu Vellanki | -| [FALCON-1858](https://issues.apache.org/jira/browse/FALCON-1858) | Support HBase as a storage backend for Falcon Titan graphDB | Major | . | Ying Zheng | Venkat Ranganathan | | [FALCON-1763](https://issues.apache.org/jira/browse/FALCON-1763) | Create a spark execution engine for Falcon | Major | . | Venkat Ranganathan | Peeyush Bishnoi | | [FALCON-1627](https://issues.apache.org/jira/browse/FALCON-1627) | Provider integration with Azure Data Factory pipelines | Major | . | Venkat Ranganathan | Ying Zheng | | [FALCON-1623](https://issues.apache.org/jira/browse/FALCON-1623) | Implement Safe Mode in Falcon | Major | . | sandeep samudrala | Balu Vellanki | @@ -54,7 +54,16 @@ | JIRA | Summary | Priority | Component | Reporter | Contributor | |:---- |:---- | :--- |:---- |:---- |:---- | +| [FALCON-2107](https://issues.apache.org/jira/browse/FALCON-2107) | NPE in FalconWorkflowEngine::isActive() method | Blocker | . | Balu Vellanki | Balu Vellanki | +| [FALCON-2104](https://issues.apache.org/jira/browse/FALCON-2104) | Loss of data in GraphDB when upgrading Falcon from 0.9 to 0.10 | Blocker | . | Balu Vellanki | Balu Vellanki | +| [FALCON-2100](https://issues.apache.org/jira/browse/FALCON-2100) | Remove dependency on com.vividsolutions.jts | Major | . 
| Balu Vellanki | Balu Vellanki | +| [FALCON-2090](https://issues.apache.org/jira/browse/FALCON-2090) | HDFS Snapshot failed with UnknownHostException when scheduling in HA Mode | Critical | replication | Murali Ramasami | Balu Vellanki | +| [FALCON-2088](https://issues.apache.org/jira/browse/FALCON-2088) | Entity submission fails with EntityNotRegisteredException in distributed mode | Blocker | feed, prism, process | Pragya Mittal | Praveen Adlakha | +| [FALCON-2084](https://issues.apache.org/jira/browse/FALCON-2084) | HCatReplicationTest are failing in secure mode | Critical | replication | Murali Ramasami | Venkat Ranganathan | +| [FALCON-2081](https://issues.apache.org/jira/browse/FALCON-2081) | ExtensionManagerIT fails occassionally | Blocker | tests | Balu Vellanki | Balu Vellanki | | [FALCON-2076](https://issues.apache.org/jira/browse/FALCON-2076) | Server fails to start since extension.store.uri is not defined in startup.properties | Major | prism | Pragya Mittal | Balu Vellanki | +| [FALCON-2075](https://issues.apache.org/jira/browse/FALCON-2075) | Falcon HiveDR tasks do not report progress and can get killed | Critical | . | Venkat Ranganathan | Venkat Ranganathan | +| [FALCON-2071](https://issues.apache.org/jira/browse/FALCON-2071) | Falcon Spark SQL failing with Yarn Client Mode | Critical | process | Murali Ramasami | Peeyush Bishnoi | | [FALCON-2061](https://issues.apache.org/jira/browse/FALCON-2061) | Falcon CLI shows hadoop classpath loading info in the console | Major | client | Murali Ramasami | Balu Vellanki | | [FALCON-2060](https://issues.apache.org/jira/browse/FALCON-2060) | Retry does not happen if instance timedout | Major | . | Pragya Mittal | Pallavi Rao | | [FALCON-2058](https://issues.apache.org/jira/browse/FALCON-2058) | s3 tests with dummy url no longer compatible with latest HDFS | Major | . 
| Ying Zheng | Ying Zheng | @@ -147,7 +156,6 @@ | [FALCON-1784](https://issues.apache.org/jira/browse/FALCON-1784) | Add regression test for for FALCON-1647 | Major | merlin | Paul Isaychuk | Paul Isaychuk | | [FALCON-1783](https://issues.apache.org/jira/browse/FALCON-1783) | Fix ProcessUpdateTest and SearchApiTest to use prism | Major | merlin | Paul Isaychuk | Paul Isaychuk | | [FALCON-1766](https://issues.apache.org/jira/browse/FALCON-1766) | Add CLI metrics check for HiveDR, HDFS and feed replication | Major | merlin | Paul Isaychuk | Paul Isaychuk | -| [FALCON-1749](https://issues.apache.org/jira/browse/FALCON-1749) | Instance status does not show instances if entity is deleted from one of the colos | Major | prism, scheduler | Pragya Mittal | Praveen Adlakha | | [FALCON-1743](https://issues.apache.org/jira/browse/FALCON-1743) | Entity summary does not work via prism | Major | client | Pragya Mittal | Ajay Yadava | | [FALCON-1724](https://issues.apache.org/jira/browse/FALCON-1724) | Falcon CLI.twiki in docs folder is not pointed by index page | Major | . | Praveen Adlakha | Praveen Adlakha | | [FALCON-1721](https://issues.apache.org/jira/browse/FALCON-1721) | Move checkstyle artifacts under parent | Major | . 
| Shwetha G S | sandeep samudrala | @@ -159,6 +167,7 @@ | JIRA | Summary | Priority | Component | Reporter | Contributor | |:---- |:---- | :--- |:---- |:---- |:---- | +| [FALCON-2099](https://issues.apache.org/jira/browse/FALCON-2099) | Update Installation-steps.txt and NOTICE.txt for 0.10 release | Major | ease | Balu Vellanki | Balu Vellanki | | [FALCON-2000](https://issues.apache.org/jira/browse/FALCON-2000) | Create branch 0.10 | Major | general | Balu Vellanki | Balu Vellanki | | [FALCON-1996](https://issues.apache.org/jira/browse/FALCON-1996) | Upgrade falcon POM for 0.10 release | Major | build-tools | Balu Vellanki | Balu Vellanki | | [FALCON-1993](https://issues.apache.org/jira/browse/FALCON-1993) | Update JIRA fix versions | Major | general | Balu Vellanki | Balu Vellanki | @@ -199,6 +208,7 @@ | JIRA | Summary | Priority | Component | Reporter | Contributor | |:---- |:---- | :--- |:---- |:---- |:---- | +| [FALCON-2063](https://issues.apache.org/jira/browse/FALCON-2063) | Add change log for 0.10 | Major | . | Ajay Yadava | Ajay Yadava | | [FALCON-1765](https://issues.apache.org/jira/browse/FALCON-1765) | Move to github pull request model | Major | . | Ajay Yadava | Ajay Yadava | | [FALCON-2008](https://issues.apache.org/jira/browse/FALCON-2008) | Add documentation for Graphite Notification Plugin | Major | . 
| Praveen Adlakha | Praveen Adlakha | | [FALCON-1948](https://issues.apache.org/jira/browse/FALCON-1948) | Document steps to configure Oozie for Falcon | Major | docs | Venkatesan Ramachandran | Venkatesan Ramachandran | diff --git a/replication/src/main/java/org/apache/falcon/replication/FeedReplicator.java b/replication/src/main/java/org/apache/falcon/replication/FeedReplicator.java index 0906bd510..9c2c522c8 100644 --- a/replication/src/main/java/org/apache/falcon/replication/FeedReplicator.java +++ b/replication/src/main/java/org/apache/falcon/replication/FeedReplicator.java @@ -30,6 +30,7 @@ import org.apache.falcon.job.JobCountersHandler; import org.apache.falcon.job.JobType; import org.apache.falcon.job.JobCounters; +import org.apache.falcon.util.DistCPOptionsUtil; import org.apache.falcon.util.ReplicationDistCpOption; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; @@ -57,6 +58,7 @@ public class FeedReplicator extends Configured implements Tool { private static final Logger LOG = LoggerFactory.getLogger(FeedReplicator.class); private static final String IGNORE = "IGNORE"; + private static final String TDE_ENCRYPTION_ENABLED = "tdeEncryptionEnabled"; public static void main(String[] args) throws Exception { ToolRunner.run(new Configuration(), new FeedReplicator(), args); @@ -177,10 +179,44 @@ protected CommandLine getCommand(String[] args) throws ParseException { opt.setRequired(false); options.addOption(opt); + opt = new Option(ReplicationDistCpOption.DISTCP_OPTION_PRESERVE_USER.getName(), true, + "preserve user"); + opt.setRequired(false); + options.addOption(opt); + + opt = new Option(ReplicationDistCpOption.DISTCP_OPTION_PRESERVE_GROUP.getName(), true, + "preserve group"); + opt.setRequired(false); + options.addOption(opt); + + opt = new Option(ReplicationDistCpOption.DISTCP_OPTION_PRESERVE_CHECKSUM_TYPE.getName(), true, + "preserve checksum type"); + opt.setRequired(false); + options.addOption(opt); + + opt = new 
Option(ReplicationDistCpOption.DISTCP_OPTION_PRESERVE_ACL.getName(), true, + "preserve ACL"); + opt.setRequired(false); + options.addOption(opt); + + opt = new Option(ReplicationDistCpOption.DISTCP_OPTION_PRESERVE_XATTR.getName(), true, + "preserve XATTR"); + opt.setRequired(false); + options.addOption(opt); + + opt = new Option(ReplicationDistCpOption.DISTCP_OPTION_PRESERVE_TIMES.getName(), true, + "preserve access and modification times"); + opt.setRequired(false); + options.addOption(opt); + opt = new Option("counterLogDir", true, "log directory to store job counter file"); opt.setRequired(false); options.addOption(opt); + opt = new Option(TDE_ENCRYPTION_ENABLED, true, "TDE encryption enabled"); + opt.setRequired(false); + options.addOption(opt); + return new GnuParser().parse(options, args); } @@ -190,61 +226,7 @@ protected DistCpOptions getDistCpOptions(CommandLine cmd) throws FalconException String targetPathString = cmd.getOptionValue("targetPath").trim(); Path targetPath = new Path(targetPathString); - DistCpOptions distcpOptions = new DistCpOptions(srcPaths, targetPath); - distcpOptions.setBlocking(true); - distcpOptions.setMaxMaps(Integer.parseInt(cmd.getOptionValue("maxMaps"))); - distcpOptions.setMapBandwidth(Integer.parseInt(cmd.getOptionValue("mapBandwidth"))); - - String overwrite = cmd.getOptionValue(ReplicationDistCpOption.DISTCP_OPTION_OVERWRITE.getName()); - if (StringUtils.isNotEmpty(overwrite) && overwrite.equalsIgnoreCase(Boolean.TRUE.toString())) { - distcpOptions.setOverwrite(Boolean.parseBoolean(overwrite)); - } else { - distcpOptions.setSyncFolder(true); - } - - String ignoreErrors = cmd.getOptionValue(ReplicationDistCpOption.DISTCP_OPTION_IGNORE_ERRORS.getName()); - if (StringUtils.isNotEmpty(ignoreErrors)) { - distcpOptions.setIgnoreFailures(Boolean.parseBoolean(ignoreErrors)); - } - - String skipChecksum = cmd.getOptionValue(ReplicationDistCpOption.DISTCP_OPTION_SKIP_CHECKSUM.getName()); - if (StringUtils.isNotEmpty(skipChecksum)) { - 
distcpOptions.setSkipCRC(Boolean.parseBoolean(skipChecksum)); - } - - // Removing deleted files by default - FALCON-1844 - String removeDeletedFiles = cmd.getOptionValue( - ReplicationDistCpOption.DISTCP_OPTION_REMOVE_DELETED_FILES.getName(), "true"); - boolean deleteMissing = Boolean.parseBoolean(removeDeletedFiles); - distcpOptions.setDeleteMissing(deleteMissing); - if (deleteMissing) { - // DistCP will fail with InvalidInputException if deleteMissing is set to true and - // if targetPath does not exist. Create targetPath to avoid failures. - FileSystem fs = HadoopClientFactory.get().createProxiedFileSystem(targetPath.toUri(), getConf()); - if (!fs.exists(targetPath)) { - fs.mkdirs(targetPath); - } - } - - String preserveBlockSize = cmd.getOptionValue( - ReplicationDistCpOption.DISTCP_OPTION_PRESERVE_BLOCK_SIZE.getName()); - if (preserveBlockSize != null && Boolean.parseBoolean(preserveBlockSize)) { - distcpOptions.preserve(DistCpOptions.FileAttribute.BLOCKSIZE); - } - - String preserveReplicationCount = cmd.getOptionValue(ReplicationDistCpOption - .DISTCP_OPTION_PRESERVE_REPLICATION_NUMBER.getName()); - if (preserveReplicationCount != null && Boolean.parseBoolean(preserveReplicationCount)) { - distcpOptions.preserve(DistCpOptions.FileAttribute.REPLICATION); - } - - String preservePermission = cmd.getOptionValue( - ReplicationDistCpOption.DISTCP_OPTION_PRESERVE_PERMISSIONS.getName()); - if (preservePermission != null && Boolean.parseBoolean(preservePermission)) { - distcpOptions.preserve(DistCpOptions.FileAttribute.PERMISSION); - } - - return distcpOptions; + return DistCPOptionsUtil.getDistCpOptions(cmd, srcPaths, targetPath, false, getConf()); } private List getPaths(String[] paths) { diff --git a/replication/src/test/java/org/apache/falcon/replication/FeedReplicatorTest.java b/replication/src/test/java/org/apache/falcon/replication/FeedReplicatorTest.java index 2662adef8..b9b383dac 100644 --- 
a/replication/src/test/java/org/apache/falcon/replication/FeedReplicatorTest.java +++ b/replication/src/test/java/org/apache/falcon/replication/FeedReplicatorTest.java @@ -84,7 +84,13 @@ public void testOptionalArguments() throws Exception { * -removeDeletedFilestrue * -preserveBlockSizefalse * -preserveReplicationCounttrue - * -preserveBlockSizefalse + * -preservePermissionfalse + * -preserveUsertrue + * -preserveGroupfalse + * -preserveChecksumTypefalse + * -preserveAcltrue + * -preserveXattrfalse + * -preserveTimesfalse */ final String[] optionalArgs = { "true", @@ -100,6 +106,12 @@ public void testOptionalArguments() throws Exception { "-preserveBlockSize", "false", "-preserveReplicationNumber", "true", "-preservePermission", "false", + "-preserveUser", "true", + "-preserveGroup", "false", + "-preserveChecksumType", "false", + "-preserveAcl", "true", + "-preserveXattr", "false", + "-preserveTimes", "false", }; FeedReplicator replicator = new FeedReplicator(); @@ -128,5 +140,11 @@ private void validateOptionalArguments(DistCpOptions options) { Assert.assertFalse(options.shouldPreserve(DistCpOptions.FileAttribute.BLOCKSIZE)); Assert.assertTrue(options.shouldPreserve(DistCpOptions.FileAttribute.REPLICATION)); Assert.assertFalse(options.shouldPreserve(DistCpOptions.FileAttribute.PERMISSION)); + Assert.assertTrue(options.shouldPreserve(DistCpOptions.FileAttribute.USER)); + Assert.assertFalse(options.shouldPreserve(DistCpOptions.FileAttribute.GROUP)); + Assert.assertFalse(options.shouldPreserve(DistCpOptions.FileAttribute.CHECKSUMTYPE)); + Assert.assertTrue(options.shouldPreserve(DistCpOptions.FileAttribute.ACL)); + Assert.assertFalse(options.shouldPreserve(DistCpOptions.FileAttribute.XATTR)); + Assert.assertFalse(options.shouldPreserve(DistCpOptions.FileAttribute.TIMES)); } } diff --git a/scheduler/src/main/java/org/apache/falcon/workflow/engine/FalconWorkflowEngine.java b/scheduler/src/main/java/org/apache/falcon/workflow/engine/FalconWorkflowEngine.java index 
6dbec0c94..fe1644377 100644 --- a/scheduler/src/main/java/org/apache/falcon/workflow/engine/FalconWorkflowEngine.java +++ b/scheduler/src/main/java/org/apache/falcon/workflow/engine/FalconWorkflowEngine.java @@ -108,12 +108,17 @@ public void dryRun(Entity entity, String clusterName, Boolean skipDryRun) throws @Override public boolean isActive(Entity entity) throws FalconException { - EntityID id = new EntityID(entity); - // Ideally state store should have all entities, but, check anyway. - if (STATE_STORE.entityExists(id)) { - return STATE_STORE.getEntity(id).getCurrentState() != EntityState.STATE.SUBMITTED; + try { + EntityID id = new EntityID(entity); + // Ideally state store should have all entities, but, check anyway. + if (STATE_STORE.entityExists(id)) { + return STATE_STORE.getEntity(id).getCurrentState() != EntityState.STATE.SUBMITTED; + } + return false; + } catch (NullPointerException npe) { + // FalconJPAService is not always used, so catch NPE and return false + return false; } - return false; } @Override @@ -127,6 +132,11 @@ public boolean isCompleted(Entity entity) throws FalconException { return STATE_STORE.isEntityCompleted(new EntityID(entity)); } + @Override + public boolean isMissing(Entity entity) throws FalconException { + return !STATE_STORE.entityExists(new EntityID(entity)); + } + @Override public String suspend(Entity entity) throws FalconException { EXECUTION_SERVICE.suspend(entity); diff --git a/shell/pom.xml b/shell/pom.xml new file mode 100644 index 000000000..fea6b542a --- /dev/null +++ b/shell/pom.xml @@ -0,0 +1,196 @@ + + + + + 4.0.0 + + org.apache.falcon + falcon-main + 0.11-SNAPSHOT + + falcon-shell + Apache Falcon Shell + + + + + org.apache.falcon + falcon-client + + + + commons-net + commons-net + + + + commons-codec + commons-codec + + + + org.apache.commons + commons-lang3 + + + + com.sun.jersey + jersey-client + + + + com.sun.jersey + jersey-core + + + + com.sun.jersey + jersey-json + + + + org.slf4j + slf4j-api + + + + log4j + 
log4j + + + + commons-logging + commons-logging + + + + commons-io + commons-io + 2.4 + + + + jline + jline + + + + com.github.stephenc.findbugs + findbugs-annotations + + + + org.testng + testng + + + + org.springframework.shell + spring-shell + + + + org.springframework + spring-beans + 4.0.3.RELEASE + + + + com.google.code.findbugs + jsr305 + 3.0.0 + + + + + + + + + org.apache.maven.plugins + maven-shade-plugin + 2.4.1 + + false + false + + + org.apache.hadoop:hadoop-auth + org.apache.hadoop.security.authentication.client.Authenticator> + org.apache.falcon:* + org.apache.commons:* + commons-logging:* + commons-net:* + commons-codec:* + commons-io:* + jline:* + org.slf4j:* + log4j:* + com.sun.jersey:* + org.springframework:* + org.springframework.shell:* + + + + + *:* + + META-INF/*.SF + META-INF/*.DSA + META-INF/*.RSA + + + + + + + package + + shade + + + + + + + + + + + + + + + org.apache.rat + apache-rat-plugin + + + falcon-cli-hist.log + + + + + + diff --git a/cli/src/main/java/org/apache/falcon/cli/commands/BaseFalconCommands.java b/shell/src/main/java/org/apache/falcon/shell/commands/BaseFalconCommands.java similarity index 86% rename from cli/src/main/java/org/apache/falcon/cli/commands/BaseFalconCommands.java rename to shell/src/main/java/org/apache/falcon/shell/commands/BaseFalconCommands.java index acff70e15..5d6467be4 100644 --- a/cli/src/main/java/org/apache/falcon/cli/commands/BaseFalconCommands.java +++ b/shell/src/main/java/org/apache/falcon/shell/commands/BaseFalconCommands.java @@ -16,7 +16,7 @@ * limitations under the License. 
*/ -package org.apache.falcon.cli.commands; +package org.apache.falcon.shell.commands; import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.StringUtils; @@ -30,29 +30,34 @@ import java.io.InputStream; import java.util.Properties; -import static org.apache.falcon.cli.FalconCLI.CURRENT_COLO; -import static org.apache.falcon.cli.FalconCLI.FALCON_URL; +import static org.apache.falcon.client.FalconCLIConstants.CURRENT_COLO; +import static org.apache.falcon.client.FalconCLIConstants.FALCON_URL; /** * Common code for all falcon command classes. */ public class BaseFalconCommands implements ExecutionProcessor { - private static final String FALCON_URL_PROPERTY = "falcon.url"; + protected static final String FALCON_URL_PROPERTY = "falcon.url"; private static final String DO_AS = "DO_AS"; private static final String DO_AS_PROPERTY = "do.as"; - private static final String CLIENT_PROPERTIES = "/client.properties"; + private static final String SHELL_PROPERTIES = "/shell.properties"; protected static final String FALCON_URL_ABSENT = "Failed to get falcon url from environment or client properties"; private static Properties clientProperties; private static Properties backupProperties = new Properties(); private static AbstractFalconClient client; - protected static Properties getClientProperties() { + static { + clientProperties = getShellProperties(); + } + + + public static Properties getShellProperties() { if (clientProperties == null) { InputStream inputStream = null; Properties prop = new Properties(System.getProperties()); prop.putAll(backupProperties); try { - inputStream = BaseFalconCommands.class.getResourceAsStream(CLIENT_PROPERTIES); + inputStream = BaseFalconCommands.class.getResourceAsStream(SHELL_PROPERTIES); if (inputStream != null) { try { prop.load(inputStream); @@ -83,7 +88,7 @@ protected static Properties getClientProperties() { static void setClientProperty(String key, String value) { Properties props; try { - props = getClientProperties(); + 
props = getShellProperties(); } catch (FalconCLIException e) { props = backupProperties; } @@ -98,7 +103,7 @@ static void setClientProperty(String key, String value) { public static AbstractFalconClient getFalconClient() { if (client == null) { - client = new FalconClient(getClientProperties().getProperty(FALCON_URL_PROPERTY), getClientProperties()); + client = new FalconClient(getShellProperties().getProperty(FALCON_URL_PROPERTY), getShellProperties()); } return client; } @@ -109,14 +114,14 @@ public static void setFalconClient(AbstractFalconClient abstractFalconClient) { protected String getColo(String colo) { if (colo == null) { - Properties prop = getClientProperties(); + Properties prop = getShellProperties(); colo = prop.getProperty(CURRENT_COLO, "*"); } return colo; } protected String getDoAs() { - return getClientProperties().getProperty(DO_AS_PROPERTY); + return getShellProperties().getProperty(DO_AS_PROPERTY); } @Override diff --git a/shell/src/main/java/org/apache/falcon/shell/commands/FalconAdminCommands.java b/shell/src/main/java/org/apache/falcon/shell/commands/FalconAdminCommands.java new file mode 100644 index 000000000..e56024801 --- /dev/null +++ b/shell/src/main/java/org/apache/falcon/shell/commands/FalconAdminCommands.java @@ -0,0 +1,64 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.falcon.shell.commands; + +import org.springframework.shell.core.annotation.CliCommand; +import org.springframework.stereotype.Component; + +import static org.apache.falcon.client.FalconCLIConstants.STACK_OPTION; +import static org.apache.falcon.client.FalconCLIConstants.STACK_OPTION_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.STATUS_OPT; +import static org.apache.falcon.client.FalconCLIConstants.STATUS_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.VERSION_OPT; +import static org.apache.falcon.client.FalconCLIConstants.VERSION_OPT_DESCRIPTION; + +/** + * Admin commands. + */ +@Component +public class FalconAdminCommands extends BaseFalconCommands { + public static final String ADMIN_PREFIX = "admin"; + public static final String ADMIN_COMMAND_PREFIX = ADMIN_PREFIX + " "; + + @CliCommand(value = {STATUS_OPT, ADMIN_COMMAND_PREFIX + STATUS_OPT}, help = STATUS_OPT_DESCRIPTION) + public String status( + ) { + int status = getFalconClient().getStatus(getDoAs()); + String url = getShellProperties().getProperty(BaseFalconCommands.FALCON_URL_PROPERTY); + if (status != 200) { + throw new RuntimeException("Falcon server is not fully operational (on " + + url + "). 
" + + "Please check log files."); + } else { + return ("Falcon server is running (on " + url + ")"); + } + } + + @CliCommand(value = {ADMIN_COMMAND_PREFIX + STACK_OPTION}, help = STACK_OPTION_DESCRIPTION) + public String stack( + ) { + return getFalconClient().getThreadDump(getDoAs()); + } + + @CliCommand(value = {ADMIN_COMMAND_PREFIX + VERSION_OPT}, help = VERSION_OPT_DESCRIPTION) + public String version( + ) { + return getFalconClient().getVersion(getDoAs()); + } +} diff --git a/cli/src/main/java/org/apache/falcon/cli/commands/FalconConnectionCommands.java b/shell/src/main/java/org/apache/falcon/shell/commands/FalconConnectionCommands.java similarity index 91% rename from cli/src/main/java/org/apache/falcon/cli/commands/FalconConnectionCommands.java rename to shell/src/main/java/org/apache/falcon/shell/commands/FalconConnectionCommands.java index c1e7e30f2..d14e91643 100644 --- a/cli/src/main/java/org/apache/falcon/cli/commands/FalconConnectionCommands.java +++ b/shell/src/main/java/org/apache/falcon/shell/commands/FalconConnectionCommands.java @@ -17,7 +17,7 @@ */ -package org.apache.falcon.cli.commands; +package org.apache.falcon.shell.commands; import org.apache.commons.lang3.StringUtils; @@ -36,9 +36,9 @@ public class FalconConnectionCommands extends BaseFalconCommands { @CliCommand(value = "get", help = "get properties") public String getParameter(@CliOption(key = {"", "key"}, mandatory = false, help = "") final String key) { if (StringUtils.isBlank(key)) { - return getClientProperties().toString(); + return BaseFalconCommands.getShellProperties().toString(); } - return getClientProperties().getProperty(key); + return getShellProperties().getProperty(key); } @CliCommand(value = "set", help = "set properties") diff --git a/cli/src/main/java/org/apache/falcon/cli/commands/FalconEntityCommands.java b/shell/src/main/java/org/apache/falcon/shell/commands/FalconEntityCommands.java similarity index 67% rename from 
cli/src/main/java/org/apache/falcon/cli/commands/FalconEntityCommands.java rename to shell/src/main/java/org/apache/falcon/shell/commands/FalconEntityCommands.java index 427ba1c89..35a6f2ad5 100644 --- a/cli/src/main/java/org/apache/falcon/cli/commands/FalconEntityCommands.java +++ b/shell/src/main/java/org/apache/falcon/shell/commands/FalconEntityCommands.java @@ -16,8 +16,9 @@ * limitations under the License. */ -package org.apache.falcon.cli.commands; +package org.apache.falcon.shell.commands; +import org.apache.falcon.ValidationUtil; import org.apache.falcon.ResponseHelper; import org.apache.falcon.entity.v0.EntityType; import org.apache.falcon.resource.EntityList; @@ -29,82 +30,78 @@ import java.io.File; -import static org.apache.falcon.FalconCLIConstants.DEFINITION_OPT; -import static org.apache.falcon.FalconCLIConstants .DELETE_OPT; -import static org.apache.falcon.FalconCLIConstants .LOOKUP_OPT; -import static org.apache.falcon.FalconCLIConstants.SLA_MISS_ALERT_OPT; -import static org.apache.falcon.FalconCLIConstants.SUBMIT_OPT; -import static org.apache.falcon.FalconCLIConstants.UPDATE_OPT; -import static org.apache.falcon.cli.FalconCLI.validateEntityTypeForSummary; -import static org.apache.falcon.cli.FalconCLI.validateFilterBy; -import static org.apache.falcon.cli.FalconCLI.validateOrderBy; -import static org.apache.falcon.cli.FalconEntityCLI.CLUSTER_OPT; -import static org.apache.falcon.cli.FalconEntityCLI.CLUSTER_OPT_DESCRIPTION; -import static org.apache.falcon.cli.FalconEntityCLI.COLO_OPT; -import static org.apache.falcon.cli.FalconEntityCLI.COLO_OPT_DESCRIPTION; -import static org.apache.falcon.cli.FalconEntityCLI.DEFINITION_OPT_DESCRIPTION; -import static org.apache.falcon.cli.FalconEntityCLI.DELETE_OPT_DESCRIPTION; -import static org.apache.falcon.cli.FalconEntityCLI.DEPENDENCY_OPT; -import static org.apache.falcon.cli.FalconEntityCLI.DEPENDENCY_OPT_DESCRIPTION; -import static org.apache.falcon.cli.FalconEntityCLI.END_OPT; -import static 
org.apache.falcon.cli.FalconEntityCLI.END_OPT_DESCRIPTION; -import static org.apache.falcon.cli.FalconEntityCLI.ENTITY_NAME_OPT; -import static org.apache.falcon.cli.FalconEntityCLI.ENTITY_NAME_OPT_DESCRIPTION; -import static org.apache.falcon.cli.FalconEntityCLI.FIELDS_OPT; -import static org.apache.falcon.cli.FalconEntityCLI.FIELDS_OPT_DESCRIPTION; -import static org.apache.falcon.cli.FalconEntityCLI.FILE_PATH_OPT; -import static org.apache.falcon.cli.FalconEntityCLI.FILE_PATH_OPT_DESCRIPTION; -import static org.apache.falcon.cli.FalconEntityCLI.FILTER_BY_OPT; -import static org.apache.falcon.cli.FalconEntityCLI.FILTER_BY_OPT_DESCRIPTION; -import static org.apache.falcon.cli.FalconEntityCLI.LIST_OPT; -import static org.apache.falcon.cli.FalconEntityCLI.LIST_OPT_DESCRIPTION; -import static org.apache.falcon.cli.FalconEntityCLI.LOOKUP_OPT_DESCRIPTION; -import static org.apache.falcon.cli.FalconEntityCLI.NAMESEQ_OPT; -import static org.apache.falcon.cli.FalconEntityCLI.NAMESEQ_OPT_DESCRIPTION; -import static org.apache.falcon.cli.FalconEntityCLI.NUM_INSTANCES_OPT; -import static org.apache.falcon.cli.FalconEntityCLI.NUM_INSTANCES_OPT_DESCRIPTION; -import static org.apache.falcon.cli.FalconEntityCLI.NUM_RESULTS_OPT; -import static org.apache.falcon.cli.FalconEntityCLI.NUM_RESULTS_OPT_DESCRIPTION; -import static org.apache.falcon.cli.FalconEntityCLI.OFFSET_OPT; -import static org.apache.falcon.cli.FalconEntityCLI.OFFSET_OPT_DESCRIPTION; -import static org.apache.falcon.cli.FalconEntityCLI.ORDER_BY_OPT; -import static org.apache.falcon.cli.FalconEntityCLI.ORDER_BY_OPT_DESCRIPTION; -import static org.apache.falcon.cli.FalconEntityCLI.PATH_OPT; -import static org.apache.falcon.cli.FalconEntityCLI.PATH_OPT_DESCRIPTION; -import static org.apache.falcon.cli.FalconEntityCLI.PROPS_OPT; -import static org.apache.falcon.cli.FalconEntityCLI.PROPS_OPT_DESCRIPTION; -import static org.apache.falcon.cli.FalconEntityCLI.RESUME_OPT; -import static 
org.apache.falcon.cli.FalconEntityCLI.RESUME_OPT_DESCRIPTION; -import static org.apache.falcon.cli.FalconEntityCLI.SCHEDULE_OPT; -import static org.apache.falcon.cli.FalconEntityCLI.SCHEDULE_OPT_DESCRIPTION; -import static org.apache.falcon.cli.FalconEntityCLI.SHOWSCHEDULER_OPT; -import static org.apache.falcon.cli.FalconEntityCLI.SHOWSCHEDULER_OPT_DESCRIPTION; -import static org.apache.falcon.cli.FalconEntityCLI.SKIPDRYRUN_OPT; -import static org.apache.falcon.cli.FalconEntityCLI.SKIPDRYRUN_OPT_DESCRIPTION; -import static org.apache.falcon.cli.FalconEntityCLI.SLA_MISS_ALERT_OPT_DESCRIPTION; -import static org.apache.falcon.cli.FalconEntityCLI.SORT_ORDER_OPT; -import static org.apache.falcon.cli.FalconEntityCLI.SORT_ORDER_OPT_DESCRIPTION; -import static org.apache.falcon.cli.FalconEntityCLI.START_OPT; -import static org.apache.falcon.cli.FalconEntityCLI.START_OPT_DESCRIPTION; -import static org.apache.falcon.cli.FalconEntityCLI.STATUS_OPT; -import static org.apache.falcon.cli.FalconEntityCLI.STATUS_OPT_DESCRIPTION; -import static org.apache.falcon.cli.FalconEntityCLI.SUBMIT_AND_SCHEDULE_OPT; -import static org.apache.falcon.cli.FalconEntityCLI.SUBMIT_AND_SCHEDULE_OPT_DESCRIPTION; -import static org.apache.falcon.cli.FalconEntityCLI.SUBMIT_OPT_DESCRIPTION; -import static org.apache.falcon.cli.FalconEntityCLI.SUMMARY_OPT; -import static org.apache.falcon.cli.FalconEntityCLI.SUMMARY_OPT_DESCRIPTION; -import static org.apache.falcon.cli.FalconEntityCLI.SUSPEND_OPT; -import static org.apache.falcon.cli.FalconEntityCLI.SUSPEND_OPT_DESCRIPTION; -import static org.apache.falcon.cli.FalconEntityCLI.TAGKEYS_OPT; -import static org.apache.falcon.cli.FalconEntityCLI.TAGKEYS_OPT_DESCRIPTION; -import static org.apache.falcon.cli.FalconEntityCLI.TAGS_OPT; -import static org.apache.falcon.cli.FalconEntityCLI.TAGS_OPT_DESCRIPTION; -import static org.apache.falcon.cli.FalconEntityCLI.TYPE_OPT; -import static org.apache.falcon.cli.FalconEntityCLI.TYPE_OPT_DESCRIPTION; -import static 
org.apache.falcon.cli.FalconEntityCLI.UPDATE_OPT_DESCRIPTION; -import static org.apache.falcon.cli.FalconEntityCLI.VALIDATE_OPT; -import static org.apache.falcon.cli.FalconEntityCLI.VALIDATE_OPT_DESCRIPTION; -import static org.apache.falcon.cli.FalconEntityCLI.validateEntityFields; +import static org.apache.falcon.client.FalconCLIConstants.DEFINITION_OPT; +import static org.apache.falcon.client.FalconCLIConstants.DELETE_OPT; +import static org.apache.falcon.client.FalconCLIConstants.LOOKUP_OPT; +import static org.apache.falcon.client.FalconCLIConstants.SLA_MISS_ALERT_OPT; +import static org.apache.falcon.client.FalconCLIConstants.SUBMIT_ONLY_OPT; +import static org.apache.falcon.client.FalconCLIConstants.UPDATE_OPT; +import static org.apache.falcon.client.FalconCLIConstants.CLUSTER_OPT; +import static org.apache.falcon.client.FalconCLIConstants.CLUSTER_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.COLO_OPT; +import static org.apache.falcon.client.FalconCLIConstants.COLO_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.DEFINITION_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.DELETE_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.DEPENDENCY_OPT; +import static org.apache.falcon.client.FalconCLIConstants.DEPENDENCY_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.END_OPT; +import static org.apache.falcon.client.FalconCLIConstants.END_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.ENTITY_NAME_OPT; +import static org.apache.falcon.client.FalconCLIConstants.ENTITY_NAME_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.FIELDS_OPT; +import static org.apache.falcon.client.FalconCLIConstants.FIELDS_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.FILE_PATH_OPT; +import static org.apache.falcon.client.FalconCLIConstants.FILE_PATH_OPT_DESCRIPTION; +import 
static org.apache.falcon.client.FalconCLIConstants.FILTER_BY_OPT; +import static org.apache.falcon.client.FalconCLIConstants.FILTER_BY_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.LIST_OPT; +import static org.apache.falcon.client.FalconCLIConstants.LIST_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.LOOKUP_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.NAMESEQ_OPT; +import static org.apache.falcon.client.FalconCLIConstants.NAMESEQ_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.NUM_INSTANCES_OPT; +import static org.apache.falcon.client.FalconCLIConstants.NUM_INSTANCES_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.NUM_RESULTS_OPT; +import static org.apache.falcon.client.FalconCLIConstants.NUM_RESULTS_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.OFFSET_OPT; +import static org.apache.falcon.client.FalconCLIConstants.OFFSET_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.ORDER_BY_OPT; +import static org.apache.falcon.client.FalconCLIConstants.ORDER_BY_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.PATH_OPT; +import static org.apache.falcon.client.FalconCLIConstants.PATH_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.PROPS_OPT; +import static org.apache.falcon.client.FalconCLIConstants.PROPS_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.RESUME_OPT; +import static org.apache.falcon.client.FalconCLIConstants.RESUME_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.SCHEDULE_OPT; +import static org.apache.falcon.client.FalconCLIConstants.SCHEDULE_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.SHOWSCHEDULER_OPT; +import static org.apache.falcon.client.FalconCLIConstants.SHOWSCHEDULER_OPT_DESCRIPTION; +import static 
org.apache.falcon.client.FalconCLIConstants.SKIPDRYRUN_OPT; +import static org.apache.falcon.client.FalconCLIConstants.SKIPDRYRUN_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.SLA_MISS_ALERT_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.SORT_ORDER_OPT; +import static org.apache.falcon.client.FalconCLIConstants.SORT_ORDER_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.START_OPT; +import static org.apache.falcon.client.FalconCLIConstants.START_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.STATUS_OPT; +import static org.apache.falcon.client.FalconCLIConstants.STATUS_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.SUBMIT_AND_SCHEDULE_OPT; +import static org.apache.falcon.client.FalconCLIConstants.SUBMIT_AND_SCHEDULE_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.SUBMIT_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.SUMMARY_OPT; +import static org.apache.falcon.client.FalconCLIConstants.SUMMARY_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.SUSPEND_OPT; +import static org.apache.falcon.client.FalconCLIConstants.SUSPEND_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.TAGKEYS_OPT; +import static org.apache.falcon.client.FalconCLIConstants.TAGKEYS_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.TAGS_OPT; +import static org.apache.falcon.client.FalconCLIConstants.TAGS_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.TYPE_OPT; +import static org.apache.falcon.client.FalconCLIConstants.TYPE_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.UPDATE_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.VALIDATE_OPT; +import static org.apache.falcon.client.FalconCLIConstants.VALIDATE_OPT_DESCRIPTION; /** * Entity 
Commands. @@ -128,14 +125,16 @@ public String slaAlert( .getFeedSlaMissPendingAlerts(entityType.name().toLowerCase(), entityName, start, end, getColo(colo)); return ResponseHelper.getString(response); } - - @CliCommand(value = ENTITY_COMMAND_PREFIX + SUBMIT_OPT, help = SUBMIT_OPT_DESCRIPTION) + //The command here is submitOnly in place of submit as it conflicts with submitAndSchedule and tab feature will not + //work of shell + @CliCommand(value = ENTITY_COMMAND_PREFIX + SUBMIT_ONLY_OPT, help = SUBMIT_OPT_DESCRIPTION) public String submit( @CliOption(key = {TYPE_OPT}, mandatory = true, help = TYPE_OPT_DESCRIPTION) final EntityType entityType, @CliOption(key = {FILE_PATH_OPT}, mandatory = true, help = FILE_PATH_OPT_DESCRIPTION) final File filePath ) { - return getFalconClient().submit(entityType.name().toLowerCase(), filePath.getPath(), getDoAs()).getMessage(); + return getFalconClient().submit(entityType.name().toLowerCase(), filePath.getPath(), + getDoAs()).getMessage(); } @CliCommand(value = ENTITY_COMMAND_PREFIX + LOOKUP_OPT, help = LOOKUP_OPT_DESCRIPTION) @@ -203,7 +202,8 @@ public String schedule( @CliOption(key = {PROPS_OPT}, mandatory = false, help = PROPS_OPT_DESCRIPTION) final String properties ) { - return getFalconClient().schedule(entityType, entityName, colo, skipDryRun, getDoAs(), properties).getMessage(); + return getFalconClient().schedule(entityType, entityName, colo, skipDryRun, getDoAs(), + properties).getMessage(); } @CliCommand(value = ENTITY_COMMAND_PREFIX + SUSPEND_OPT, help = SUSPEND_OPT_DESCRIPTION) @@ -247,7 +247,8 @@ public String getStatus( help = SHOWSCHEDULER_OPT_DESCRIPTION) final boolean showScheduler ) { - return getFalconClient().getStatus(entityType, entityName, colo, getDoAs(), showScheduler).getMessage(); + return getFalconClient().getStatus(entityType, entityName, colo, getDoAs(), + showScheduler).getMessage(); } @CliCommand(value = ENTITY_COMMAND_PREFIX + DEFINITION_OPT, help = DEFINITION_OPT_DESCRIPTION) @@ -256,7 +257,8 @@ 
public String getDefinition( @CliOption(key = {ENTITY_NAME_OPT}, mandatory = true, help = ENTITY_NAME_OPT_DESCRIPTION) String entityName ) { - return getFalconClient().getDefinition(entityType.name().toLowerCase(), entityName, getDoAs()).toString(); + return getFalconClient().getDefinition(entityType.name().toLowerCase(), + entityName, getDoAs()).toString(); } @CliCommand(value = ENTITY_COMMAND_PREFIX + DEPENDENCY_OPT, help = DEPENDENCY_OPT_DESCRIPTION) @@ -265,7 +267,8 @@ public String getDependency( @CliOption(key = {ENTITY_NAME_OPT}, mandatory = true, help = ENTITY_NAME_OPT_DESCRIPTION) String entityName ) { - return getFalconClient().getDependency(entityType.name().toLowerCase(), entityName, getDoAs()).toString(); + return getFalconClient().getDependency(entityType.name().toLowerCase(), entityName, + getDoAs()).toString(); } // SUSPEND CHECKSTYLE CHECK ParameterNumberCheck @@ -287,11 +290,12 @@ public String list( help = NUM_RESULTS_OPT_DESCRIPTION) final Integer numResults ) { - validateEntityFields(fields); - validateOrderBy(orderBy, ENTITY_PREFIX); - validateFilterBy(filterBy, ENTITY_PREFIX); - EntityList entityList = getFalconClient().getEntityList(entityType.name().toLowerCase(), fields, - nameSubsequence, tagKeywords, filterBy, filterTags, orderBy, sortOrder, offset, numResults, getDoAs()); + ValidationUtil.validateEntityFields(fields); + ValidationUtil.validateOrderBy(orderBy, ENTITY_PREFIX); + ValidationUtil.validateFilterBy(filterBy, ENTITY_PREFIX); + EntityList entityList = getFalconClient().getEntityList(entityType.name().toLowerCase(), + fields, nameSubsequence, tagKeywords, filterBy, filterTags, orderBy, sortOrder, offset, + numResults, getDoAs()); return entityList != null ? 
entityList.toString() : "No entity of type (" + entityType + ") found."; } @@ -314,10 +318,10 @@ public String summary( help = NUM_INSTANCES_OPT_DESCRIPTION) final Integer numInstances ) { - validateEntityTypeForSummary(entityType.name().toLowerCase()); - validateEntityFields(fields); - validateFilterBy(filterBy, ENTITY_PREFIX); - validateOrderBy(orderBy, ENTITY_PREFIX); + ValidationUtil.validateEntityTypeForSummary(entityType.name().toLowerCase()); + ValidationUtil.validateEntityFields(fields); + ValidationUtil.validateFilterBy(filterBy, ENTITY_PREFIX); + ValidationUtil.validateOrderBy(orderBy, ENTITY_PREFIX); return ResponseHelper.getString(getFalconClient().getEntitySummary( entityType.name().toLowerCase(), cluster, start, end, fields, filterBy, filterTags, orderBy, sortOrder, offset, numResults, numInstances, getDoAs())); diff --git a/shell/src/main/java/org/apache/falcon/shell/commands/FalconInstanceCommands.java b/shell/src/main/java/org/apache/falcon/shell/commands/FalconInstanceCommands.java new file mode 100644 index 000000000..f20e20119 --- /dev/null +++ b/shell/src/main/java/org/apache/falcon/shell/commands/FalconInstanceCommands.java @@ -0,0 +1,358 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.falcon.shell.commands; + +import org.apache.falcon.ResponseHelper; +import org.apache.falcon.entity.v0.EntityType; + +import org.springframework.shell.core.annotation.CliCommand; +import org.springframework.shell.core.annotation.CliOption; +import org.springframework.stereotype.Component; + +import java.io.IOException; +import java.io.UnsupportedEncodingException; + + +import static org.apache.falcon.client.FalconCLIConstants.COLO_OPT; +import static org.apache.falcon.client.FalconCLIConstants.COLO_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.DEPENDENCY_OPT; +import static org.apache.falcon.client.FalconCLIConstants.DEPENDENCY_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.END_OPT; +import static org.apache.falcon.client.FalconCLIConstants.END_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.ENTITY_NAME_OPT; +import static org.apache.falcon.client.FalconCLIConstants.ENTITY_NAME_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.FILE_PATH_OPT; +import static org.apache.falcon.client.FalconCLIConstants.FILE_PATH_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.FILTER_BY_OPT; +import static org.apache.falcon.client.FalconCLIConstants.FILTER_BY_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.LIST_OPT; +import static org.apache.falcon.client.FalconCLIConstants.NUM_RESULTS_OPT; +import static org.apache.falcon.client.FalconCLIConstants.NUM_RESULTS_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.OFFSET_OPT; +import static org.apache.falcon.client.FalconCLIConstants.OFFSET_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.ORDER_BY_OPT; +import static org.apache.falcon.client.FalconCLIConstants.ORDER_BY_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.RESUME_OPT; +import static 
org.apache.falcon.client.FalconCLIConstants.RESUME_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.SORT_ORDER_OPT; +import static org.apache.falcon.client.FalconCLIConstants.SORT_ORDER_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.START_OPT; +import static org.apache.falcon.client.FalconCLIConstants.START_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.STATUS_OPT; +import static org.apache.falcon.client.FalconCLIConstants.STATUS_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.SUMMARY_OPT; +import static org.apache.falcon.client.FalconCLIConstants.SUMMARY_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.SUSPEND_OPT; +import static org.apache.falcon.client.FalconCLIConstants.SUSPEND_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.TYPE_OPT; +import static org.apache.falcon.client.FalconCLIConstants.TYPE_OPT_DESCRIPTION; + +import static org.apache.falcon.ValidationUtil.validateFilterBy; +import static org.apache.falcon.ValidationUtil.validateOrderBy; +import static org.apache.falcon.client.FalconCLIConstants.ALL_ATTEMPTS; +import static org.apache.falcon.client.FalconCLIConstants.ALL_ATTEMPTS_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.CLUSTERS_OPT; +import static org.apache.falcon.client.FalconCLIConstants.CLUSTERS_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.FORCE_RERUN_FLAG; +import static org.apache.falcon.client.FalconCLIConstants.FORCE_RERUN_FLAG_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.KILL_OPT; +import static org.apache.falcon.client.FalconCLIConstants.KILL_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.LIFECYCLE_OPT; +import static org.apache.falcon.client.FalconCLIConstants.LIFECYCLE_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.LISTING_OPT; 
+import static org.apache.falcon.client.FalconCLIConstants.LISTING_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.LOG_OPT; +import static org.apache.falcon.client.FalconCLIConstants.LOG_OPT_DESCRIPTION; + +import static org.apache.falcon.client.FalconCLIConstants.PARARMS_OPT; +import static org.apache.falcon.client.FalconCLIConstants.PARARMS_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.RERUN_OPT; +import static org.apache.falcon.client.FalconCLIConstants.RERUN_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.RUNID_OPT; +import static org.apache.falcon.client.FalconCLIConstants.RUNID_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.RUNNING_OPT; +import static org.apache.falcon.client.FalconCLIConstants.RUNNING_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.SOURCECLUSTER_OPT; +import static org.apache.falcon.client.FalconCLIConstants.SOURCECLUSTER_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.TRIAGE_OPT; +import static org.apache.falcon.client.FalconCLIConstants.TRIAGE_OPT_DESCRIPTION; +import static org.apache.falcon.ValidationUtil.getLifeCycle; + +/** + * Instance commands. 
+ */ +@Component +public class FalconInstanceCommands extends BaseFalconCommands { + public static final String INSTANCE_PREFIX = "instance"; + public static final String INSTANCE_COMMAND_PREFIX = INSTANCE_PREFIX + " "; + + @CliCommand(value = INSTANCE_COMMAND_PREFIX + TRIAGE_OPT, help = TRIAGE_OPT_DESCRIPTION) + public String triage( + @CliOption(key = {TYPE_OPT}, mandatory = true, help = TYPE_OPT_DESCRIPTION) final EntityType entityType, + @CliOption(key = {ENTITY_NAME_OPT}, mandatory = true, help = ENTITY_NAME_OPT_DESCRIPTION) + final String entityName, + @CliOption(key = {COLO_OPT}, mandatory = true, help = COLO_OPT_DESCRIPTION) + final String colo, + @CliOption(key = {START_OPT}, mandatory = false, help = START_OPT_DESCRIPTION) final String start + ) { + return getFalconClient().triage(entityType.name(), entityName, start, + getColo(colo)).toString(); + } + + @CliCommand(value = INSTANCE_COMMAND_PREFIX + DEPENDENCY_OPT, help = DEPENDENCY_OPT_DESCRIPTION) + public String dependency( + @CliOption(key = {TYPE_OPT}, mandatory = true, help = TYPE_OPT_DESCRIPTION) final EntityType entityType, + @CliOption(key = {ENTITY_NAME_OPT}, mandatory = true, help = ENTITY_NAME_OPT_DESCRIPTION) + final String entityName, + @CliOption(key = {COLO_OPT}, mandatory = true, help = COLO_OPT_DESCRIPTION) + final String colo, + @CliOption(key = {START_OPT}, mandatory = false, help = START_OPT_DESCRIPTION) final String start + ) { + return getFalconClient().getInstanceDependencies(entityType.name(), entityName, + start, getColo(colo)) + .toString(); + } + // SUSPEND CHECKSTYLE CHECK ParameterNumberCheck + @CliCommand(value = INSTANCE_COMMAND_PREFIX + RUNNING_OPT, help = RUNNING_OPT_DESCRIPTION) + public String running( + @CliOption(key = {TYPE_OPT}, mandatory = true, help = TYPE_OPT_DESCRIPTION) final EntityType entityType, + @CliOption(key = {ENTITY_NAME_OPT}, mandatory = true, help = ENTITY_NAME_OPT_DESCRIPTION) + final String entityName, + @CliOption(key = {COLO_OPT}, mandatory = 
true, help = COLO_OPT_DESCRIPTION) + final String colo, + @CliOption(key = {LIFECYCLE_OPT}, mandatory = false, help = LIFECYCLE_OPT_DESCRIPTION) + final String lifeCycle, + @CliOption(key = {ORDER_BY_OPT}, mandatory = false, help = ORDER_BY_OPT_DESCRIPTION) final String orderBy, + @CliOption(key = {SORT_ORDER_OPT}, mandatory = false, + help = SORT_ORDER_OPT_DESCRIPTION) final String sortOrder, + @CliOption(key = {FILTER_BY_OPT}, mandatory = false, + help = FILTER_BY_OPT_DESCRIPTION) final String filterBy, + @CliOption(key = {OFFSET_OPT}, mandatory = false, help = OFFSET_OPT_DESCRIPTION) final Integer offset, + @CliOption(key = {NUM_RESULTS_OPT}, mandatory = false, + help = NUM_RESULTS_OPT_DESCRIPTION) final Integer numResults + ) { + validateOrderBy(orderBy, INSTANCE_PREFIX); + validateFilterBy(filterBy, INSTANCE_PREFIX); + return ResponseHelper.getString(getFalconClient().getRunningInstances(entityType.name(), + entityName, colo, getLifeCycle(lifeCycle), filterBy, orderBy, sortOrder, offset, numResults, + getDoAs())); + } + + @CliCommand(value = {INSTANCE_COMMAND_PREFIX + STATUS_OPT, INSTANCE_COMMAND_PREFIX + LIST_OPT}, + help = STATUS_OPT_DESCRIPTION) + public String status( + @CliOption(key = {TYPE_OPT}, mandatory = true, help = TYPE_OPT_DESCRIPTION) final EntityType entityType, + @CliOption(key = {ENTITY_NAME_OPT}, mandatory = true, help = ENTITY_NAME_OPT_DESCRIPTION) + final String entityName, + @CliOption(key = {COLO_OPT}, mandatory = false, help = COLO_OPT_DESCRIPTION) + final String colo, + @CliOption(key = {START_OPT}, mandatory = false, help = START_OPT_DESCRIPTION) final String start, + @CliOption(key = {END_OPT}, mandatory = false, help = END_OPT_DESCRIPTION) final String end, + @CliOption(key = {LIFECYCLE_OPT}, mandatory = false, help = LIFECYCLE_OPT_DESCRIPTION) + final String lifeCycle, + @CliOption(key = {ORDER_BY_OPT}, mandatory = false, help = ORDER_BY_OPT_DESCRIPTION) final String orderBy, + @CliOption(key = {SORT_ORDER_OPT}, mandatory = false, + 
help = SORT_ORDER_OPT_DESCRIPTION) final String sortOrder, + @CliOption(key = {FILTER_BY_OPT}, mandatory = false, + help = FILTER_BY_OPT_DESCRIPTION) final String filterBy, + @CliOption(key = {OFFSET_OPT}, mandatory = false, help = OFFSET_OPT_DESCRIPTION) final Integer offset, + @CliOption(key = {NUM_RESULTS_OPT}, mandatory = false, + help = NUM_RESULTS_OPT_DESCRIPTION) final Integer numResults, + @CliOption(key = {ALL_ATTEMPTS}, mandatory = false, specifiedDefaultValue = "true", + help = ALL_ATTEMPTS_DESCRIPTION) final Boolean allAttempts + ) { + validateOrderBy(orderBy, INSTANCE_PREFIX); + validateFilterBy(filterBy, INSTANCE_PREFIX); + return ResponseHelper.getString(getFalconClient().getStatusOfInstances(entityType.name(), + entityName, start, end, getColo(colo), getLifeCycle(lifeCycle), filterBy, orderBy, sortOrder, + offset, numResults, getDoAs(), allAttempts)); + } + + @CliCommand(value = {INSTANCE_COMMAND_PREFIX + SUMMARY_OPT}, + help = SUMMARY_OPT_DESCRIPTION) + public String summary( + @CliOption(key = {TYPE_OPT}, mandatory = true, help = TYPE_OPT_DESCRIPTION) final EntityType entityType, + @CliOption(key = {ENTITY_NAME_OPT}, mandatory = true, help = ENTITY_NAME_OPT_DESCRIPTION) + final String entityName, + @CliOption(key = {COLO_OPT}, mandatory = true, help = COLO_OPT_DESCRIPTION) + final String colo, + @CliOption(key = {START_OPT}, mandatory = false, help = START_OPT_DESCRIPTION) final String start, + @CliOption(key = {END_OPT}, mandatory = false, help = END_OPT_DESCRIPTION) final String end, + @CliOption(key = {LIFECYCLE_OPT}, mandatory = false, help = LIFECYCLE_OPT_DESCRIPTION) + final String lifeCycle, + @CliOption(key = {ORDER_BY_OPT}, mandatory = false, help = ORDER_BY_OPT_DESCRIPTION) final String orderBy, + @CliOption(key = {SORT_ORDER_OPT}, mandatory = false, + help = SORT_ORDER_OPT_DESCRIPTION) final String sortOrder, + @CliOption(key = {FILTER_BY_OPT}, mandatory = false, + help = FILTER_BY_OPT_DESCRIPTION) final String filterBy + ) { + 
validateOrderBy(orderBy, INSTANCE_PREFIX); + validateFilterBy(filterBy, INSTANCE_PREFIX); + return ResponseHelper.getString(getFalconClient().getSummaryOfInstances(entityType.name(), + entityName, start, end, getColo(colo), getLifeCycle(lifeCycle), filterBy, orderBy, + sortOrder, getDoAs())); + } + + @CliCommand(value = {INSTANCE_COMMAND_PREFIX + KILL_OPT}, + help = KILL_OPT_DESCRIPTION) + public String kill( + @CliOption(key = {TYPE_OPT}, mandatory = true, help = TYPE_OPT_DESCRIPTION) final EntityType entityType, + @CliOption(key = {ENTITY_NAME_OPT}, mandatory = true, help = ENTITY_NAME_OPT_DESCRIPTION) + final String entityName, + @CliOption(key = {COLO_OPT}, mandatory = true, help = COLO_OPT_DESCRIPTION) + final String colo, + @CliOption(key = {START_OPT}, mandatory = true, help = START_OPT_DESCRIPTION) final String start, + @CliOption(key = {END_OPT}, mandatory = true, help = END_OPT_DESCRIPTION) final String end, + @CliOption(key = {CLUSTERS_OPT}, mandatory = false, help = CLUSTERS_OPT_DESCRIPTION) final String clusters, + @CliOption(key = {SOURCECLUSTER_OPT}, mandatory = false, help = SOURCECLUSTER_OPT_DESCRIPTION) + final String sourceClusters, + @CliOption(key = {LIFECYCLE_OPT}, mandatory = false, help = LIFECYCLE_OPT_DESCRIPTION) + final String lifeCycle + ) throws UnsupportedEncodingException { + return ResponseHelper.getString(getFalconClient().killInstances(entityType.name(), + entityName, start, end, getColo(colo), clusters, sourceClusters, getLifeCycle(lifeCycle), getDoAs())); + } + + @CliCommand(value = {INSTANCE_COMMAND_PREFIX + SUSPEND_OPT}, + help = SUSPEND_OPT_DESCRIPTION) + public String suspend( + @CliOption(key = {TYPE_OPT}, mandatory = true, help = TYPE_OPT_DESCRIPTION) final EntityType entityType, + @CliOption(key = {ENTITY_NAME_OPT}, mandatory = true, help = ENTITY_NAME_OPT_DESCRIPTION) + final String entityName, + @CliOption(key = {COLO_OPT}, mandatory = true, help = COLO_OPT_DESCRIPTION) + final String colo, + @CliOption(key = 
{START_OPT}, mandatory = true, help = START_OPT_DESCRIPTION) final String start, + @CliOption(key = {END_OPT}, mandatory = true, help = END_OPT_DESCRIPTION) final String end, + @CliOption(key = {CLUSTERS_OPT}, mandatory = false, help = CLUSTERS_OPT_DESCRIPTION) final String clusters, + @CliOption(key = {SOURCECLUSTER_OPT}, mandatory = false, help = SOURCECLUSTER_OPT_DESCRIPTION) + final String sourceClusters, + @CliOption(key = {LIFECYCLE_OPT}, mandatory = false, help = LIFECYCLE_OPT_DESCRIPTION) + final String lifeCycle + ) throws UnsupportedEncodingException { + return ResponseHelper.getString(getFalconClient().suspendInstances(entityType.name(), + entityName, start, end, getColo(colo), clusters, sourceClusters, getLifeCycle(lifeCycle), getDoAs())); + } + + @CliCommand(value = {INSTANCE_COMMAND_PREFIX + RESUME_OPT}, + help = RESUME_OPT_DESCRIPTION) + public String resume( + @CliOption(key = {TYPE_OPT}, mandatory = true, help = TYPE_OPT_DESCRIPTION) final EntityType entityType, + @CliOption(key = {ENTITY_NAME_OPT}, mandatory = true, help = ENTITY_NAME_OPT_DESCRIPTION) + final String entityName, + @CliOption(key = {COLO_OPT}, mandatory = true, help = COLO_OPT_DESCRIPTION) + final String colo, + @CliOption(key = {START_OPT}, mandatory = true, help = START_OPT_DESCRIPTION) final String start, + @CliOption(key = {END_OPT}, mandatory = true, help = END_OPT_DESCRIPTION) final String end, + @CliOption(key = {CLUSTERS_OPT}, mandatory = false, help = CLUSTERS_OPT_DESCRIPTION) final String clusters, + @CliOption(key = {SOURCECLUSTER_OPT}, mandatory = false, help = SOURCECLUSTER_OPT_DESCRIPTION) + final String sourceClusters, + @CliOption(key = {LIFECYCLE_OPT}, mandatory = false, help = LIFECYCLE_OPT_DESCRIPTION) + final String lifeCycle + ) throws UnsupportedEncodingException { + return ResponseHelper.getString(getFalconClient().resumeInstances(entityType.name(), + entityName, start, end, getColo(colo), clusters, sourceClusters, getLifeCycle(lifeCycle), getDoAs())); + } + + 
@CliCommand(value = {INSTANCE_COMMAND_PREFIX + RERUN_OPT}, + help = RERUN_OPT_DESCRIPTION) + public String rerun( + @CliOption(key = {TYPE_OPT}, mandatory = true, help = TYPE_OPT_DESCRIPTION) final EntityType entityType, + @CliOption(key = {ENTITY_NAME_OPT}, mandatory = true, help = ENTITY_NAME_OPT_DESCRIPTION) + final String entityName, + @CliOption(key = {COLO_OPT}, mandatory = true, help = COLO_OPT_DESCRIPTION) + final String colo, + @CliOption(key = {START_OPT}, mandatory = true, help = START_OPT_DESCRIPTION) final String start, + @CliOption(key = {END_OPT}, mandatory = true, help = END_OPT_DESCRIPTION) final String end, + @CliOption(key = {FILE_PATH_OPT}, mandatory = false, help = FILE_PATH_OPT_DESCRIPTION) + final String filePath, + @CliOption(key = {CLUSTERS_OPT}, mandatory = false, help = CLUSTERS_OPT_DESCRIPTION) final String clusters, + @CliOption(key = {SOURCECLUSTER_OPT}, mandatory = false, help = SOURCECLUSTER_OPT_DESCRIPTION) + final String sourceClusters, + @CliOption(key = {LIFECYCLE_OPT}, mandatory = false, help = LIFECYCLE_OPT_DESCRIPTION) + final String lifeCycle, + @CliOption(key = {FORCE_RERUN_FLAG}, mandatory = false, specifiedDefaultValue = "true", + help = FORCE_RERUN_FLAG_DESCRIPTION) final Boolean forceRerun + ) throws IOException { + return ResponseHelper.getString(getFalconClient().rerunInstances(entityType.name(), + entityName, start, end, filePath, getColo(colo), clusters, sourceClusters, getLifeCycle(lifeCycle), + forceRerun, getDoAs())); + } + + @CliCommand(value = {INSTANCE_COMMAND_PREFIX + LOG_OPT}, + help = LOG_OPT_DESCRIPTION) + public String log( + @CliOption(key = {TYPE_OPT}, mandatory = true, help = TYPE_OPT_DESCRIPTION) final EntityType entityType, + @CliOption(key = {ENTITY_NAME_OPT}, mandatory = true, help = ENTITY_NAME_OPT_DESCRIPTION) + final String entityName, + @CliOption(key = {COLO_OPT}, mandatory = true, help = COLO_OPT_DESCRIPTION) + final String colo, + @CliOption(key = {RUNID_OPT}, mandatory = true, help = 
RUNID_OPT_DESCRIPTION) + final String runId, + @CliOption(key = {START_OPT}, mandatory = false, help = START_OPT_DESCRIPTION) final String start, + @CliOption(key = {END_OPT}, mandatory = false, help = END_OPT_DESCRIPTION) final String end, + @CliOption(key = {CLUSTERS_OPT}, mandatory = false, help = CLUSTERS_OPT_DESCRIPTION) final String clusters, + @CliOption(key = {LIFECYCLE_OPT}, mandatory = false, help = LIFECYCLE_OPT_DESCRIPTION) + final String lifeCycle, + @CliOption(key = {ORDER_BY_OPT}, mandatory = false, help = ORDER_BY_OPT_DESCRIPTION) final String orderBy, + @CliOption(key = {SORT_ORDER_OPT}, mandatory = false, + help = SORT_ORDER_OPT_DESCRIPTION) final String sortOrder, + @CliOption(key = {FILTER_BY_OPT}, mandatory = false, + help = FILTER_BY_OPT_DESCRIPTION) final String filterBy, + @CliOption(key = {OFFSET_OPT}, mandatory = false, help = OFFSET_OPT_DESCRIPTION) final Integer offset, + @CliOption(key = {NUM_RESULTS_OPT}, mandatory = false, + help = NUM_RESULTS_OPT_DESCRIPTION) final Integer numResults + ) { + validateOrderBy(orderBy, INSTANCE_PREFIX); + validateFilterBy(filterBy, INSTANCE_PREFIX); + return ResponseHelper.getString(getFalconClient().getLogsOfInstances(entityType.name(), + entityName, start, end, getColo(colo), runId, getLifeCycle(lifeCycle), filterBy, orderBy, sortOrder, + offset, numResults, getDoAs())); + } + // RESUME CHECKSTYLE CHECK ParameterNumberCheck + @CliCommand(value = {INSTANCE_COMMAND_PREFIX + PARARMS_OPT}, + help = PARARMS_OPT_DESCRIPTION) + public String params( + @CliOption(key = {TYPE_OPT}, mandatory = true, help = TYPE_OPT_DESCRIPTION) final EntityType entityType, + @CliOption(key = {ENTITY_NAME_OPT}, mandatory = true, help = ENTITY_NAME_OPT_DESCRIPTION) + final String entityName, + @CliOption(key = {COLO_OPT}, mandatory = true, help = COLO_OPT_DESCRIPTION) + final String colo, + @CliOption(key = {START_OPT}, mandatory = false, help = START_OPT_DESCRIPTION) final String start, + @CliOption(key = {LIFECYCLE_OPT}, 
mandatory = false, help = LIFECYCLE_OPT_DESCRIPTION) + final String lifeCycle + ) throws IOException { + return ResponseHelper.getString(getFalconClient().getParamsOfInstance(entityType.name(), + entityName, start, getColo(colo), getLifeCycle(lifeCycle), getDoAs())); + } + + @CliCommand(value = {INSTANCE_COMMAND_PREFIX + LISTING_OPT}, + help = LISTING_OPT_DESCRIPTION) + public String listing( + @CliOption(key = {TYPE_OPT}, mandatory = true, help = TYPE_OPT_DESCRIPTION) final EntityType entityType, + @CliOption(key = {ENTITY_NAME_OPT}, mandatory = true, help = ENTITY_NAME_OPT_DESCRIPTION) + final String entityName, + @CliOption(key = {COLO_OPT}, mandatory = true, help = COLO_OPT_DESCRIPTION) + final String colo, + @CliOption(key = {START_OPT}, mandatory = false, help = START_OPT_DESCRIPTION) final String start, + @CliOption(key = {END_OPT}, mandatory = false, help = END_OPT_DESCRIPTION) final String end + ) { + return ResponseHelper.getString(getFalconClient().getFeedInstanceListing(entityType.name(), + entityName, start, end, getColo(colo), getDoAs())); + } +} diff --git a/shell/src/main/java/org/apache/falcon/shell/commands/FalconMetadataCommands.java b/shell/src/main/java/org/apache/falcon/shell/commands/FalconMetadataCommands.java new file mode 100644 index 000000000..5b7f7e140 --- /dev/null +++ b/shell/src/main/java/org/apache/falcon/shell/commands/FalconMetadataCommands.java @@ -0,0 +1,162 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.falcon.shell.commands; + +import org.apache.commons.lang3.StringUtils; +import org.apache.falcon.entity.v0.EntityType; +import org.apache.falcon.metadata.RelationshipType; +import org.springframework.shell.core.annotation.CliCommand; +import org.springframework.shell.core.annotation.CliOption; +import org.springframework.stereotype.Component; + +import static org.apache.falcon.client.FalconCLIConstants.NAME_OPT; +import static org.apache.falcon.client.FalconCLIConstants.PIPELINE_OPT; +import static org.apache.falcon.client.FalconCLIConstants.RELATIONS_OPT; +import static org.apache.falcon.client.FalconCLIConstants.CLUSTER_OPT; +import static org.apache.falcon.client.FalconCLIConstants.CLUSTER_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.DIRECTION_OPT; +import static org.apache.falcon.client.FalconCLIConstants.DIRECTION_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.EDGE_CMD; +import static org.apache.falcon.client.FalconCLIConstants.EDGE_CMD_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.FEED_OPT; +import static org.apache.falcon.client.FalconCLIConstants.FEED_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.ID_OPT; +import static org.apache.falcon.client.FalconCLIConstants.ID_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.KEY_OPT; +import static org.apache.falcon.client.FalconCLIConstants.KEY_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.LINEAGE_OPT; 
+import static org.apache.falcon.client.FalconCLIConstants.LINEAGE_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.LIST_OPT; +import static org.apache.falcon.client.FalconCLIConstants.LIST_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.NAME_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.NUM_RESULTS_OPT; +import static org.apache.falcon.client.FalconCLIConstants.NUM_RESULTS_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.PIPELINE_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.PROCESS_OPT; +import static org.apache.falcon.client.FalconCLIConstants.PROCESS_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.RELATIONS_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.TYPE_OPT; +import static org.apache.falcon.client.FalconCLIConstants.TYPE_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.VALUE_OPT; +import static org.apache.falcon.client.FalconCLIConstants.VALUE_OPT_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.VERTEX_CMD; +import static org.apache.falcon.client.FalconCLIConstants.VERTEX_CMD_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.VERTEX_EDGES_CMD; +import static org.apache.falcon.client.FalconCLIConstants.VERTEX_EDGES_CMD_DESCRIPTION; +import static org.apache.falcon.client.FalconCLIConstants.VERTICES_CMD; +import static org.apache.falcon.client.FalconCLIConstants.VERTICES_CMD_DESCRIPTION; +import static org.apache.falcon.ValidationUtil.validateDimensionName; +import static org.apache.falcon.ValidationUtil.validateDimensionType; +import static org.apache.falcon.ValidationUtil.validateId; +import static org.apache.falcon.ValidationUtil.validateScheduleEntity; +import static org.apache.falcon.ValidationUtil.validateVertexEdgesCommand; +import static 
org.apache.falcon.ValidationUtil.validateVerticesCommand; + +/** + * Metadata commands. + */ +@Component +public class FalconMetadataCommands extends BaseFalconCommands { + public static final String METADATA_PREFIX = "metadata"; + public static final String METADATA_COMMAND_PREFIX = METADATA_PREFIX + " "; + + @CliCommand(value = {METADATA_COMMAND_PREFIX + LINEAGE_OPT}, help = LINEAGE_OPT_DESCRIPTION) + public String lineage( + @CliOption(key = {PIPELINE_OPT}, mandatory = true, help = PIPELINE_OPT_DESCRIPTION) final String pipeline + ) { + return getFalconClient().getEntityLineageGraph(pipeline, getDoAs()).getDotNotation(); + } + + @CliCommand(value = {METADATA_COMMAND_PREFIX + LIST_OPT}, help = LIST_OPT_DESCRIPTION) + public String list( + @CliOption(key = {TYPE_OPT}, mandatory = true, help = TYPE_OPT_DESCRIPTION) final String dimensionType, + @CliOption(key = {CLUSTER_OPT}, mandatory = false, help = CLUSTER_OPT_DESCRIPTION) final String cluster, + @CliOption(key = {FEED_OPT}, mandatory = false, help = FEED_OPT_DESCRIPTION) final String feed, + @CliOption(key = {PROCESS_OPT}, mandatory = false, help = PROCESS_OPT_DESCRIPTION) final String process, + @CliOption(key = {NUM_RESULTS_OPT}, mandatory = false, + help = NUM_RESULTS_OPT_DESCRIPTION) final Integer numResults + ) { + validateDimensionType(dimensionType.toUpperCase()); + if (!(dimensionType.toUpperCase()) + .equals(RelationshipType.REPLICATION_METRICS.name())) { + return getFalconClient().getDimensionList(dimensionType, cluster, getDoAs()); + } else { + String schedEntityType = null; + String schedEntityName = null; + if (StringUtils.isNotEmpty(feed)) { + schedEntityType = EntityType.getEnum(FEED_OPT).name(); + schedEntityName = feed; + } else if (StringUtils.isNotEmpty(process)) { + schedEntityType = EntityType.getEnum(PROCESS_OPT).name(); + schedEntityName = process; + } + validateScheduleEntity(schedEntityType, schedEntityName); + + return 
getFalconClient().getReplicationMetricsDimensionList(schedEntityType, schedEntityName, + numResults, getDoAs()); + } + } + + @CliCommand(value = {METADATA_COMMAND_PREFIX + RELATIONS_OPT}, help = RELATIONS_OPT_DESCRIPTION) + public String relations( + @CliOption(key = {TYPE_OPT}, mandatory = true, help = TYPE_OPT_DESCRIPTION) final String dimensionType, + @CliOption(key = {NAME_OPT}, mandatory = true, help = NAME_OPT_DESCRIPTION) final String dimensionName, + @CliOption(key = {CLUSTER_OPT}, mandatory = false, help = CLUSTER_OPT_DESCRIPTION) final String cluster, + @CliOption(key = {FEED_OPT}, mandatory = false, help = FEED_OPT_DESCRIPTION) final String feed, + @CliOption(key = {PROCESS_OPT}, mandatory = false, help = PROCESS_OPT_DESCRIPTION) final String process, + @CliOption(key = {NUM_RESULTS_OPT}, mandatory = false, + help = NUM_RESULTS_OPT_DESCRIPTION) final Integer numResults + ) { + validateDimensionType(dimensionType.toUpperCase()); + validateDimensionName(dimensionName, RELATIONS_OPT); + return getFalconClient().getDimensionRelations(dimensionType, dimensionName, getDoAs()); + } + + @CliCommand(value = {METADATA_COMMAND_PREFIX + VERTEX_CMD}, help = VERTEX_CMD_DESCRIPTION) + public String vertex( + @CliOption(key = {ID_OPT}, mandatory = true, help = ID_OPT_DESCRIPTION) final String id + ) { + validateId(id); + return getFalconClient().getVertex(id, getDoAs()); + } + @CliCommand(value = {METADATA_COMMAND_PREFIX + EDGE_CMD}, help = EDGE_CMD_DESCRIPTION) + public String edge( + @CliOption(key = {ID_OPT}, mandatory = true, help = ID_OPT_DESCRIPTION) final String id + ) { + validateId(id); + return getFalconClient().getEdge(id, getDoAs()); + } + @CliCommand(value = {METADATA_COMMAND_PREFIX + VERTICES_CMD}, help = VERTICES_CMD_DESCRIPTION) + public String vertices( + @CliOption(key = {KEY_OPT}, mandatory = true, help = KEY_OPT_DESCRIPTION) final String key, + @CliOption(key = {VALUE_OPT}, mandatory = true, help = VALUE_OPT_DESCRIPTION) final String value + ) { + 
validateVerticesCommand(key, value); + return getFalconClient().getVertices(key, value, getDoAs()); + } + @CliCommand(value = {METADATA_COMMAND_PREFIX + VERTEX_EDGES_CMD}, help = VERTEX_EDGES_CMD_DESCRIPTION) + public String vertexEdges( + @CliOption(key = {ID_OPT}, mandatory = true, help = ID_OPT_DESCRIPTION) final String id, + @CliOption(key = {DIRECTION_OPT}, mandatory = true, help = DIRECTION_OPT_DESCRIPTION) final String direction + ) { + validateVertexEdgesCommand(id, direction); + return getFalconClient().getVertexEdges(id, direction, getDoAs()); + } + +} diff --git a/shell/src/main/java/org/apache/falcon/shell/commands/FalconProfileCommands.java b/shell/src/main/java/org/apache/falcon/shell/commands/FalconProfileCommands.java new file mode 100644 index 000000000..20562af35 --- /dev/null +++ b/shell/src/main/java/org/apache/falcon/shell/commands/FalconProfileCommands.java @@ -0,0 +1,86 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.falcon.shell.commands; + +import org.apache.commons.io.IOUtils; +import org.apache.falcon.client.FalconCLIException; +import org.springframework.shell.core.annotation.CliCommand; +import org.springframework.shell.core.annotation.CliOption; +import org.springframework.shell.support.util.OsUtils; +import org.springframework.stereotype.Component; + +import javax.annotation.Nonnull; +import java.io.IOException; +import java.io.InputStream; +import java.util.Enumeration; +import java.util.Properties; + + +/** + * To update falcon.url. + */ +@Component +public class FalconProfileCommands extends BaseFalconCommands{ + + public static final String LIST_PROFILE = "listProfile"; + public static final String LIST_HELP = "lists the colos available to set in falcon.url"; + public static final String SET_PROFILE = "updateProfile"; + public static final String SET_HELP = "update falcon.url with new url"; + public static final String PROFILE = "profile"; + private static final String SHELL_PROPERTIES = "/shell.properties"; + private static Properties prop = new Properties(); + + static { + InputStream inputStream = null; + //Need new properties as clientProperties has the system properties as well + try { + inputStream = BaseFalconCommands.class.getResourceAsStream(SHELL_PROPERTIES); + if (inputStream != null) { + try { + prop.load(inputStream); + } catch (IOException e) { + throw new FalconCLIException(e); + } + } + } finally { + IOUtils.closeQuietly(inputStream); + } + } + + @CliCommand(value = LIST_PROFILE , help = LIST_HELP) + public String listProfile() { + StringBuilder stringBuilder = new StringBuilder(); + + Enumeration e = prop.propertyNames(); + while(e.hasMoreElements()){ + Object o = e.nextElement(); + stringBuilder.append(o.toString()).append(OsUtils.LINE_SEPARATOR); + } + return stringBuilder.toString(); + } + + @CliCommand(value = SET_PROFILE , help = SET_HELP) + public String setProfile(@CliOption(key = {PROFILE}, mandatory = true, help = 
"key") + @Nonnull final String key){ + Properties properties = getShellProperties(); + String profile = prop.getProperty(key); + properties.setProperty(FALCON_URL_PROPERTY, profile); + setClientProperty(FALCON_URL_PROPERTY, profile); + return FALCON_URL_PROPERTY +"="+profile; + } +} diff --git a/cli/src/main/java/org/apache/falcon/cli/skel/FalconBanner.java b/shell/src/main/java/org/apache/falcon/shell/skel/FalconBanner.java similarity index 89% rename from cli/src/main/java/org/apache/falcon/cli/skel/FalconBanner.java rename to shell/src/main/java/org/apache/falcon/shell/skel/FalconBanner.java index 03c56c9b5..0f62365e0 100644 --- a/cli/src/main/java/org/apache/falcon/cli/skel/FalconBanner.java +++ b/shell/src/main/java/org/apache/falcon/shell/skel/FalconBanner.java @@ -17,8 +17,9 @@ */ -package org.apache.falcon.cli.skel; +package org.apache.falcon.shell.skel; +import org.apache.falcon.shell.commands.BaseFalconCommands; import org.springframework.core.Ordered; import org.springframework.core.annotation.Order; import org.springframework.shell.plugin.support.DefaultBannerProvider; @@ -40,6 +41,8 @@ public String getBanner() { .append("* Falcon CLI *").append(OsUtils.LINE_SEPARATOR) .append("* *").append(OsUtils.LINE_SEPARATOR) .append("=======================================").append(OsUtils.LINE_SEPARATOR) + .append("falcon.url:"+ BaseFalconCommands.getShellProperties().get("falcon.url")) + .append(OsUtils.LINE_SEPARATOR) .toString(); } diff --git a/cli/src/main/java/org/apache/falcon/cli/skel/FalconHistoryFileProvider.java b/shell/src/main/java/org/apache/falcon/shell/skel/FalconHistoryFileProvider.java similarity index 97% rename from cli/src/main/java/org/apache/falcon/cli/skel/FalconHistoryFileProvider.java rename to shell/src/main/java/org/apache/falcon/shell/skel/FalconHistoryFileProvider.java index 74d003a3a..397452b1f 100644 --- a/cli/src/main/java/org/apache/falcon/cli/skel/FalconHistoryFileProvider.java +++ 
b/shell/src/main/java/org/apache/falcon/shell/skel/FalconHistoryFileProvider.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.falcon.cli.skel; +package org.apache.falcon.shell.skel; import org.springframework.core.Ordered; import org.springframework.core.annotation.Order; diff --git a/cli/src/main/java/org/apache/falcon/cli/skel/FalconPromptProvider.java b/shell/src/main/java/org/apache/falcon/shell/skel/FalconPromptProvider.java similarity index 97% rename from cli/src/main/java/org/apache/falcon/cli/skel/FalconPromptProvider.java rename to shell/src/main/java/org/apache/falcon/shell/skel/FalconPromptProvider.java index d8ead5ba5..738fc3bdd 100644 --- a/cli/src/main/java/org/apache/falcon/cli/skel/FalconPromptProvider.java +++ b/shell/src/main/java/org/apache/falcon/shell/skel/FalconPromptProvider.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.falcon.cli.skel; +package org.apache.falcon.shell.skel; import org.springframework.core.Ordered; import org.springframework.core.annotation.Order; diff --git a/cli/src/main/resources/META-INF/spring/spring-shell-plugin.xml b/shell/src/main/resources/META-INF/spring/spring-shell-plugin.xml similarity index 84% rename from cli/src/main/resources/META-INF/spring/spring-shell-plugin.xml rename to shell/src/main/resources/META-INF/spring/spring-shell-plugin.xml index bd0fed4df..d7a641444 100644 --- a/cli/src/main/resources/META-INF/spring/spring-shell-plugin.xml +++ b/shell/src/main/resources/META-INF/spring/spring-shell-plugin.xml @@ -26,15 +26,15 @@ http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-3.1.xsd http://www.springframework.org/schema/context http://www.springframework.org/schema/context/spring-context-3.1.xsd"> - + + class="org.apache.falcon.shell.commands.FalconConnectionCommands"> + class="org.apache.falcon.shell.commands.FalconEntityCommands"> + 
class="org.apache.falcon.shell.commands.FalconInstanceCommands"> diff --git a/shell/src/main/resources/shell.properties b/shell/src/main/resources/shell.properties new file mode 100644 index 000000000..815e731fa --- /dev/null +++ b/shell/src/main/resources/shell.properties @@ -0,0 +1,25 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +#################################################### +#### This is used for falcon packaging only. 
#### +#################################################### + +falcon.url=http://localhost:16000 +colo1=http://foo:15000 +colo2=http://bar:15000 \ No newline at end of file diff --git a/cli/src/test/java/org/apache/falcon/cli/commands/FalconConnectionCommandsTest.java b/shell/src/test/FalconConnectionCommandsTest.java similarity index 97% rename from cli/src/test/java/org/apache/falcon/cli/commands/FalconConnectionCommandsTest.java rename to shell/src/test/FalconConnectionCommandsTest.java index d6ad6f9c3..21a3be45b 100644 --- a/cli/src/test/java/org/apache/falcon/cli/commands/FalconConnectionCommandsTest.java +++ b/shell/src/test/FalconConnectionCommandsTest.java @@ -40,7 +40,7 @@ public Object[][] provideCommands() { }; } - @Test(dataProvider = "params-commands") +// @Test(dataProvider = "params-commands") public void testGetAndSetParams(String command, Object result, Throwable throwable) throws Throwable { execute(command, result, throwable); } diff --git a/src/bin/falcon-shell b/src/bin/falcon-shell new file mode 100644 index 000000000..9e2b7e54c --- /dev/null +++ b/src/bin/falcon-shell @@ -0,0 +1,39 @@ +#!/bin/bash +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. See accompanying LICENSE file. 
+# + +# resolve links - $0 may be a softlink +PRG="${0}" + +while [ -h "${PRG}" ]; do + ls=`ls -ld "${PRG}"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG=`dirname "${PRG}"`/"$link" + fi +done + +BASEDIR=`dirname ${PRG}` +BASEDIR=`cd ${BASEDIR}/..;pwd` +. ${BASEDIR}/bin/falcon-config.sh 'client' + +JAVA_PROPERTIES="$FALCON_OPTS" +while [[ ${1} =~ ^\-D ]]; do + JAVA_PROPERTIES="${JAVA_PROPERTIES} ${1}" + shift +done + +${JAVA_BIN} ${JAVA_PROPERTIES} -cp ${FALCONCPPATH} -Dfalcon.log.dir=$HOME -Dfalcon.app.type=shell org.springframework.shell.Bootstrap "${@}" \ No newline at end of file diff --git a/src/bin/graphdbutil.sh b/src/bin/graphdbutil.sh new file mode 100644 index 000000000..151ec2f94 --- /dev/null +++ b/src/bin/graphdbutil.sh @@ -0,0 +1,118 @@ +#!/bin/sh +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. See accompanying LICENSE file. 
+# + + +usage() { + echo "usage: $0 operation java-home hadoop-home falcon-home falcon-common-jar input/out-dir" + echo " where operation is either export OR import" + echo " java-home is the java installation location" + echo " hadoop-home is the hadoop installation location" + echo " falcon-home is the falcon home installation location" + echo " falcon-common-jar is the falcon-common-.jar location with GraphUtils" + echo " input/output dir is the directory for the graph data" + exit 1 +} + +if [ $# != 6 ]; then + usage +fi + +operation=$1 +java_home=$2 +hadoop_home=$3 +falcon_home=$4 +falcon_common_jar=$5 +util_dir=$6 + +export=0 +import=0 +keep_temp=Y + +case $operation in + import) import=1 + ;; + export) export=1 + ;; + *) echo "Unknown operation $operation" + usage +esac + +if [ -d $java_home -a -f $java_home/bin/java -a -f $java_home/bin/jar ] ; then + : +else + echo "Invalid java home directory $java_home" + usage +fi + +if [ -d $hadoop_home -a -f $hadoop_home/bin/hadoop ] ; then + : +else + echo "Invalid hadoop home directory $hadoop_home" + usage +fi + +if [ -d $falcon_home -a -f $falcon_home/bin/falcon ] ; then + : +else + echo "Invalid falcon home directory $falcon_home" + usage +fi + +falcon_war=$falcon_home/server/webapp/falcon.war +if [ ! -f $falcon_war ]; then + echo "Falcon war file $falcon_war not available" + usage +fi + +if [ ! -f $falcon_common_jar ]; then + echo "Falcon commons jar file $falcon_common_jar not available" + usage +fi + + +util_tmpdir=/tmp/falcon-graphutil-tmp-$$ +echo "Using $util_tmpdir as temporary directory" +trap "rm -rf $util.tmpdir" 0 2 3 15 +rm -rf $util_tmpdir +mkdir -p $util_tmpdir + +if [ ! -d $util_dir ]; then + echo "Directory $util_dir does not exist" + usage +fi + +if [ x$import = x1 ]; then + if [ ! 
-f $metadata_file ]; then + echo "Directory $util_dir does not exist or $metadata_file not present" + usage + fi +fi + +cd $util_tmpdir +jar -xf $falcon_war +rm ./WEB-INF/lib/jackson* ./WEB-INF/lib/falcon-common*.jar ./WEB-INF/lib/slf4j* ./WEB-INF/lib/activemq* +cp $falcon_common_jar ./WEB-INF/lib/ + +JAVA_HOME=$java_home +export PATH=$JAVA_HOME/bin:$PATH +export CLASSPATH="$falcon_home/conf:./WEB-INF/lib/*:`$hadoop_home/bin/hadoop classpath`" +echo "Using classpath $CLASSPATH" +java -Dfalcon.log.dir=/tmp/ org.apache.falcon.metadata.GraphUpdateUtils $operation $util_dir + +if [ x$keep_temp = xY ]; then + : +else + rm -rf $util_tmpdir +fi \ No newline at end of file diff --git a/src/build/findbugs-exclude.xml b/src/build/findbugs-exclude.xml index 5c35b8c9d..346583d3b 100644 --- a/src/build/findbugs-exclude.xml +++ b/src/build/findbugs-exclude.xml @@ -57,6 +57,12 @@ + + + + + + diff --git a/src/conf/shell.properties b/src/conf/shell.properties new file mode 100644 index 000000000..815e731fa --- /dev/null +++ b/src/conf/shell.properties @@ -0,0 +1,25 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +#################################################### +#### This is used for falcon packaging only. 
#### +#################################################### + +falcon.url=http://localhost:16000 +colo1=http://foo:15000 +colo2=http://bar:15000 \ No newline at end of file diff --git a/src/conf/startup.properties b/src/conf/startup.properties index a107eca8c..ef07e57f0 100644 --- a/src/conf/startup.properties +++ b/src/conf/startup.properties @@ -58,6 +58,15 @@ ##Add if you want to send data to graphite # org.apache.falcon.metrics.MetricNotificationService\ + +##Add if you want to enable BacklogMetricService +# org.apache.falcon.service.FalconJPAService,\ +# org.apache.falcon.metrics.MetricNotificationService,\ +# org.apache.falcon.service.EntitySLAMonitoringService,\ +# org.apache.falcon.service.EntitySLAAlertService,\ +# org.apache.falcon.service.BacklogMetricEmitterService + + ## Add if you want to use Falcon Azure integration ## # org.apache.falcon.adfservice.ADFProviderService ## If you wish to use Falcon native scheduler uncomment out below application services and comment out above application services ## @@ -160,6 +169,9 @@ prism.configstore.listeners=org.apache.falcon.entity.v0.EntityGraph,\ # It must be more than statusCheck frequency, default is 15 mins = 15 * 60 * 1000 *.feed.sla.lookAheadWindow.millis=900000 +##Add if you want to enable BacklogMetricService +#*.feedAlert.listeners=org.apache.falcon.service.BacklogMetricEmitterService + ######### Properties for configuring JMS provider - activemq ######### # Default Active MQ url *.broker.url=tcp://localhost:61616 @@ -337,3 +349,7 @@ prism.configstore.listeners=org.apache.falcon.entity.v0.EntityGraph,\ #*.falcon.graphite.port=2003 #*.falcon.graphite.frequency=1 #*.falcon.graphite.prefix=falcon + +# Backlog Metric Properties +#*.falcon.backlog.metricservice.emit.interval.millisecs=60000 +#*.falcon.backlog.metricservice.recheck.interval.millisecs=600000 diff --git a/src/main/assemblies/standalone-package.xml b/src/main/assemblies/standalone-package.xml index c29169713..cfc3a26a4 100644 --- 
a/src/main/assemblies/standalone-package.xml +++ b/src/main/assemblies/standalone-package.xml @@ -285,6 +285,12 @@ 0644 + + ../shell/target/falcon-shell-${project.version}.jar + client/lib + 0644 + + ../prism/target/prism.keystore conf diff --git a/titan/pom.xml b/titan/pom.xml index 3026d3a13..094b58cb5 100644 --- a/titan/pom.xml +++ b/titan/pom.xml @@ -47,11 +47,6 @@ hadoop-common - - com.vividsolutions - jts - - com.thinkaurelius.titan titan-es diff --git a/unit/src/main/java/org/apache/falcon/unit/FalconUnitClient.java b/unit/src/main/java/org/apache/falcon/unit/FalconUnitClient.java index 1025ceda1..53073f0f9 100644 --- a/unit/src/main/java/org/apache/falcon/unit/FalconUnitClient.java +++ b/unit/src/main/java/org/apache/falcon/unit/FalconUnitClient.java @@ -39,7 +39,9 @@ import org.apache.falcon.resource.InstanceDependencyResult; import org.apache.falcon.resource.InstancesResult; import org.apache.falcon.resource.InstancesSummaryResult; +import org.apache.falcon.resource.LineageGraphResult; import org.apache.falcon.resource.SchedulableEntityInstanceResult; +import org.apache.falcon.resource.TriageResult; import org.apache.falcon.resource.admin.AdminResource; import org.apache.falcon.util.DateUtil; import org.apache.falcon.workflow.WorkflowEngineFactory; @@ -385,6 +387,75 @@ public EntityList getDependency(String entityType, String entityName, String doA return null; } + @Override + public TriageResult triage(String name, String entityName, String start, String colo) { + return null; + } + // SUSPEND CHECKSTYLE CHECK ParameterNumberCheck + @Override + public InstancesResult getRunningInstances(String type, String entity, String colo, List lifeCycles, + String filterBy, String orderBy, String sortOrder, + Integer offset, Integer numResults, String doAsUser) { + return null; + } + // RESUME CHECKSTYLE CHECK ParameterNumberCheck + @Override + public FeedInstanceResult getFeedInstanceListing(String type, String entity, String start, String end, + String colo, 
String doAsUser) { + return null; + } + + @Override + public int getStatus(String doAsUser) { + return 200; + } + + @Override + public String getThreadDump(String doAs) { + return ""; + } + + @Override + public LineageGraphResult getEntityLineageGraph(String pipeline, String doAs) { + return null; + } + + @Override + public String getDimensionList(String dimensionType, String cluster, String doAs) { + return null; + } + + @Override + public String getReplicationMetricsDimensionList(String schedEntityType, String schedEntityName, + Integer numResults, String doAs) { + return null; + } + + @Override + public String getDimensionRelations(String dimensionType, String dimensionName, String doAs) { + return null; + } + + @Override + public String getVertex(String id, String doAs) { + return null; + } + + @Override + public String getVertices(String key, String value, String doAs) { + return null; + } + + @Override + public String getVertexEdges(String id, String direction, String doAs) { + return null; + } + + @Override + public String getEdge(String id, String doAs) { + return null; + } + private boolean checkAndUpdateCluster(Entity entity, EntityType entityType, String cluster) { if (entityType == EntityType.FEED) { return checkAndUpdateFeedClusters(entity, cluster); diff --git a/webapp/src/test/java/org/apache/falcon/cli/FalconCLIIT.java b/webapp/src/test/java/org/apache/falcon/cli/FalconCLIIT.java index 4f72d822b..5cdbf939b 100644 --- a/webapp/src/test/java/org/apache/falcon/cli/FalconCLIIT.java +++ b/webapp/src/test/java/org/apache/falcon/cli/FalconCLIIT.java @@ -18,7 +18,7 @@ package org.apache.falcon.cli; -import org.apache.falcon.FalconCLIConstants; +import org.apache.falcon.client.FalconCLIConstants; import org.apache.falcon.entity.v0.SchemaHelper; import org.apache.falcon.metadata.RelationshipType; import org.apache.falcon.resource.TestContext; diff --git a/webapp/src/test/java/org/apache/falcon/resource/ExtensionManagerIT.java 
b/webapp/src/test/java/org/apache/falcon/resource/ExtensionManagerIT.java index 021d853af..6efe884ab 100644 --- a/webapp/src/test/java/org/apache/falcon/resource/ExtensionManagerIT.java +++ b/webapp/src/test/java/org/apache/falcon/resource/ExtensionManagerIT.java @@ -31,8 +31,9 @@ import java.util.Map; /** - * Unit tests for org.apache.falcon.extensions.ExtensionManager. + * IT tests for org.apache.falcon.extensions.ExtensionManager. */ +@Test (enabled = false) public class ExtensionManagerIT extends AbstractTestExtensionStore { private static final String HDFS_MIRRORING_PROPERTY_TEMPLATE = "/hdfs-mirroring-property-template.txt"; private static final String JOB_NAME_1 = "hdfs-mirroring-job-1"; @@ -57,7 +58,7 @@ public void tearDown() throws Exception { TestContext.deleteEntitiesFromStore(); } - @Test + @Test (enabled = false) public void testTrustedExtensionJob() throws Exception { Map overlay = context.getUniqueOverlay(); String endTime = context.getProcessEndTime(); diff --git a/webapp/src/test/java/org/apache/falcon/resource/InstanceSchedulerManagerJerseyIT.java b/webapp/src/test/java/org/apache/falcon/resource/InstanceSchedulerManagerJerseyIT.java index 6d6d40b49..00dbf7a96 100644 --- a/webapp/src/test/java/org/apache/falcon/resource/InstanceSchedulerManagerJerseyIT.java +++ b/webapp/src/test/java/org/apache/falcon/resource/InstanceSchedulerManagerJerseyIT.java @@ -28,6 +28,7 @@ /** * Tests for Instance operations using Falcon Native Scheduler. 
*/ +@Test (enabled = false) public class InstanceSchedulerManagerJerseyIT extends AbstractSchedulerManagerJerseyIT { @@ -40,7 +41,7 @@ public void setup() throws Exception { super.setup(); } - @Test + @Test (enabled = false) public void testProcessInstanceExecution() throws Exception { UnitTestContext context = new UnitTestContext(); Map overlay = context.getUniqueOverlay(); @@ -64,7 +65,7 @@ public void testProcessInstanceExecution() throws Exception { } - @Test + @Test (enabled = false) public void testKillAndRerunInstances() throws Exception { UnitTestContext context = new UnitTestContext(); Map overlay = context.getUniqueOverlay(); @@ -98,7 +99,7 @@ public void testKillAndRerunInstances() throws Exception { } - @Test + @Test (enabled = false) public void testSuspendResumeInstances() throws Exception { UnitTestContext context = new UnitTestContext(); Map overlay = context.getUniqueOverlay(); @@ -128,7 +129,7 @@ public void testSuspendResumeInstances() throws Exception { START_INSTANCE, InstancesResult.WorkflowStatus.RUNNING); } - @Test + @Test (enabled = false) public void testListInstances() throws Exception { UnitTestContext context = new UnitTestContext(); Map overlay = context.getUniqueOverlay(); @@ -149,7 +150,7 @@ public void testListInstances() throws Exception { Assert.assertEquals(result.getInstances()[2].getInstance(), START_INSTANCE); } - @Test + @Test (enabled = false) public void testInstanceSummary() throws Exception { UnitTestContext context = new UnitTestContext(); Map overlay = context.getUniqueOverlay(); @@ -173,7 +174,7 @@ public void testInstanceSummary() throws Exception { Assert.assertEquals(result.getInstancesSummary()[0].getSummaryMap().get("READY").longValue(), 1L); } - @Test + @Test (enabled = false) public void testProcessWithInputs() throws Exception { UnitTestContext context = new UnitTestContext(); Map overlay = context.getUniqueOverlay(); diff --git a/webapp/src/test/java/org/apache/falcon/resource/ProcessInstanceManagerIT.java 
b/webapp/src/test/java/org/apache/falcon/resource/ProcessInstanceManagerIT.java index 46875208e..37f8b9929 100644 --- a/webapp/src/test/java/org/apache/falcon/resource/ProcessInstanceManagerIT.java +++ b/webapp/src/test/java/org/apache/falcon/resource/ProcessInstanceManagerIT.java @@ -37,6 +37,7 @@ /** * Test class for Process Instance REST API. */ +@Test (enabled = false) public class ProcessInstanceManagerIT extends AbstractSchedulerManagerJerseyIT { private static final String START_INSTANCE = "2012-04-20T00:00Z"; @@ -71,7 +72,7 @@ protected void schedule(TestContext context, int count) throws Exception { OozieTestUtils.waitForProcessWFtoStart(context); } - //@Test + @Test (enabled = false) public void testGetRunningInstances() throws Exception { TestContext context = new TestContext(); schedule(context); @@ -86,7 +87,7 @@ public void testGetRunningInstances() throws Exception { assertInstance(response.getInstances()[0], START_INSTANCE, WorkflowStatus.RUNNING); } - //@Test + @Test (enabled = false) public void testGetRunningInstancesPagination() throws Exception { TestContext context = new TestContext(); schedule(context, 4); @@ -117,7 +118,7 @@ private void assertInstance(Instance processInstance, String instance, WorkflowS Assert.assertEquals(processInstance.getStatus(), status); } - @Test + @Test (enabled = false) public void testGetInstanceStatus() throws Exception { UnitTestContext context = new UnitTestContext(); schedule(context); @@ -131,7 +132,7 @@ public void testGetInstanceStatus() throws Exception { Assert.assertEquals(response.getInstances()[0].getStatus(), WorkflowStatus.RUNNING); } - @Test + @Test (enabled = false) public void testGetInstanceStatusPagination() throws Exception { UnitTestContext context = new UnitTestContext(); schedule(context); @@ -146,7 +147,7 @@ public void testGetInstanceStatusPagination() throws Exception { Assert.assertEquals(response.getInstances()[0].getStatus(), WorkflowStatus.RUNNING); } - @Test + @Test (enabled = false) 
public void testKillInstances() throws Exception { UnitTestContext context = new UnitTestContext(); schedule(context); @@ -172,7 +173,7 @@ public void testKillInstances() throws Exception { Assert.assertEquals(response.getInstances()[0].getStatus(), WorkflowStatus.KILLED); } - @Test + @Test (enabled = false) public void testReRunInstances() throws Exception { UnitTestContext context = new UnitTestContext(); schedule(context); @@ -201,7 +202,7 @@ public void testReRunInstances() throws Exception { Assert.assertEquals(response.getInstances()[0].getStatus(), WorkflowStatus.RUNNING); } - @Test + @Test (enabled = false) public void testSuspendInstances() throws Exception { UnitTestContext context = new UnitTestContext(); schedule(context); @@ -220,7 +221,7 @@ public void testSuspendInstances() throws Exception { Assert.assertEquals(response.getInstances()[0].getStatus(), WorkflowStatus.SUSPENDED); } - @Test + @Test (enabled = false) public void testResumesInstances() throws Exception { UnitTestContext context = new UnitTestContext(); schedule(context); diff --git a/webapp/src/test/java/org/apache/falcon/resource/TestContext.java b/webapp/src/test/java/org/apache/falcon/resource/TestContext.java index 42a52479d..fd4d36494 100644 --- a/webapp/src/test/java/org/apache/falcon/resource/TestContext.java +++ b/webapp/src/test/java/org/apache/falcon/resource/TestContext.java @@ -27,7 +27,7 @@ import org.apache.commons.lang.RandomStringUtils; import org.apache.commons.lang3.StringUtils; import org.apache.commons.net.util.TrustManagerUtils; -import org.apache.falcon.FalconCLIConstants; +import org.apache.falcon.client.FalconCLIConstants; import org.apache.falcon.FalconException; import org.apache.falcon.FalconRuntimException; import org.apache.falcon.catalog.HiveCatalogService; From 271318b9cadde6f4341d3c02e9e71838db490023 Mon Sep 17 00:00:00 2001 From: sandeep Date: Fri, 5 Aug 2016 16:28:13 +0530 Subject: [PATCH 3/8] FALCON-2097. 
Adding UT to the new method for getting next instance time with Delay. --- .../java/org/apache/falcon/entity/EntityUtilTest.java | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/common/src/test/java/org/apache/falcon/entity/EntityUtilTest.java b/common/src/test/java/org/apache/falcon/entity/EntityUtilTest.java index 766b2fa17..42ae3e629 100644 --- a/common/src/test/java/org/apache/falcon/entity/EntityUtilTest.java +++ b/common/src/test/java/org/apache/falcon/entity/EntityUtilTest.java @@ -471,4 +471,12 @@ public void testIsClusterUsedByEntity() throws Exception { Assert.assertFalse(EntityUtil.isEntityDependentOnCluster(process, "fakeCluster")); } + @Test + public void testGetNextInstanceTimeWithDelay() throws Exception { + Date date = getDate("2016-08-10 03:00 UTC"); + Frequency delay = new Frequency("hours(2)"); + Date nextInstanceWithDelay = EntityUtil.getNextInstanceTimeWithDelay(date, delay, TimeZone.getTimeZone("UTC")); + Assert.assertEquals(nextInstanceWithDelay, getDate("2016-08-10 05:00 UTC")); + } + } From c06556623e87cbc52f6b01d44cc420bbb92c72e9 Mon Sep 17 00:00:00 2001 From: sandeep Date: Fri, 5 Aug 2016 16:35:08 +0530 Subject: [PATCH 4/8] reverting last line changes made --- common/src/main/resources/startup.properties | 2 +- src/conf/startup.properties | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/common/src/main/resources/startup.properties b/common/src/main/resources/startup.properties index c961ba696..9207b2519 100644 --- a/common/src/main/resources/startup.properties +++ b/common/src/main/resources/startup.properties @@ -342,4 +342,4 @@ it.workflow.execution.listeners=org.apache.falcon.catalog.CatalogPartitionHandle *.falcon.postprocessing.enable=true ### LogMoveService Thread count -*.falcon.logMoveService.threadCount=50 \ No newline at end of file +*.falcon.logMoveService.threadCount=50 diff --git a/src/conf/startup.properties b/src/conf/startup.properties index f3dc5f90c..a47327a1d 100644 --- 
a/src/conf/startup.properties +++ b/src/conf/startup.properties @@ -359,4 +359,4 @@ prism.configstore.listeners=org.apache.falcon.entity.v0.EntityGraph,\ *.falcon.postprocessing.enable=true ### LogMoveService Thread count -*.falcon.logMoveService.threadCount=50 \ No newline at end of file +*.falcon.logMoveService.threadCount=50 From 73a57f8c68dc9ea0aea62285b27aa11c1b9d95b3 Mon Sep 17 00:00:00 2001 From: sandeep Date: Wed, 4 Jan 2017 16:23:37 +0530 Subject: [PATCH 5/8] FALCON-2235 Suspend/Resume API support for extension job (user extension) --- .../apache/falcon/cli/FalconExtensionCLI.java | 6 +- .../falcon/client/AbstractFalconClient.java | 19 +++ .../apache/falcon/client/FalconClient.java | 6 +- .../resource/proxy/EntityProxyUtil.java | 49 ++++++++ .../resource/proxy/ExtensionManagerProxy.java | 108 ++++++++++-------- .../proxy/SchedulableEntityManagerProxy.java | 41 +------ .../apache/falcon/unit/FalconUnitClient.java | 14 ++- .../falcon/unit/LocalExtensionManager.java | 28 +++++ .../apache/falcon/unit/TestFalconUnit.java | 17 ++- .../falcon/resource/ExtensionManager.java | 22 ++++ 10 files changed, 219 insertions(+), 91 deletions(-) diff --git a/cli/src/main/java/org/apache/falcon/cli/FalconExtensionCLI.java b/cli/src/main/java/org/apache/falcon/cli/FalconExtensionCLI.java index 2a105dc5b..60578d018 100644 --- a/cli/src/main/java/org/apache/falcon/cli/FalconExtensionCLI.java +++ b/cli/src/main/java/org/apache/falcon/cli/FalconExtensionCLI.java @@ -139,10 +139,12 @@ void extensionCommand(CommandLine commandLine, FalconClient client) throws IOExc result = client.scheduleExtensionJob(jobName, colo, doAsUser).getMessage(); } else if (optionsList.contains(FalconCLIConstants.SUSPEND_OPT)) { validateRequiredParameter(jobName, JOB_NAME_OPT); - result = client.suspendExtensionJob(jobName, doAsUser).getMessage(); + colo = getColo(colo); + result = client.suspendExtensionJob(jobName, colo, doAsUser).getMessage(); } else if 
(optionsList.contains(FalconCLIConstants.RESUME_OPT)) { validateRequiredParameter(jobName, JOB_NAME_OPT); - result = client.resumeExtensionJob(jobName, doAsUser).getMessage(); + colo = getColo(colo); + result = client.resumeExtensionJob(jobName, colo, doAsUser).getMessage(); } else if (optionsList.contains(FalconCLIConstants.DELETE_OPT)) { validateRequiredParameter(jobName, JOB_NAME_OPT); result = client.deleteExtensionJob(jobName, doAsUser).getMessage(); diff --git a/client/src/main/java/org/apache/falcon/client/AbstractFalconClient.java b/client/src/main/java/org/apache/falcon/client/AbstractFalconClient.java index 7b8a606e7..49392c257 100644 --- a/client/src/main/java/org/apache/falcon/client/AbstractFalconClient.java +++ b/client/src/main/java/org/apache/falcon/client/AbstractFalconClient.java @@ -255,6 +255,25 @@ public abstract APIResult submitAndScheduleExtensionJob(String extensionName, St * @return APIResult status of the deletion query. */ public abstract APIResult deleteExtensionJob(final String jobName, final String doAsUser); + + /** + * + * @param jobName name of the extension that has to be suspended. + * @param coloExpr comma separated list of colos where the operation has to be performed. + * @param doAsUser proxy user + * @return result status of the suspend operation. + */ + public abstract APIResult suspendExtensionJob(final String jobName, final String coloExpr, final String doAsUser); + + /** + * + * @param jobName name of the extension that has to be resumed. + * @param coloExpr comma separated list of colos where the operation has to be performed. + * @param doAsUser proxy user. + * @return result status of the resume operation. + */ + public abstract APIResult resumeExtensionJob(final String jobName, final String coloExpr, final String doAsUser); + /** * Prepares set of entities the extension has implemented to validate the extension job. * @param jobName job name of the extension job. 
diff --git a/client/src/main/java/org/apache/falcon/client/FalconClient.java b/client/src/main/java/org/apache/falcon/client/FalconClient.java index 277208560..cf457ea77 100644 --- a/client/src/main/java/org/apache/falcon/client/FalconClient.java +++ b/client/src/main/java/org/apache/falcon/client/FalconClient.java @@ -1209,17 +1209,19 @@ public APIResult scheduleExtensionJob(String jobName, final String coloExpr, fin return getResponse(APIResult.class, clientResponse); } - public APIResult suspendExtensionJob(final String jobName, final String doAsUser) { + public APIResult suspendExtensionJob(final String jobName, final String coloExpr, final String doAsUser) { ClientResponse clientResponse = new ResourceBuilder() .path(ExtensionOperations.SUSPEND.path, jobName) + .addQueryParam(COLO, coloExpr) .addQueryParam(DO_AS_OPT, doAsUser) .call(ExtensionOperations.SUSPEND); return getResponse(APIResult.class, clientResponse); } - public APIResult resumeExtensionJob(final String jobName, final String doAsUser) { + public APIResult resumeExtensionJob(final String jobName, final String coloExpr, final String doAsUser) { ClientResponse clientResponse = new ResourceBuilder() .path(ExtensionOperations.RESUME.path, jobName) + .addQueryParam(COLO, coloExpr) .addQueryParam(DO_AS_OPT, doAsUser) .call(ExtensionOperations.RESUME); return getResponse(APIResult.class, clientResponse); diff --git a/prism/src/main/java/org/apache/falcon/resource/proxy/EntityProxyUtil.java b/prism/src/main/java/org/apache/falcon/resource/proxy/EntityProxyUtil.java index ae0a61ae7..7d00442ef 100644 --- a/prism/src/main/java/org/apache/falcon/resource/proxy/EntityProxyUtil.java +++ b/prism/src/main/java/org/apache/falcon/resource/proxy/EntityProxyUtil.java @@ -120,6 +120,55 @@ protected APIResult doExecute(String colo) throws FalconException { return results; } + APIResult proxySchedule(final String type, final String entity, final String coloExpr, + final Boolean skipDryRun, final String properties, + 
final HttpServletRequest bufferedRequest) { + return new EntityProxy(type, entity) { + @Override + protected Set getColosToApply() { + return getColosFromExpression(coloExpr, type, entity); + } + + @Override + protected APIResult doExecute(String colo) throws FalconException { + return getEntityManager(colo).invoke("schedule", bufferedRequest, type, entity, + colo, skipDryRun, properties); + } + }.execute(); + } + + APIResult proxySuspend(final String type, final String entity, final String coloExpr, + final HttpServletRequest bufferedRequest) { + return new EntityProxy(type, entity) { + @Override + protected Set getColosToApply() { + return getColosFromExpression(coloExpr, type, entity); + } + + @Override + protected APIResult doExecute(String colo) throws FalconException { + return getEntityManager(colo).invoke("suspend", bufferedRequest, type, entity, + colo); + } + }.execute(); + } + + APIResult proxyResume(final String type, final String entity, final String coloExpr, + final HttpServletRequest bufferedRequest) { + return new EntityProxy(type, entity) { + @Override + protected Set getColosToApply() { + return getColosFromExpression(coloExpr, type, entity); + } + + @Override + protected APIResult doExecute(String colo) throws FalconException { + return getEntityManager(colo).invoke("resume", bufferedRequest, type, entity, + colo); + } + }.execute(); + } + Map proxyUpdate(final String type, final String entityName, final Boolean skipDryRun, final HttpServletRequest bufferedRequest, Entity newEntity) { final Set oldColos = getApplicableColos(type, entityName); diff --git a/prism/src/main/java/org/apache/falcon/resource/proxy/ExtensionManagerProxy.java b/prism/src/main/java/org/apache/falcon/resource/proxy/ExtensionManagerProxy.java index 6f75dc776..068962100 100644 --- a/prism/src/main/java/org/apache/falcon/resource/proxy/ExtensionManagerProxy.java +++ b/prism/src/main/java/org/apache/falcon/resource/proxy/ExtensionManagerProxy.java @@ -221,52 +221,81 @@ public 
APIResult schedule(@PathParam("job-name") String jobName, @Consumes({MediaType.TEXT_XML, MediaType.TEXT_PLAIN}) @Produces({MediaType.TEXT_XML, MediaType.TEXT_PLAIN, MediaType.APPLICATION_JSON}) public APIResult suspend(@PathParam("job-name") String jobName, - @DefaultValue("") @QueryParam("doAs") String doAsUser) { + @Context HttpServletRequest request, + @DefaultValue("") @QueryParam("doAs") String doAsUser, + @QueryParam("colo") final String coloExpr) { checkIfExtensionServiceIsEnabled(); - try { - List entities = getEntityList("", "", "", TAG_PREFIX_EXTENSION_JOB + jobName, "", doAsUser); - if (entities.isEmpty()) { - // return failure if the extension job doesn't exist - return new APIResult(APIResult.Status.FAILED, "Extension job " + jobName + " doesn't exist."); - } + ExtensionMetaStore metaStore = ExtensionStore.getMetaStore(); + ExtensionJobsBean extensionJobsBean = metaStore.getExtensionJobDetails(jobName); + if (extensionJobsBean == null) { + // return failure if the extension job doesn't exist + LOG.error("Extension Job not found:" + jobName); + throw FalconWebException.newAPIException("ExtensionJob not found:" + jobName, + Response.Status.NOT_FOUND); + } - for (Entity entity : entities) { - if (entity.getEntityType().isSchedulable()) { - if (getWorkflowEngine(entity).isActive(entity)) { - getWorkflowEngine(entity).suspend(entity); - } - } - } - } catch (FalconException | IOException e) { - LOG.error("Error when scheduling extension job: " + jobName + ": ", e); + try { + suspendEntities(extensionJobsBean, coloExpr, request); + } catch (FalconException e) { + LOG.error("Error while suspending entities of the extension: " + jobName + ": ", e); throw FalconWebException.newAPIException(e, Response.Status.INTERNAL_SERVER_ERROR); } return new APIResult(APIResult.Status.SUCCEEDED, "Extension job " + jobName + " suspended successfully"); } + private void suspendEntities(ExtensionJobsBean extensionJobsBean, String coloExpr, final HttpServletRequest request) + 
throws FalconException { + List processes = extensionJobsBean.getProcesses(); + List feeds = extensionJobsBean.getFeeds(); + final HttpServletRequest bufferedRequest = new BufferedRequest(request); + suspendEntities(coloExpr, feeds, EntityType.FEED.name(), bufferedRequest); + suspendEntities(coloExpr, processes, EntityType.PROCESS.name(), bufferedRequest); + } + + private void resumeEntities(ExtensionJobsBean extensionJobsBean, String coloExpr, final HttpServletRequest request) + throws FalconException { + List processes = extensionJobsBean.getProcesses(); + List feeds = extensionJobsBean.getFeeds(); + final HttpServletRequest bufferedRequest = new BufferedRequest(request); + resumeEntities(coloExpr, feeds, EntityType.FEED.name(), bufferedRequest); + resumeEntities(coloExpr, processes, EntityType.PROCESS.name(), bufferedRequest); + } + + private void resumeEntities(String coloExpr, List entityNames, final String entityType, + final HttpServletRequest bufferedRequest) throws FalconException { + for (final String entityName : entityNames) { + entityProxyUtil.proxyResume(entityType, entityName, coloExpr, bufferedRequest); + } + } + + private void suspendEntities(String coloExpr, List entityNames, final String entityType, + final HttpServletRequest bufferedRequest) throws FalconException { + for (final String entityName : entityNames) { + entityProxyUtil.proxySuspend(entityType, entityName, coloExpr, bufferedRequest); + } + } + @POST @Path("resume/{job-name}") @Consumes({MediaType.TEXT_XML, MediaType.TEXT_PLAIN}) @Produces({MediaType.TEXT_XML, MediaType.TEXT_PLAIN, MediaType.APPLICATION_JSON}) public APIResult resume(@PathParam("job-name") String jobName, + @Context HttpServletRequest request, + @QueryParam("colo") final String coloExpr, @DefaultValue("") @QueryParam("doAs") String doAsUser) { checkIfExtensionServiceIsEnabled(); + ExtensionMetaStore metaStore = ExtensionStore.getMetaStore(); + ExtensionJobsBean extensionJobsBean = 
metaStore.getExtensionJobDetails(jobName); + if (extensionJobsBean == null) { + // return failure if the extension job doesn't exist + LOG.error("Extension Job not found:" + jobName); + throw FalconWebException.newAPIException("ExtensionJob not found:" + jobName, + Response.Status.NOT_FOUND); + } try { - List entities = getEntityList("", "", "", TAG_PREFIX_EXTENSION_JOB + jobName, "", doAsUser); - if (entities.isEmpty()) { - // return failure if the extension job doesn't exist - return new APIResult(APIResult.Status.FAILED, "Extension job " + jobName + " doesn't exist."); - } - - for (Entity entity : entities) { - if (entity.getEntityType().isSchedulable()) { - if (getWorkflowEngine(entity).isSuspended(entity)) { - getWorkflowEngine(entity).resume(entity); - } - } - } - } catch (FalconException | IOException e) { - LOG.error("Error when resuming extension job " + jobName + ": ", e); + resumeEntities(extensionJobsBean, coloExpr, request); + } catch (FalconException e) { + LOG.error("Error while resuming entities of the extension: " + jobName + ": ", e); throw FalconWebException.newAPIException(e, Response.Status.INTERNAL_SERVER_ERROR); } return new APIResult(APIResult.Status.SUCCEEDED, "Extension job " + jobName + " resumed successfully"); @@ -405,21 +434,8 @@ private void scheduleEntities(Map> entityMap, HttpServl for (final Entity entity : entry.getValue()) { final HttpServletRequest httpServletRequest = getEntityStream(entity, entity.getEntityType(), request); final HttpServletRequest bufferedRequest = getBufferedRequest(httpServletRequest); - final Set colos = getColosFromExpression(coloExpr, entity.getEntityType().name(), entity); - - new EntityProxy(entity.getEntityType().toString(), entity.getName()) { - @Override - protected Set getColosToApply() { - return colos; - } - - @Override - protected APIResult doExecute(String colo) throws FalconException { - return new EntityProxyUtil().getEntityManager(colo).invoke("schedule", bufferedRequest, - 
entity.getEntityType().toString(), - entity.getName(), colo, Boolean.FALSE, ""); - } - }.execute(); + entityProxyUtil.proxySchedule(entity.getEntityType().name(), entity.getName(), coloExpr, + Boolean.FALSE, "", bufferedRequest); } } } diff --git a/prism/src/main/java/org/apache/falcon/resource/proxy/SchedulableEntityManagerProxy.java b/prism/src/main/java/org/apache/falcon/resource/proxy/SchedulableEntityManagerProxy.java index 8f41c483f..5b5d690fb 100644 --- a/prism/src/main/java/org/apache/falcon/resource/proxy/SchedulableEntityManagerProxy.java +++ b/prism/src/main/java/org/apache/falcon/resource/proxy/SchedulableEntityManagerProxy.java @@ -469,18 +469,7 @@ public APIResult schedule(@Context final HttpServletRequest request, @QueryParam("properties") final String properties) { final HttpServletRequest bufferedRequest = getBufferedRequest(request); - return new EntityProxy(type, entity) { - @Override - protected Set getColosToApply() { - return getColosFromExpression(coloExpr, type, entity); - } - - @Override - protected APIResult doExecute(String colo) throws FalconException { - return entityProxyUtil.getEntityManager(colo).invoke("schedule", bufferedRequest, type, entity, - colo, skipDryRun, properties); - } - }.execute(); + return entityProxyUtil.proxySchedule(type, entity, coloExpr, skipDryRun, properties, bufferedRequest); } /** @@ -531,22 +520,11 @@ public APIResult suspend(@Context final HttpServletRequest request, @Dimension("colo") @QueryParam("colo") final String coloExpr) { final HttpServletRequest bufferedRequest = new BufferedRequest(request); - return new EntityProxy(type, entity) { - @Override - protected Set getColosToApply() { - return getColosFromExpression(coloExpr, type, entity); - } - - @Override - protected APIResult doExecute(String colo) throws FalconException { - return entityProxyUtil.getEntityManager(colo).invoke("suspend", bufferedRequest, type, entity, - colo); - } - }.execute(); + return entityProxyUtil.proxySuspend(type, entity, 
coloExpr, bufferedRequest); } /** - * Resume a supended entity. + * Resume a suspended entity. * @param request Servlet Request * @param type Valid options are feed or process. * @param entity Name of the entity. @@ -564,18 +542,7 @@ public APIResult resume( @Dimension("colo") @QueryParam("colo") final String coloExpr) { final HttpServletRequest bufferedRequest = new BufferedRequest(request); - return new EntityProxy(type, entity) { - @Override - protected Set getColosToApply() { - return getColosFromExpression(coloExpr, type, entity); - } - - @Override - protected APIResult doExecute(String colo) throws FalconException { - return entityProxyUtil.getEntityManager(colo).invoke("resume", bufferedRequest, type, entity, - colo); - } - }.execute(); + return entityProxyUtil.proxyResume(type, entity, coloExpr, bufferedRequest); } //SUSPEND CHECKSTYLE CHECK ParameterNumberCheck diff --git a/unit/src/main/java/org/apache/falcon/unit/FalconUnitClient.java b/unit/src/main/java/org/apache/falcon/unit/FalconUnitClient.java index 6a65d2c28..e44faf972 100644 --- a/unit/src/main/java/org/apache/falcon/unit/FalconUnitClient.java +++ b/unit/src/main/java/org/apache/falcon/unit/FalconUnitClient.java @@ -77,14 +77,14 @@ public class FalconUnitClient extends AbstractFalconClient { private static final String DEFAULT_ORDER_BY = "status"; private static final String DEFAULT_SORTED_ORDER = "asc"; - protected ConfigurationStore configStore; + private ConfigurationStore configStore; private AbstractWorkflowEngine workflowEngine; private LocalSchedulableEntityManager localSchedulableEntityManager; private LocalInstanceManager localInstanceManager; private LocalExtensionManager localExtensionManager; - public FalconUnitClient() throws FalconException { + FalconUnitClient() throws FalconException { configStore = ConfigurationStore.get(); workflowEngine = WorkflowEngineFactory.getWorkflowEngine(); localSchedulableEntityManager = new LocalSchedulableEntityManager(); @@ -123,7 +123,6 @@ public 
APIResult submit(String type, String filePath, String doAsUser) { * @param entityName entity name * @param cluster cluster on which it has to be scheduled * @return - * @throws FalconException */ @Override public APIResult schedule(EntityType entityType, String entityName, String cluster, @@ -377,6 +376,15 @@ public APIResult deleteExtensionJob(String jobName, String doAsUser) { } } + @Override + public APIResult suspendExtensionJob(String jobName, String coloExpr, String doAsUser) { + return localExtensionManager.suspendExtensionJob(jobName, coloExpr, doAsUser); + } + + @Override + public APIResult resumeExtensionJob(String jobName, String coloExpr, String doAsUser) { + return localExtensionManager.resumeExtensionJob(jobName, coloExpr, doAsUser); + } @Override public APIResult getExtensionJobDetails(final String jobName) { diff --git a/unit/src/main/java/org/apache/falcon/unit/LocalExtensionManager.java b/unit/src/main/java/org/apache/falcon/unit/LocalExtensionManager.java index 20ccfcaab..8cd6e8723 100644 --- a/unit/src/main/java/org/apache/falcon/unit/LocalExtensionManager.java +++ b/unit/src/main/java/org/apache/falcon/unit/LocalExtensionManager.java @@ -148,6 +148,34 @@ APIResult updateExtensionJob(String extensionName, String jobName, InputStream c return new APIResult(APIResult.Status.SUCCEEDED, "Updated successfully"); } + APIResult suspendExtensionJob(String jobName, String coloExpr, String doAsUser) { + ExtensionMetaStore metaStore = ExtensionStore.getMetaStore(); + ExtensionJobsBean extensionJobsBean = metaStore.getExtensionJobDetails(jobName); + List feeds = extensionJobsBean.getFeeds(); + List processes = extensionJobsBean.getProcesses(); + for (String entityName : feeds) { + super.suspend(null, EntityType.FEED.name(), entityName, coloExpr); + } + for (String entityName : processes) { + super.suspend(null, EntityType.PROCESS.name(), entityName, coloExpr); + } + return new APIResult(APIResult.Status.SUCCEEDED, "Extension job " + jobName + " suspended 
successfully"); + } + + APIResult resumeExtensionJob(String jobName, String coloExpr, String doAsUser) { + ExtensionMetaStore metaStore = ExtensionStore.getMetaStore(); + ExtensionJobsBean extensionJobsBean = metaStore.getExtensionJobDetails(jobName); + List feeds = extensionJobsBean.getFeeds(); + List processes = extensionJobsBean.getProcesses(); + for (String entityName : feeds) { + super.resume(null, EntityType.FEED.name(), entityName, coloExpr); + } + for (String entityName : processes) { + super.resume(null, EntityType.PROCESS.name(), entityName, coloExpr); + } + return new APIResult(APIResult.Status.SUCCEEDED, "Extension job " + jobName + " resumed successfully"); + } + APIResult registerExtensionMetadata(String extensionName, String packagePath, String description) { return super.registerExtensionMetadata(extensionName, packagePath, description, CurrentUser.getUser()); } diff --git a/unit/src/test/java/org/apache/falcon/unit/TestFalconUnit.java b/unit/src/test/java/org/apache/falcon/unit/TestFalconUnit.java index 508a7bb02..5717fc2a7 100644 --- a/unit/src/test/java/org/apache/falcon/unit/TestFalconUnit.java +++ b/unit/src/test/java/org/apache/falcon/unit/TestFalconUnit.java @@ -438,6 +438,8 @@ public void testExtensionJobOperations() throws Exception { clearDB(); submitCluster(); createExtensionPackage(); + createDir(PROCESS_APP_PATH); + fs.copyFromLocalFile(new Path(getAbsolutePath(WORKFLOW)), new Path(PROCESS_APP_PATH, "workflow.xml")); String packageBuildLib = new Path(EXTENSION_PATH, "libs/build/").toString(); String result = registerExtension(TEST_EXTENSION, STORAGE_URL + EXTENSION_PATH, TEST_EXTENSION); Assert.assertEquals(result, "Extension :testExtension registered successfully."); @@ -454,6 +456,14 @@ public void testExtensionJobOperations() throws Exception { apiResult = getClient().scheduleExtensionJob(TEST_JOB, null, null); assertStatus(apiResult); 
+ apiResult = getClient().getStatus(EntityType.PROCESS, "sample", CLUSTER_NAME, null, false); + Assert.assertEquals(apiResult.getMessage(), "SUSPENDED"); + + apiResult = getClient().resumeExtensionJob(TEST_JOB, null, null); + assertStatus(apiResult); apiResult = getClient().getStatus(EntityType.PROCESS, "sample", CLUSTER_NAME, null, false); assertStatus(apiResult); Assert.assertEquals(apiResult.getMessage(), "RUNNING"); @@ -487,8 +497,13 @@ public void testExtensionJobOperations() throws Exception { } } + @Test + public void testExtensionJobSuspendAndResume() throws Exception { + + } + - void copyExtensionJar(String destDirPath) throws IOException { + private void copyExtensionJar(String destDirPath) throws IOException { File dir = new File(new Path(JARS_DIR).toUri().toURL().getPath()); for (File file : dir.listFiles()) { if (file.toString().endsWith(".jar")) { diff --git a/webapp/src/main/java/org/apache/falcon/resource/ExtensionManager.java b/webapp/src/main/java/org/apache/falcon/resource/ExtensionManager.java index 3a6c9c003..ac05b0f86 100644 --- a/webapp/src/main/java/org/apache/falcon/resource/ExtensionManager.java +++ b/webapp/src/main/java/org/apache/falcon/resource/ExtensionManager.java @@ -130,6 +130,28 @@ public APIResult deleteExtensionMetadata( + "on Prism."); } + @POST + @Path("suspend/{job-name}") + @Consumes({MediaType.TEXT_XML, MediaType.TEXT_PLAIN}) + @Produces({MediaType.TEXT_XML, MediaType.TEXT_PLAIN, MediaType.APPLICATION_JSON}) + public APIResult suspend(@PathParam("job-name") String jobName, + @DefaultValue("") @QueryParam("doAs") String doAsUser) { + LOG.error("Suspend of an extension job is not supported on Server.Please run your operation on Prism "); + throw FalconWebException.newAPIException("Suspend of an extension job is not supported on Server." 
+ + "Please run your operation on Prism."); + } + + @POST + @Path("resume/{job-name}") + @Consumes({MediaType.TEXT_XML, MediaType.TEXT_PLAIN}) + @Produces({MediaType.TEXT_XML, MediaType.TEXT_PLAIN, MediaType.APPLICATION_JSON}) + public APIResult resume(@PathParam("job-name") String jobName, + @DefaultValue("") @QueryParam("doAs") String doAsUser) { + LOG.error("Resume of an extension job is not supported on Server.Please run your operation on Prism "); + throw FalconWebException.newAPIException("Resume of an extension job is not supported on Server." + + "Please run your operation on Prism."); + } + @GET @Path("definition/{extension-name}") @Produces({MediaType.APPLICATION_JSON}) From 80ffd94fde6527d922fdb39165e4c4d435dcedc3 Mon Sep 17 00:00:00 2001 From: sandeep Date: Thu, 5 Jan 2017 12:01:23 +0530 Subject: [PATCH 6/8] FALCON-2235 Incorporated Review comments --- .../resource/AbstractExtensionManager.java | 39 +++------ .../resource/proxy/ExtensionManagerProxy.java | 86 ++++++++----------- .../falcon/unit/LocalExtensionManager.java | 18 ++-- 3 files changed, 55 insertions(+), 88 deletions(-) diff --git a/prism/src/main/java/org/apache/falcon/resource/AbstractExtensionManager.java b/prism/src/main/java/org/apache/falcon/resource/AbstractExtensionManager.java index 63bf1b655..ff8968262 100644 --- a/prism/src/main/java/org/apache/falcon/resource/AbstractExtensionManager.java +++ b/prism/src/main/java/org/apache/falcon/resource/AbstractExtensionManager.java @@ -20,11 +20,8 @@ import org.apache.commons.lang3.StringUtils; import org.apache.falcon.FalconException; import org.apache.falcon.FalconWebException; -import org.apache.falcon.entity.EntityNotRegisteredException; -import org.apache.falcon.entity.EntityUtil; import org.apache.falcon.entity.parser.ValidationException; import org.apache.falcon.extensions.ExtensionStatus; -import org.apache.falcon.entity.v0.Entity; import org.apache.falcon.entity.v0.EntityType; import 
org.apache.falcon.extensions.jdbc.ExtensionMetaStore; import org.apache.falcon.extensions.store.ExtensionStore; @@ -37,8 +34,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import javax.ws.rs.core.Response; -import java.io.IOException; -import java.util.ArrayList; import java.util.List; import java.util.SortedMap; import java.util.TreeMap; @@ -59,9 +54,9 @@ public class AbstractExtensionManager extends AbstractSchedulableEntityManager { private static final String LAST_UPDATE_TIME = "lastUpdatedTime"; public static final String NAME = "name"; - protected static final String EXTENSION_TYPE = "type"; - protected static final String EXTENSION_DESC = "description"; - protected static final String EXTENSION_LOCATION = "location"; + private static final String EXTENSION_TYPE = "type"; + private static final String EXTENSION_DESC = "description"; + private static final String EXTENSION_LOCATION = "location"; protected static void validateExtensionName(final String extensionName) { if (StringUtils.isBlank(extensionName)) { @@ -114,28 +109,14 @@ public APIResult deleteExtensionMetadata(String extensionName){ } } - protected SortedMap> getJobEntities(ExtensionJobsBean extensionJobsBean) - throws FalconException, IOException { - TreeMap> entityMap = new TreeMap<>(); - List processes = extensionJobsBean.getProcesses(); - List feeds = extensionJobsBean.getFeeds(); - entityMap.put(EntityType.PROCESS, getEntities(processes, EntityType.PROCESS)); - entityMap.put(EntityType.FEED, getEntities(feeds, EntityType.FEED)); + protected SortedMap> getJobEntities(ExtensionJobsBean extensionJobsBean) + throws FalconException { + TreeMap> entityMap = new TreeMap<>(); + entityMap.put(EntityType.PROCESS, extensionJobsBean.getProcesses()); + entityMap.put(EntityType.FEED, extensionJobsBean.getFeeds()); return entityMap; } - private List getEntities(List entityNames, EntityType entityType) throws FalconException { - List entities = new ArrayList<>(); - for (String entityName : 
entityNames) { - try { - entities.add(EntityUtil.getEntity(entityType, entityName)); - } catch (EntityNotRegisteredException e) { - LOG.error("Entity {} not found during deletion nothing to do", entityName); - } - } - return entities; - } - private JSONObject buildExtensionJobDetailResult(final String jobName) throws FalconException { ExtensionMetaStore metaStore = ExtensionStore.getMetaStore(); ExtensionJobsBean jobsBean = metaStore.getExtensionJobDetails(jobName); @@ -174,7 +155,7 @@ public static String getJobNameFromTag(String tags) { return tags.substring(nameStart, nameEnd); } - public String disableExtension(String extensionName, String currentUser) { + protected String disableExtension(String extensionName, String currentUser) { validateExtensionName(extensionName); try { return ExtensionStore.get().updateExtensionStatus(extensionName, currentUser, ExtensionStatus.DISABLED); @@ -183,7 +164,7 @@ public String disableExtension(String extensionName, String currentUser) { } } - public String enableExtension(String extensionName, String currentUser) { + protected String enableExtension(String extensionName, String currentUser) { validateExtensionName(extensionName); try { return ExtensionStore.get().updateExtensionStatus(extensionName, currentUser, ExtensionStatus.ENABLED); diff --git a/prism/src/main/java/org/apache/falcon/resource/proxy/ExtensionManagerProxy.java b/prism/src/main/java/org/apache/falcon/resource/proxy/ExtensionManagerProxy.java index 068962100..1ed95b271 100644 --- a/prism/src/main/java/org/apache/falcon/resource/proxy/ExtensionManagerProxy.java +++ b/prism/src/main/java/org/apache/falcon/resource/proxy/ExtensionManagerProxy.java @@ -205,11 +205,11 @@ public APIResult schedule(@PathParam("job-name") String jobName, Response.Status.NOT_FOUND); } - SortedMap> entityMap; + SortedMap> entityMap; try { entityMap = getJobEntities(extensionJobsBean); scheduleEntities(entityMap, request, coloExpr); - } catch (FalconException | IOException | 
JAXBException e) { + } catch (FalconException e) { LOG.error("Error while scheduling entities of the extension: " + jobName + ": ", e); throw FalconWebException.newAPIException(e, Response.Status.INTERNAL_SERVER_ERROR); } @@ -235,7 +235,8 @@ public APIResult suspend(@PathParam("job-name") String jobName, } try { - suspendEntities(extensionJobsBean, coloExpr, request); + SortedMap> entityNameMap = getJobEntities(extensionJobsBean); + suspendEntities(entityNameMap, coloExpr, request); } catch (FalconException e) { LOG.error("Error while suspending entities of the extension: " + jobName + ": ", e); throw FalconWebException.newAPIException(e, Response.Status.INTERNAL_SERVER_ERROR); @@ -243,35 +244,21 @@ public APIResult suspend(@PathParam("job-name") String jobName, return new APIResult(APIResult.Status.SUCCEEDED, "Extension job " + jobName + " suspended successfully"); } - private void suspendEntities(ExtensionJobsBean extensionJobsBean, String coloExpr, final HttpServletRequest request) - throws FalconException { - List processes = extensionJobsBean.getProcesses(); - List feeds = extensionJobsBean.getFeeds(); - final HttpServletRequest bufferedRequest = new BufferedRequest(request); - suspendEntities(coloExpr, feeds, EntityType.FEED.name(), bufferedRequest); - suspendEntities(coloExpr, processes, EntityType.PROCESS.name(), bufferedRequest); - } - - private void resumeEntities(ExtensionJobsBean extensionJobsBean, String coloExpr, final HttpServletRequest request) - throws FalconException { - List processes = extensionJobsBean.getProcesses(); - List feeds = extensionJobsBean.getFeeds(); - final HttpServletRequest bufferedRequest = new BufferedRequest(request); - resumeEntities(coloExpr, feeds, EntityType.FEED.name(), bufferedRequest); - resumeEntities(coloExpr, processes, EntityType.PROCESS.name(), bufferedRequest); - } - - private void resumeEntities(String coloExpr, List entityNames, final String entityType, - final HttpServletRequest bufferedRequest) throws 
FalconException { - for (final String entityName : entityNames) { - entityProxyUtil.proxyResume(entityType, entityName, coloExpr, bufferedRequest); + private void suspendEntities(SortedMap> entityNameMap, String coloExpr, + final HttpServletRequest request) throws FalconException { + for (Map.Entry> entityTypeEntry : entityNameMap.entrySet()) { + for (final String entityName : entityTypeEntry.getValue()) { + entityProxyUtil.proxySuspend(entityTypeEntry.getKey().name(), entityName, coloExpr, request); + } } } - private void suspendEntities(String coloExpr, List entityNames, final String entityType, - final HttpServletRequest bufferedRequest) throws FalconException { - for (final String entityName : entityNames) { - entityProxyUtil.proxySuspend(entityType, entityName, coloExpr, bufferedRequest); + private void resumeEntities(SortedMap> entityNameMap, String coloExpr, + final HttpServletRequest request) throws FalconException { + for (Map.Entry> entityTypeEntry : entityNameMap.entrySet()) { + for (final String entityName : entityTypeEntry.getValue()) { + entityProxyUtil.proxyResume(entityTypeEntry.getKey().name(), entityName, coloExpr, request); + } } } @@ -293,7 +280,8 @@ public APIResult resume(@PathParam("job-name") String jobName, Response.Status.NOT_FOUND); } try { - resumeEntities(extensionJobsBean, coloExpr, request); + SortedMap> entityNameMap = getJobEntities(extensionJobsBean); + resumeEntities(entityNameMap, coloExpr, request); } catch (FalconException e) { LOG.error("Error while resuming entities of the extension: " + jobName + ": ", e); throw FalconWebException.newAPIException(e, Response.Status.INTERNAL_SERVER_ERROR); @@ -317,11 +305,11 @@ public APIResult delete(@PathParam("job-name") String jobName, "Extension job " + jobName + " doesn't exist. 
Nothing to delete."); } - SortedMap> entityMap; + SortedMap> entityMap; try { entityMap = getJobEntities(extensionJobsBean); deleteEntities(entityMap, request); - } catch (FalconException | IOException | JAXBException e) { + } catch (FalconException e) { LOG.error("Error when deleting extension job: " + jobName + ": ", e); throw FalconWebException.newAPIException(e, Response.Status.INTERNAL_SERVER_ERROR); } @@ -417,10 +405,13 @@ public APIResult submitAndSchedule( checkIfExtensionIsEnabled(extensionName); checkIfExtensionJobExists(jobName, extensionName); SortedMap> entityMap; + SortedMap> entityNameMap; + ExtensionMetaStore metaStore = ExtensionStore.getMetaStore(); try { entityMap = getEntityList(extensionName, jobName, feedForms, processForms, config); submitEntities(extensionName, jobName, entityMap, config, request); - scheduleEntities(entityMap, request, coloExpr); + entityNameMap = getJobEntities(metaStore.getExtensionJobDetails(jobName)); + scheduleEntities(entityNameMap, request, coloExpr); } catch (FalconException | IOException | JAXBException e) { LOG.error("Error while submitting extension job: ", e); throw FalconWebException.newAPIException(e, Response.Status.INTERNAL_SERVER_ERROR); @@ -428,14 +419,12 @@ public APIResult submitAndSchedule( return new APIResult(APIResult.Status.SUCCEEDED, "Extension job submitted and scheduled successfully"); } - private void scheduleEntities(Map> entityMap, HttpServletRequest request, String coloExpr) - throws FalconException, JAXBException, IOException { - for (Map.Entry> entry : entityMap.entrySet()) { - for (final Entity entity : entry.getValue()) { - final HttpServletRequest httpServletRequest = getEntityStream(entity, entity.getEntityType(), request); - final HttpServletRequest bufferedRequest = getBufferedRequest(httpServletRequest); - entityProxyUtil.proxySchedule(entity.getEntityType().name(), entity.getName(), coloExpr, - Boolean.FALSE, "", bufferedRequest); + private void scheduleEntities(SortedMap> 
entityMap, HttpServletRequest request, + String coloExpr) throws FalconException { + for (Map.Entry> entityTypeEntry : entityMap.entrySet()) { + for (final String entityName : entityTypeEntry.getValue()) { + entityProxyUtil.proxySchedule(entityTypeEntry.getKey().name(), entityName, coloExpr, + Boolean.FALSE, "", request); } } } @@ -447,16 +436,13 @@ private BufferedRequest getBufferedRequest(HttpServletRequest request) { return new BufferedRequest(request); } - private void deleteEntities(SortedMap> entityMap, HttpServletRequest request) - throws IOException, JAXBException { - for (Map.Entry> entry : entityMap.entrySet()) { - for (final Entity entity : entry.getValue()) { - final HttpServletRequest bufferedRequest = getEntityStream(entity, entity.getEntityType(), request); - final String entityType = entity.getEntityType().toString(); - final String entityName = entity.getName(); - entityProxyUtil.proxyDelete(entityType, entityName, bufferedRequest); + private void deleteEntities(SortedMap> entityMap, HttpServletRequest request) + throws FalconException { + for (Map.Entry> entityTypeEntry : entityMap.entrySet()) { + for (final String entityName : entityTypeEntry.getValue()) { + entityProxyUtil.proxyDelete(entityTypeEntry.getKey().name(), entityName, request); if (!embeddedMode) { - super.delete(bufferedRequest, entityType, entityName, currentColo); + super.delete(request, entityTypeEntry.getKey().name(), entityName, currentColo); } } } diff --git a/unit/src/main/java/org/apache/falcon/unit/LocalExtensionManager.java b/unit/src/main/java/org/apache/falcon/unit/LocalExtensionManager.java index 8cd6e8723..02970f737 100644 --- a/unit/src/main/java/org/apache/falcon/unit/LocalExtensionManager.java +++ b/unit/src/main/java/org/apache/falcon/unit/LocalExtensionManager.java @@ -39,7 +39,7 @@ /** * A proxy implementation of the extension operations in local mode. 
*/ -public class LocalExtensionManager extends AbstractExtensionManager { +class LocalExtensionManager extends AbstractExtensionManager { LocalExtensionManager() {} APIResult submitExtensionJob(String extensionName, String jobName, InputStream configStream, @@ -99,10 +99,10 @@ APIResult scheduleExtensionJob(String jobName, String coloExpr, String doAsUser) throws FalconException, IOException{ ExtensionMetaStore metaStore = ExtensionStore.getMetaStore(); ExtensionJobsBean extensionJobsBean = metaStore.getExtensionJobDetails(jobName); - SortedMap> entityMap = getJobEntities(extensionJobsBean); - for (Map.Entry> entry : entityMap.entrySet()) { - for (Entity entity : entry.getValue()) { - scheduleInternal(entity.getEntityType().name(), entity.getName(), true, null); + SortedMap> entityMap = getJobEntities(extensionJobsBean); + for (Map.Entry> entry : entityMap.entrySet()) { + for (String entityName : entry.getValue()) { + scheduleInternal(entry.getKey().name(), entityName, true, null); } } return new APIResult(APIResult.Status.SUCCEEDED, "Extension job " + jobName + " scheduled successfully"); @@ -111,10 +111,10 @@ APIResult scheduleExtensionJob(String jobName, String coloExpr, String doAsUser) APIResult deleteExtensionJob(String jobName) throws FalconException, IOException { ExtensionMetaStore metaStore = ExtensionStore.getMetaStore(); ExtensionJobsBean extensionJobsBean = metaStore.getExtensionJobDetails(jobName); - SortedMap> entityMap = getJobEntities(extensionJobsBean); - for (Map.Entry> entry : entityMap.entrySet()) { - for (Entity entity : entry.getValue()) { - delete(entity.getEntityType().name(), entity.getName(), null); + SortedMap> entityMap = getJobEntities(extensionJobsBean); + for (Map.Entry> entry : entityMap.entrySet()) { + for (String entityName : entry.getValue()) { + delete(entry.getKey().name(), entityName, null); } } ExtensionStore.getMetaStore().deleteExtensionJob(jobName); From 554824d4687dfb05a1c1a8464d65695b627aeab1 Mon Sep 17 00:00:00 2001 
From: sandeep Date: Thu, 5 Jan 2017 12:46:26 +0530 Subject: [PATCH 7/8] FALCON-2235 new bufferedRequest to let mark/reset apis validation work for the streams --- .../resource/proxy/ExtensionManagerProxy.java | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/prism/src/main/java/org/apache/falcon/resource/proxy/ExtensionManagerProxy.java b/prism/src/main/java/org/apache/falcon/resource/proxy/ExtensionManagerProxy.java index 1ed95b271..8733170a9 100644 --- a/prism/src/main/java/org/apache/falcon/resource/proxy/ExtensionManagerProxy.java +++ b/prism/src/main/java/org/apache/falcon/resource/proxy/ExtensionManagerProxy.java @@ -246,18 +246,20 @@ public APIResult suspend(@PathParam("job-name") String jobName, private void suspendEntities(SortedMap> entityNameMap, String coloExpr, final HttpServletRequest request) throws FalconException { + HttpServletRequest bufferedRequest = new BufferedRequest(request); for (Map.Entry> entityTypeEntry : entityNameMap.entrySet()) { for (final String entityName : entityTypeEntry.getValue()) { - entityProxyUtil.proxySuspend(entityTypeEntry.getKey().name(), entityName, coloExpr, request); + entityProxyUtil.proxySuspend(entityTypeEntry.getKey().name(), entityName, coloExpr, bufferedRequest); } } } private void resumeEntities(SortedMap> entityNameMap, String coloExpr, final HttpServletRequest request) throws FalconException { + HttpServletRequest bufferedRequest = new BufferedRequest(request); for (Map.Entry> entityTypeEntry : entityNameMap.entrySet()) { for (final String entityName : entityTypeEntry.getValue()) { - entityProxyUtil.proxyResume(entityTypeEntry.getKey().name(), entityName, coloExpr, request); + entityProxyUtil.proxyResume(entityTypeEntry.getKey().name(), entityName, coloExpr, bufferedRequest); } } } @@ -421,10 +423,11 @@ public APIResult submitAndSchedule( private void scheduleEntities(SortedMap> entityMap, HttpServletRequest request, String coloExpr) throws FalconException { + HttpServletRequest 
bufferedRequest = new BufferedRequest(request); for (Map.Entry> entityTypeEntry : entityMap.entrySet()) { for (final String entityName : entityTypeEntry.getValue()) { entityProxyUtil.proxySchedule(entityTypeEntry.getKey().name(), entityName, coloExpr, - Boolean.FALSE, "", request); + Boolean.FALSE, "", bufferedRequest); } } } @@ -440,9 +443,10 @@ private void deleteEntities(SortedMap> entityMap, HttpS throws FalconException { for (Map.Entry> entityTypeEntry : entityMap.entrySet()) { for (final String entityName : entityTypeEntry.getValue()) { - entityProxyUtil.proxyDelete(entityTypeEntry.getKey().name(), entityName, request); + HttpServletRequest bufferedRequest = new BufferedRequest(request); + entityProxyUtil.proxyDelete(entityTypeEntry.getKey().name(), entityName, bufferedRequest); if (!embeddedMode) { - super.delete(request, entityTypeEntry.getKey().name(), entityName, currentColo); + super.delete(bufferedRequest, entityTypeEntry.getKey().name(), entityName, currentColo); } } } From f1f1f031c920976b0ec07d3da134f4a83c73821e Mon Sep 17 00:00:00 2001 From: sandeep Date: Fri, 6 Jan 2017 12:36:45 +0530 Subject: [PATCH 8/8] FALCON-2235 Incorporated review comments --- .../apache/falcon/unit/FalconUnitClient.java | 12 ++++++-- .../falcon/unit/LocalExtensionManager.java | 28 ++++++++----------- 2 files changed, 22 insertions(+), 18 deletions(-) diff --git a/unit/src/main/java/org/apache/falcon/unit/FalconUnitClient.java b/unit/src/main/java/org/apache/falcon/unit/FalconUnitClient.java index e44faf972..d76dbcacd 100644 --- a/unit/src/main/java/org/apache/falcon/unit/FalconUnitClient.java +++ b/unit/src/main/java/org/apache/falcon/unit/FalconUnitClient.java @@ -378,12 +378,20 @@ public APIResult deleteExtensionJob(String jobName, String doAsUser) { @Override public APIResult suspendExtensionJob(String jobName, String coloExpr, String doAsUser) { - return localExtensionManager.suspendExtensionJob(jobName, coloExpr, doAsUser); + try { + return 
localExtensionManager.suspendExtensionJob(jobName, coloExpr, doAsUser); + } catch (FalconException e) { + throw new FalconCLIException("Failed in suspending the extension job:" + jobName); + } } @Override public APIResult resumeExtensionJob(String jobName, String coloExpr, String doAsUser) { - return localExtensionManager.resumeExtensionJob(jobName, coloExpr, doAsUser); + try { + return localExtensionManager.resumeExtensionJob(jobName, coloExpr, doAsUser); + } catch (FalconException e) { + throw new FalconCLIException("Failed in resuming the extension job:" + jobName); + } } @Override diff --git a/unit/src/main/java/org/apache/falcon/unit/LocalExtensionManager.java b/unit/src/main/java/org/apache/falcon/unit/LocalExtensionManager.java index 02970f737..a32dbfa53 100644 --- a/unit/src/main/java/org/apache/falcon/unit/LocalExtensionManager.java +++ b/unit/src/main/java/org/apache/falcon/unit/LocalExtensionManager.java @@ -148,30 +148,26 @@ APIResult updateExtensionJob(String extensionName, String jobName, InputStream c return new APIResult(APIResult.Status.SUCCEEDED, "Updated successfully"); } - APIResult suspendExtensionJob(String jobName, String coloExpr, String doAsUser) { + APIResult suspendExtensionJob(String jobName, String coloExpr, String doAsUser) throws FalconException { ExtensionMetaStore metaStore = ExtensionStore.getMetaStore(); ExtensionJobsBean extensionJobsBean = metaStore.getExtensionJobDetails(jobName); - List feeds = extensionJobsBean.getFeeds(); - List processes = extensionJobsBean.getProcesses(); - for (String entityName : feeds) { - super.suspend(null, EntityType.FEED.name(), entityName, coloExpr); - } - for (String entityName : processes) { - super.suspend(null, EntityType.PROCESS.name(), entityName, coloExpr); + SortedMap> entityMap = getJobEntities(extensionJobsBean); + for (Map.Entry> entityTypeEntry : entityMap.entrySet()) { + for (String entityName : entityTypeEntry.getValue()) { + super.suspend(null, entityTypeEntry.getKey().name(), 
entityName, coloExpr); + } } return new APIResult(APIResult.Status.SUCCEEDED, "Extension job " + jobName + " suspended successfully"); } - APIResult resumeExtensionJob(String jobName, String coloExpr, String doAsUser) { + APIResult resumeExtensionJob(String jobName, String coloExpr, String doAsUser) throws FalconException { ExtensionMetaStore metaStore = ExtensionStore.getMetaStore(); ExtensionJobsBean extensionJobsBean = metaStore.getExtensionJobDetails(jobName); - List feeds = extensionJobsBean.getFeeds(); - List processes = extensionJobsBean.getProcesses(); - for (String entityName : feeds) { - super.resume(null, EntityType.FEED.name(), entityName, coloExpr); - } - for (String entityName : processes) { - super.resume(null, EntityType.PROCESS.name(), entityName, coloExpr); + SortedMap> entityMap = getJobEntities(extensionJobsBean); + for (Map.Entry> entityTypeEntry : entityMap.entrySet()) { + for (String entityName : entityTypeEntry.getValue()) { + super.resume(null, entityTypeEntry.getKey().name(), entityName, coloExpr); + } } return new APIResult(APIResult.Status.SUCCEEDED, "Extension job " + jobName + " suspended successfully"); }