diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..a7d91c4d
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,6 @@
+target
+.settings
+.classpath
+.project
+/bin/
+job-repository.db
\ No newline at end of file
diff --git a/ambari-infra-assembly/pom.xml b/ambari-infra-assembly/pom.xml
index 51e58047..fafef7e8 100644
--- a/ambari-infra-assembly/pom.xml
+++ b/ambari-infra-assembly/pom.xml
@@ -38,6 +38,10 @@
${mapping.base.path}/${solr.client.package.name}
${project.basedir}/../ambari-infra-solr-client
${project.basedir}/../ambari-infra-solr-plugin
+ ambari-infra-manager
+ ${project.basedir}/../ambari-infra-manager
+ ${mapping.base.path}/${infra-manager.package.name}
+ /etc/${infra-manager.package.name}/conf
@@ -118,6 +122,45 @@
+
+ infra-manager
+ package
+
+ rpm
+
+
+ Development
+ ${infra-manager.package.name}
+
+
+ ${infra-manager.mapping.path}
+
+
+ ${infra-manager.dir}/target/package
+
+ log4j.xml
+ infra-manager.properties
+ infra-manager-env.sh
+
+
+
+
+
+ ${infra-manager.conf.mapping.path}
+
+
+ ${infra-manager.dir}/target/package
+
+ log4j.xml
+ infra-manager.properties
+ infra-manager-env.sh
+
+
+
+
+
+
+
@@ -277,6 +320,49 @@
+
+
+ package
+ jdeb-infra-manager
+
+ jdeb
+
+
+ ${basedir}/src/main/package/deb/manager
+ ${basedir}/target/${infra-manager.package.name}_${package-version}-${package-release}.deb
+ false
+ false
+
+
+ ${infra-manager.dir}/target/ambari-infra-manager.tar.gz
+ archive
+
+ perm
+ root
+ root
+ ${infra-manager.mapping.path}
+
+
+ log4j.xml,infra-manager.properties,infra-manager-env.sh
+
+
+
+ ${infra-manager.dir}/target/package
+ directory
+
+ ${infra-manager.conf.mapping.path}
+ perm
+ root
+ root
+ 644
+
+
+ log4j.xml,infra-manager.properties,infra-manager-env.sh
+
+
+
+
+
@@ -330,6 +416,11 @@
ambari-infra-solr-plugin
${project.version}
+
+ org.apache.ambari
+ ambari-infra-manager
+ ${project.version}
+
diff --git a/ambari-infra-assembly/src/main/package/deb/manager/control b/ambari-infra-assembly/src/main/package/deb/manager/control
new file mode 100644
index 00000000..03663a06
--- /dev/null
+++ b/ambari-infra-assembly/src/main/package/deb/manager/control
@@ -0,0 +1,22 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+Package: [[infra-manager.package.name]]
+Version: [[package-version]]-[[package-release]]
+Section: [[deb.section]]
+Priority: [[deb.priority]]
+Depends: [[deb.dependency.list]]
+Architecture: [[deb.architecture]]
+Description: [[description]]
+Maintainer: [[deb.publisher]]
\ No newline at end of file
diff --git a/ambari-infra-assembly/src/main/package/deb/manager/postinst b/ambari-infra-assembly/src/main/package/deb/manager/postinst
new file mode 100644
index 00000000..21a01faa
--- /dev/null
+++ b/ambari-infra-assembly/src/main/package/deb/manager/postinst
@@ -0,0 +1,15 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
diff --git a/ambari-infra-assembly/src/main/package/deb/manager/postrm b/ambari-infra-assembly/src/main/package/deb/manager/postrm
new file mode 100644
index 00000000..21a01faa
--- /dev/null
+++ b/ambari-infra-assembly/src/main/package/deb/manager/postrm
@@ -0,0 +1,15 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
diff --git a/ambari-infra-assembly/src/main/package/deb/manager/preinst b/ambari-infra-assembly/src/main/package/deb/manager/preinst
new file mode 100644
index 00000000..21a01faa
--- /dev/null
+++ b/ambari-infra-assembly/src/main/package/deb/manager/preinst
@@ -0,0 +1,15 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
diff --git a/ambari-infra-assembly/src/main/package/deb/manager/prerm b/ambari-infra-assembly/src/main/package/deb/manager/prerm
new file mode 100644
index 00000000..21a01faa
--- /dev/null
+++ b/ambari-infra-assembly/src/main/package/deb/manager/prerm
@@ -0,0 +1,15 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
diff --git a/ambari-infra-assembly/src/main/resources/solr b/ambari-infra-assembly/src/main/resources/solr
old mode 100644
new mode 100755
index bf504d9f..6f2de8fd
--- a/ambari-infra-assembly/src/main/resources/solr
+++ b/ambari-infra-assembly/src/main/resources/solr
@@ -49,6 +49,9 @@ SOLR_SCRIPT="$0"
verbose=false
THIS_OS=`uname -s`
+# What version of Java is required to run this version of Solr.
+JAVA_VER_REQ="1.8"
+
stop_all=false
# for now, we don't support running this script from cygwin due to problems
@@ -84,6 +87,7 @@ if [ -z "$SOLR_INCLUDE" ]; then
/etc/default/solr.in.sh \
/opt/solr/solr.in.sh; do
if [ -r "$include" ]; then
+ SOLR_INCLUDE="$include"
. "$include"
break
fi
@@ -116,16 +120,44 @@ else
JAVA=java
fi
-# test that Java exists and is executable on this server
-"$JAVA" -version >/dev/null 2>&1 || {
+if [ -z "$SOLR_STOP_WAIT" ]; then
+ SOLR_STOP_WAIT=180
+fi
+# test that Java exists, is executable and correct version
+JAVA_VER=$("$JAVA" -version 2>&1)
+if [[ $? -ne 0 ]] ; then
echo >&2 "Java not found, or an error was encountered when running java."
- echo >&2 "A working Java 7 or later is required to run Solr!"
- echo >&2 "Please install Java or fix JAVA_HOME before running this script."
- echo >&2 "Command that we tried: '${JAVA} -version'"
+ echo >&2 "A working Java $JAVA_VER_REQ JRE is required to run Solr!"
+ echo >&2 "Please install latest version of Java $JAVA_VER_REQ or set JAVA_HOME properly."
+ echo >&2 "Command that we tried: '${JAVA} -version', with response:"
+ echo >&2 "${JAVA_VER}"
+ echo >&2
+ echo >&2 "Debug information:"
+ echo >&2 "JAVA_HOME: ${JAVA_HOME:-N/A}"
echo >&2 "Active Path:"
echo >&2 "${PATH}"
exit 1
-}
+else
+ JAVA_VER_NUM=$(echo $JAVA_VER | head -1 | awk -F '"' '/version/ {print $2}')
+ if [[ "$JAVA_VER_NUM" < "$JAVA_VER_REQ" ]] ; then
+ echo >&2 "Your current version of Java is too old to run this version of Solr"
+ echo >&2 "We found version $JAVA_VER_NUM, using command '${JAVA} -version', with response:"
+ echo >&2 "${JAVA_VER}"
+ echo >&2
+ echo >&2 "Please install latest version of Java $JAVA_VER_REQ or set JAVA_HOME properly."
+ echo >&2
+ echo >&2 "Debug information:"
+ echo >&2 "JAVA_HOME: ${JAVA_HOME:-N/A}"
+ echo >&2 "Active Path:"
+ echo >&2 "${PATH}"
+ exit 1
+ fi
+ JAVA_VENDOR="Oracle"
+ if [ "`echo $JAVA_VER | grep -i "IBM J9"`" != "" ]; then
+ JAVA_VENDOR="IBM J9"
+ fi
+fi
+
# Select HTTP OR HTTPS related configurations
SOLR_URL_SCHEME=http
@@ -134,30 +166,109 @@ SOLR_SSL_OPTS=""
if [ -n "$SOLR_SSL_KEY_STORE" ]; then
SOLR_JETTY_CONFIG+=("--module=https")
SOLR_URL_SCHEME=https
- SOLR_SSL_OPTS=" -Dsolr.jetty.keystore=$SOLR_SSL_KEY_STORE \
- -Dsolr.jetty.keystore.password=$SOLR_SSL_KEY_STORE_PASSWORD \
- -Dsolr.jetty.truststore=$SOLR_SSL_TRUST_STORE \
- -Dsolr.jetty.truststore.password=$SOLR_SSL_TRUST_STORE_PASSWORD \
- -Dsolr.jetty.ssl.needClientAuth=$SOLR_SSL_NEED_CLIENT_AUTH \
- -Dsolr.jetty.ssl.wantClientAuth=$SOLR_SSL_WANT_CLIENT_AUTH"
+ SOLR_SSL_OPTS+=" -Dsolr.jetty.keystore=$SOLR_SSL_KEY_STORE"
+ if [ -n "$SOLR_SSL_KEY_STORE_PASSWORD" ]; then
+ SOLR_SSL_OPTS+=" -Dsolr.jetty.keystore.password=$SOLR_SSL_KEY_STORE_PASSWORD"
+ fi
+ if [ -n "$SOLR_SSL_KEY_STORE_TYPE" ]; then
+ SOLR_SSL_OPTS+=" -Dsolr.jetty.keystore.type=$SOLR_SSL_KEY_STORE_TYPE"
+ fi
+
+ if [ -n "$SOLR_SSL_TRUST_STORE" ]; then
+ SOLR_SSL_OPTS+=" -Dsolr.jetty.truststore=$SOLR_SSL_TRUST_STORE"
+ fi
+ if [ -n "$SOLR_SSL_TRUST_STORE_PASSWORD" ]; then
+ SOLR_SSL_OPTS+=" -Dsolr.jetty.truststore.password=$SOLR_SSL_TRUST_STORE_PASSWORD"
+ fi
+ if [ -n "$SOLR_SSL_TRUST_STORE_TYPE" ]; then
+ SOLR_SSL_OPTS+=" -Dsolr.jetty.truststore.type=$SOLR_SSL_TRUST_STORE_TYPE"
+ fi
+
+ if [ -n "$SOLR_SSL_NEED_CLIENT_AUTH" ]; then
+ SOLR_SSL_OPTS+=" -Dsolr.jetty.ssl.needClientAuth=$SOLR_SSL_NEED_CLIENT_AUTH"
+ fi
+ if [ -n "$SOLR_SSL_WANT_CLIENT_AUTH" ]; then
+ SOLR_SSL_OPTS+=" -Dsolr.jetty.ssl.wantClientAuth=$SOLR_SSL_WANT_CLIENT_AUTH"
+ fi
+
if [ -n "$SOLR_SSL_CLIENT_KEY_STORE" ]; then
- SOLR_SSL_OPTS+=" -Djavax.net.ssl.keyStore=$SOLR_SSL_CLIENT_KEY_STORE \
- -Djavax.net.ssl.keyStorePassword=$SOLR_SSL_CLIENT_KEY_STORE_PASSWORD \
- -Djavax.net.ssl.trustStore=$SOLR_SSL_CLIENT_TRUST_STORE \
- -Djavax.net.ssl.trustStorePassword=$SOLR_SSL_CLIENT_TRUST_STORE_PASSWORD"
+ SOLR_SSL_OPTS+=" -Djavax.net.ssl.keyStore=$SOLR_SSL_CLIENT_KEY_STORE"
+
+ if [ -n "$SOLR_SSL_CLIENT_KEY_STORE_PASSWORD" ]; then
+ SOLR_SSL_OPTS+=" -Djavax.net.ssl.keyStorePassword=$SOLR_SSL_CLIENT_KEY_STORE_PASSWORD"
+ fi
+ if [ -n "$SOLR_SSL_CLIENT_KEY_STORE_TYPE" ]; then
+ SOLR_SSL_OPTS+=" -Djavax.net.ssl.keyStoreType=$SOLR_SSL_CLIENT_KEY_STORE_TYPE"
+ fi
+ else
+ if [ -n "$SOLR_SSL_KEY_STORE" ]; then
+ SOLR_SSL_OPTS+=" -Djavax.net.ssl.keyStore=$SOLR_SSL_KEY_STORE"
+ fi
+ if [ -n "$SOLR_SSL_KEY_STORE_PASSWORD" ]; then
+ SOLR_SSL_OPTS+=" -Djavax.net.ssl.keyStorePassword=$SOLR_SSL_KEY_STORE_PASSWORD"
+ fi
+ if [ -n "$SOLR_SSL_KEY_STORE_TYPE" ]; then
+ SOLR_SSL_OPTS+=" -Djavax.net.ssl.keyStoreType=$SOLR_SSL_KEYSTORE_TYPE"
+ fi
+ fi
+
+ if [ -n "$SOLR_SSL_CLIENT_TRUST_STORE" ]; then
+ SOLR_SSL_OPTS+=" -Djavax.net.ssl.trustStore=$SOLR_SSL_CLIENT_TRUST_STORE"
+
+ if [ -n "$SOLR_SSL_CLIENT_TRUST_STORE_PASSWORD" ]; then
+ SOLR_SSL_OPTS+=" -Djavax.net.ssl.trustStorePassword=$SOLR_SSL_CLIENT_TRUST_STORE_PASSWORD"
+ fi
+
+ if [ -n "$SOLR_SSL_CLIENT_TRUST_STORE_TYPE" ]; then
+ SOLR_SSL_OPTS+=" -Djavax.net.ssl.trustStoreType=$SOLR_SSL_CLIENT_TRUST_STORE_TYPE"
+ fi
else
- SOLR_SSL_OPTS+=" -Djavax.net.ssl.keyStore=$SOLR_SSL_KEY_STORE \
- -Djavax.net.ssl.keyStorePassword=$SOLR_SSL_KEY_STORE_PASSWORD \
- -Djavax.net.ssl.trustStore=$SOLR_SSL_TRUST_STORE \
- -Djavax.net.ssl.trustStorePassword=$SOLR_SSL_TRUST_STORE_PASSWORD"
+ if [ -n "$SOLR_SSL_TRUST_STORE" ]; then
+ SOLR_SSL_OPTS+=" -Djavax.net.ssl.trustStore=$SOLR_SSL_TRUST_STORE"
+ fi
+
+ if [ -n "$SOLR_SSL_TRUST_STORE_PASSWORD" ]; then
+ SOLR_SSL_OPTS+=" -Djavax.net.ssl.trustStorePassword=$SOLR_SSL_TRUST_STORE_PASSWORD"
+ fi
+
+ if [ -n "$SOLR_SSL_TRUST_STORE_TYPE" ]; then
+ SOLR_SSL_OPTS+=" -Djavax.net.ssl.trustStoreType=$SOLR_SSL_TRUST_STORE_TYPE"
+ fi
fi
else
SOLR_JETTY_CONFIG+=("--module=http")
fi
# Authentication options
+if [ -z "$SOLR_AUTH_TYPE" ] && [ -n "$SOLR_AUTHENTICATION_OPTS" ]; then
+ echo "WARNING: SOLR_AUTHENTICATION_OPTS environment variable configured without associated SOLR_AUTH_TYPE variable"
+ echo " Please configure SOLR_AUTH_TYPE environment variable with the authentication type to be used."
+ echo " Currently supported authentication types are [kerberos, basic]"
+fi
+
+if [ -n "$SOLR_AUTH_TYPE" ] && [ -n "$SOLR_AUTHENTICATION_CLIENT_CONFIGURER" ]; then
+ echo "WARNING: SOLR_AUTHENTICATION_CLIENT_CONFIGURER and SOLR_AUTH_TYPE environment variables are configured together."
+ echo " Use SOLR_AUTH_TYPE environment variable to configure authentication type to be used. "
+ echo " Currently supported authentication types are [kerberos, basic]"
+ echo " The value of SOLR_AUTHENTICATION_CLIENT_CONFIGURER environment variable will be ignored"
+fi
+
+if [ -n "$SOLR_AUTH_TYPE" ]; then
+ case "$(echo $SOLR_AUTH_TYPE | awk '{print tolower($0)}')" in
+ basic)
+ SOLR_AUTHENTICATION_CLIENT_CONFIGURER="org.apache.solr.client.solrj.impl.PreemptiveBasicAuthConfigurer"
+ ;;
+ kerberos)
+ SOLR_AUTHENTICATION_CLIENT_CONFIGURER="org.apache.solr.client.solrj.impl.Krb5HttpClientConfigurer"
+ ;;
+ *)
+ echo "ERROR: Value specified for SOLR_AUTH_TYPE environment variable is invalid."
+ exit 1
+ esac
+fi
+
if [ "$SOLR_AUTHENTICATION_CLIENT_CONFIGURER" != "" ]; then
- AUTHC_CLIENT_CONFIGURER_ARG="-Dsolr.authentication.httpclient.configurer=$SOLR_AUTHENTICATION_CLIENT_CONFIGURER"
+ AUTHC_CLIENT_CONFIGURER_ARG="-Dsolr.httpclient.builder.factory=$SOLR_AUTHENTICATION_CLIENT_CONFIGURER"
fi
AUTHC_OPTS="$AUTHC_CLIENT_CONFIGURER_ARG $SOLR_AUTHENTICATION_OPTS"
@@ -179,7 +290,7 @@ function print_usage() {
if [ -z "$CMD" ]; then
echo ""
echo "Usage: solr COMMAND OPTIONS"
- echo " where COMMAND is one of: start, stop, restart, status, healthcheck, create, create_core, create_collection, delete, version, zk"
+ echo " where COMMAND is one of: start, stop, restart, status, healthcheck, create, create_core, create_collection, delete, version, zk, auth"
echo ""
echo " Standalone server example (start Solr running in the background on port 8984):"
echo ""
@@ -206,7 +317,7 @@ function print_usage() {
echo ""
echo " -p Specify the port to start the Solr HTTP listener on; default is 8983"
echo " The specified port (SOLR_PORT) will also be used to determine the stop port"
- echo " STOP_PORT=(\$SOLR_PORT-1000) and JMX RMI listen port RMI_PORT=(1\$SOLR_PORT). "
+ echo " STOP_PORT=(\$SOLR_PORT-1000) and JMX RMI listen port RMI_PORT=(\$SOLR_PORT+10000). "
echo " For instance, if you set -p 8985, then the STOP_PORT=7985 and RMI_PORT=18985"
echo ""
echo " -d Specify the Solr server directory; defaults to server"
@@ -237,7 +348,9 @@ function print_usage() {
echo ""
echo " -noprompt Don't prompt for input; accept all defaults when running examples that accept user input"
echo ""
- echo " -V Verbose messages from this script"
+ echo " -v and -q Verbose (-v) or quiet (-q) logging. Sets default log level to DEBUG or WARN instead of INFO"
+ echo ""
+ echo " -V or -verbose Verbose messages from this script"
echo ""
elif [ "$CMD" == "stop" ]; then
echo ""
@@ -354,28 +467,132 @@ function print_usage() {
echo " Solr instance and will use the port of the first server it finds."
echo ""
elif [ "$CMD" == "zk" ]; then
- echo "Usage: solr zk [-upconfig|-downconfig] [-d confdir] [-n configName] [-z zkHost]"
+ print_short_zk_usage ""
+ echo " Be sure to check the Solr logs in case of errors."
+ echo ""
+ echo " -z zkHost Optional Zookeeper connection string for all commands. If specified it"
+ echo " overrides the 'ZK_HOST=...'' defined in solr.in.sh."
+ echo ""
+ echo " upconfig uploads a configset from the local machine to Zookeeper. (Backcompat: -upconfig)"
+ echo ""
+ echo " downconfig downloads a configset from Zookeeper to the local machine. (Backcompat: -downconfig)"
+ echo ""
+ echo " -n configName Name of the configset in Zookeeper that will be the destination of"
+ echo " 'upconfig' and the source for 'downconfig'."
+ echo ""
+ echo " -d confdir The local directory the configuration will be uploaded from for"
+ echo " 'upconfig' or downloaded to for 'downconfig'. If 'confdir' is a child of"
+ echo " ...solr/server/solr/configsets' then the configs will be copied from/to"
+ echo " that directory. Otherwise it is interpreted as a simple local path."
+ echo ""
+ echo " cp copies files or folders to/from Zookeeper or Zokeeper -> Zookeeper"
+ echo " -r Recursively copy to . Command will fail if has children and "
+ echo " -r is not specified. Optional"
+ echo ""
+ echo " , : [file:][/]path/to/local/file or zk:/path/to/zk/node"
+ echo " NOTE: and may both be Zookeeper resources prefixed by 'zk:'"
+ echo " When is a zk resource, may be '.'"
+ echo " If ends with '/', then will be a local folder or parent znode and the last"
+ echo " element of the path will be appended unless also ends in a slash. "
+ echo " may be zk:, which may be useful when using the cp -r form to backup/restore "
+ echo " the entire zk state."
+ echo " You must enclose local paths that end in a wildcard in quotes or just"
+ echo " end the local path in a slash. That is,"
+ echo " 'bin/solr zk cp -r /some/dir/ zk:/ -z localhost:2181' is equivalent to"
+ echo " 'bin/solr zk cp -r \"/some/dir/*\" zk:/ -z localhost:2181'"
+ echo " but 'bin/solr zk cp -r /some/dir/* zk:/ -z localhost:2181' will throw an error"
+ echo ""
+ echo " here's an example of backup/restore for a ZK configuration:"
+ echo " to copy to local: 'bin/solr zk cp -r zk:/ /some/dir -z localhost:2181'"
+ echo " to restore to ZK: 'bin/solr zk cp -r /some/dir/ zk:/ -z localhost:2181'"
+ echo ""
+ echo " The 'file:' prefix is stripped, thus 'file:/wherever' specifies an absolute local path and"
+ echo " 'file:somewhere' specifies a relative local path. All paths on Zookeeper are absolute."
+ echo ""
+ echo " Zookeeper nodes CAN have data, so moving a single file to a parent znode"
+ echo " will overlay the data on the parent Znode so specifying the trailing slash"
+ echo " can be important."
+ echo ""
+ echo " Wildcards are supported when copying from local, trailing only and must be quoted."
+ echo ""
+ echo " rm deletes files or folders on Zookeeper"
+ echo " -r Recursively delete if is a directory. Command will fail if "
+ echo " has children and -r is not specified. Optional"
+ echo " : [zk:]/path/to/zk/node. may not be the root ('/')"
+ echo ""
+ echo " mv moves (renames) znodes on Zookeeper"
+ echo " , : Zookeeper nodes, the 'zk:' prefix is optional."
+ echo " If ends with '/', then will be a parent znode"
+ echo " and the last element of the path will be appended."
+ echo " Zookeeper nodes CAN have data, so moving a single file to a parent znode"
+ echo " will overlay the data on the parent Znode so specifying the trailing slash"
+ echo " is important."
+ echo ""
+ echo " ls lists the znodes on Zookeeper"
+ echo " -r recursively descends the path listing all znodes. Optional"
+ echo " : The Zookeeper path to use as the root."
+ echo ""
+ echo " Only the node names are listed, not data"
+ echo ""
+ echo " mkroot makes a znode on Zookeeper with no data. Can be used to make a path of arbitrary"
+ echo " depth but primarily intended to create a 'chroot'."
echo ""
- echo " -upconfig to move a configset from the local machine to Zookeeper."
+ echo " : The Zookeeper path to create. Leading slash is assumed if not present."
+ echo " Intermediate nodes are created as needed if not present."
echo ""
- echo " -downconfig to move a configset from Zookeeper to the local machine."
+ elif [ "$CMD" == "auth" ]; then
echo ""
- echo " -n configName Name of the configset in Zookeeper that will be the destinatino of"
- echo " 'upconfig' and the source for 'downconfig'."
+ echo "Usage: solr auth enable [-type basicAuth] -credentials user:pass [-blockUnknown ] [-updateIncludeFileOnly ]"
+ echo " solr auth enable [-type basicAuth] -prompt [-blockUnknown ] [-updateIncludeFileOnly ]"
+ echo " solr auth disable [-updateIncludeFileOnly ]"
echo ""
- echo " -d confdir The local directory the configuration will be uploaded from for"
- echo " 'upconfig' or downloaded to for 'downconfig'. For 'upconfig', this"
- echo " can be one of the example configsets, basic_configs, data_driven_schema_configs or"
- echo " sample_techproducts_configs or an arbitrary directory."
+ echo " -type The authentication mechanism to enable. Defaults to 'basicAuth'."
echo ""
- echo " -z zkHost Zookeeper connection string."
+ echo " -credentials The username and password of the initial user"
+ echo " Note: only one of -prompt or -credentials must be provided"
echo ""
- echo " NOTE: Solr must have been started least once (or have it running) before using this command."
- echo " This initialized Zookeeper for Solr"
+ echo " -prompt Prompts the user to provide the credentials"
+ echo " Note: only one of -prompt or -credentials must be provided"
+ echo ""
+ echo " -blockUnknown When true, this blocks out access to unauthenticated users. When not provided,"
+ echo " this defaults to false (i.e. unauthenticated users can access all endpoints, except the"
+ echo " operations like collection-edit, security-edit, core-admin-edit etc.). Check the reference"
+ echo " guide for Basic Authentication for more details."
+ echo ""
+ echo " -updateIncludeFileOnly Only update the solr.in.sh or solr.in.cmd file, and skip actual enabling/disabling"
+ echo " authentication (i.e. don't update security.json)"
+ echo ""
+ echo " -z zkHost Zookeeper connection string"
+ echo ""
+ echo " -d Specify the Solr server directory"
+ echo ""
+ echo " -s Specify the Solr home directory. This is where any credentials or authentication"
+ echo " configuration files (e.g. basicAuth.conf) would be placed."
echo ""
fi
} # end print_usage
+function print_short_zk_usage() {
+
+ if [ "$1" != "" ]; then
+ echo -e "\nERROR: $1\n"
+ fi
+
+ echo " Usage: solr zk upconfig|downconfig -d -n [-z zkHost]"
+ echo " solr zk cp [-r] [-z zkHost]"
+ echo " solr zk rm [-r] [-z zkHost]"
+ echo " solr zk mv [-z zkHost]"
+ echo " solr zk ls [-r] [-z zkHost]"
+ echo " solr zk mkroot [-z zkHost]"
+ echo ""
+
+ if [ "$1" == "" ]; then
+ echo "Type bin/solr zk -help for full usage help"
+ else
+ exit 1
+ fi
+}
+
# used to show the script is still alive when waiting on work to complete
function spinner() {
local pid=$1
@@ -407,7 +624,7 @@ function solr_pid_by_port() {
# extract the value of the -Djetty.port parameter from a running Solr process
function jetty_port() {
SOLR_PID="$1"
- SOLR_PROC=`ps auxww | grep -w $SOLR_PID | grep start\.jar | grep jetty.port`
+ SOLR_PROC=`ps auxww | grep -w $SOLR_PID | grep start\.jar | grep jetty\.port`
IFS=' ' read -a proc_args <<< "$SOLR_PROC"
for arg in "${proc_args[@]}"
do
@@ -455,10 +672,10 @@ function get_info() {
done < <(find "$SOLR_PID_DIR" -name "solr-*.pid" -type f)
else
# no pid files but check using ps just to be sure
- numSolrs=`ps auxww | grep start\.jar | grep solr.solr.home | grep -v grep | wc -l | sed -e 's/^[ \t]*//'`
+ numSolrs=`ps auxww | grep start\.jar | grep solr\.solr\.home | grep -v grep | wc -l | sed -e 's/^[ \t]*//'`
if [ "$numSolrs" != "0" ]; then
echo -e "\nFound $numSolrs Solr nodes: "
- PROCESSES=$(ps auxww | grep start\.jar | grep solr.solr.home | grep -v grep | awk '{print $2}' | sort -r)
+ PROCESSES=$(ps auxww | grep start\.jar | grep solr\.solr\.home | grep -v grep | awk '{print $2}' | sort -r)
for ID in $PROCESSES
do
port=`jetty_port "$ID"`
@@ -490,9 +707,24 @@ function stop_solr() {
SOLR_PID="$4"
if [ "$SOLR_PID" != "" ]; then
- echo -e "Sending stop command to Solr running on port $SOLR_PORT ... waiting 5 seconds to allow Jetty process $SOLR_PID to stop gracefully."
+ echo -e "Sending stop command to Solr running on port $SOLR_PORT ... waiting up to $SOLR_STOP_WAIT seconds to allow Jetty process $SOLR_PID to stop gracefully."
"$JAVA" $SOLR_SSL_OPTS $AUTHC_OPTS -jar "$DIR/start.jar" "STOP.PORT=$STOP_PORT" "STOP.KEY=$STOP_KEY" --stop || true
- (sleep 5) &
+ (loops=0
+ while true
+ do
+ CHECK_PID=`ps auxww | awk '{print $2}' | grep -w $SOLR_PID | sort -r | tr -d ' '`
+ if [ "$CHECK_PID" != "" ]; then
+ slept=$((loops * 2))
+ if [ $slept -lt $SOLR_STOP_WAIT ]; then
+ sleep 2
+ loops=$[$loops+1]
+ else
+ exit # subshell!
+ fi
+ else
+ exit # subshell!
+ fi
+ done) &
spinner $!
rm -f "$SOLR_PID_DIR/solr-$SOLR_PORT.pid"
else
@@ -555,6 +787,12 @@ if [ "$SCRIPT_CMD" == "status" ]; then
exit
fi
+# assert tool
+if [ "$SCRIPT_CMD" == "assert" ]; then
+ run_tool assert $*
+ exit $?
+fi
+
# run a healthcheck and exit if requested
if [ "$SCRIPT_CMD" == "healthcheck" ]; then
@@ -571,7 +809,7 @@ if [ "$SCRIPT_CMD" == "healthcheck" ]; then
;;
-z|-zkhost)
if [[ -z "$2" || "${2:0:1}" == "-" ]]; then
- print_usage "$SCRIPT_CMD" "ZooKeepeer connection string is required when using the $1 option!"
+ print_usage "$SCRIPT_CMD" "ZooKeeper connection string is required when using the $1 option!"
exit 1
fi
ZK_HOST="$2"
@@ -617,6 +855,7 @@ if [[ "$SCRIPT_CMD" == "create" || "$SCRIPT_CMD" == "create_core" || "$SCRIPT_CM
CREATE_NUM_SHARDS=1
CREATE_REPFACT=1
+ FORCE=false
if [ $# -gt 0 ]; then
while true; do
@@ -669,6 +908,10 @@ if [[ "$SCRIPT_CMD" == "create" || "$SCRIPT_CMD" == "create_core" || "$SCRIPT_CM
CREATE_PORT="$2"
shift 2
;;
+ -force)
+ FORCE=true
+ shift
+ ;;
-help|-usage)
print_usage "$SCRIPT_CMD"
exit 0
@@ -726,6 +969,11 @@ if [[ "$SCRIPT_CMD" == "create" || "$SCRIPT_CMD" == "create_core" || "$SCRIPT_CM
exit 1
fi
+ if [[ "$(whoami)" == "root" ]] && [[ "$FORCE" == "false" ]] ; then
+ echo "WARNING: Creating cores as the root user can cause Solr to fail and is not advisable. Exiting."
+ echo " If you started Solr as root (not advisable either), force core creation by adding argument -force"
+ exit 1
+ fi
if [ "$SCRIPT_CMD" == "create_core" ]; then
run_tool create_core -name "$CREATE_NAME" -solrUrl "$SOLR_URL_SCHEME://$SOLR_TOOL_HOST:$CREATE_PORT/solr" \
-confdir "$CREATE_CONFDIR" -configsetsDir "$SOLR_TIP/server/solr/configsets"
@@ -821,105 +1069,285 @@ if [[ "$SCRIPT_CMD" == "delete" ]]; then
exit $?
fi
-# Upload or download a configset to Zookeeper
+ZK_RECURSE=false
+# Zookeeper file maintenance (upconfig, downconfig, files up/down etc.)
+# It's a little clumsy to have the parsing go round and round for upconfig and downconfig, but that's
+# necessary for back-compat
if [[ "$SCRIPT_CMD" == "zk" ]]; then
if [ $# -gt 0 ]; then
while true; do
case "$1" in
- -z|-zkhost)
- if [[ -z "$2" || "${2:0:1}" == "-" ]]; then
- print_usage "$SCRIPT_CMD" "ZooKeepeer connection string is required when using the $1 option!"
- exit 1
- fi
- ZK_HOST="$2"
- shift 2
- ;;
- -n|-confname)
- if [[ -z "$2" || "${2:0:1}" == "-" ]]; then
- print_usage "$SCRIPT_CMD" "Configuration name is required when using the $1 option!"
- exit 1
- fi
- CONFIGSET_CONFNAME="$2"
- shift 2
- ;;
- -d|-confdir)
- if [[ -z "$2" || "${2:0:1}" == "-" ]]; then
- print_usage "$SCRIPT_CMD" "Configuration directory is required when using the $1 option!"
- exit 1
- fi
- CONFIGSET_CONFDIR="$2"
- shift 2
- ;;
- -upconfig)
- ZK_OP="upconfig"
- shift 1
- ;;
- -downconfig)
- ZK_OP="downconfig"
- shift 1
- ;;
- -help|-usage|-h)
- print_usage "$SCRIPT_CMD"
- exit 0
- ;;
- --)
- shift
- break
- ;;
- *)
- if [ "$1" != "" ]; then
- print_usage "$SCRIPT_CMD" "Unrecognized or misplaced argument: $1!"
- exit 1
+ -upconfig|upconfig|-downconfig|downconfig|cp|rm|mv|ls|mkroot)
+ if [ "${1:0:1}" == "-" ]; then
+ ZK_OP=${1:1}
+ else
+ ZK_OP=$1
+ fi
+ shift 1
+ ;;
+ -z|-zkhost)
+ if [[ -z "$2" || "${2:0:1}" == "-" ]]; then
+ print_short_zk_usage "$SCRIPT_CMD" "ZooKeeper connection string is required when using the $1 option!"
+ fi
+ ZK_HOST="$2"
+ shift 2
+ ;;
+ -n|-confname)
+ if [[ -z "$2" || "${2:0:1}" == "-" ]]; then
+ print_short_zk_usage "$SCRIPT_CMD" "Configuration name is required when using the $1 option!"
+ fi
+ CONFIGSET_CONFNAME="$2"
+ shift 2
+ ;;
+ -d|-confdir)
+ if [[ -z "$2" || "${2:0:1}" == "-" ]]; then
+ print_short_zk_usage "$SCRIPT_CMD" "Configuration directory is required when using the $1 option!"
+ fi
+ CONFIGSET_CONFDIR="$2"
+ shift 2
+ ;;
+ -r)
+ ZK_RECURSE="true"
+ shift
+ ;;
+ -help|-usage|-h)
+ print_usage "$SCRIPT_CMD"
+ exit 0
+ ;;
+ --)
+ shift
+ break
+ ;;
+ *) # Pick up or params for rm, ls, cp, mv, mkroot.
+ if [ "$1" == "" ]; then
+ break # out-of-args, stop looping
+ fi
+ if [ -z "$ZK_SRC" ]; then
+ ZK_SRC=$1
+ else
+ if [ -z "$ZK_DST" ]; then
+ ZK_DST=$1
else
- break # out-of-args, stop looping
+ print_short_zk_usage "Unrecognized or misplaced command $1. 'cp' with trailing asterisk requires quoting, see help text."
fi
- ;;
+ fi
+ shift
+ ;;
esac
done
fi
if [ -z "$ZK_OP" ]; then
- echo "Zookeeper operation (one of '-upconfig' or '-downconfig') is required!"
- print_usage "$SCRIPT_CMD"
- exit 1
+ print_short_zk_usage "Zookeeper operation (one of 'upconfig', 'downconfig', 'rm', 'mv', 'cp', 'ls', 'mkroot') is required!"
fi
if [ -z "$ZK_HOST" ]; then
- echo "Zookeeper address (-z) argument is required!"
- print_usage "$SCRIPT_CMD"
- exit 1
+ print_short_zk_usage "Zookeeper address (-z) argument is required or ZK_HOST must be specified in the solr.in.sh file."
fi
- if [ -z "$CONFIGSET_CONFDIR" ]; then
- echo "Local directory of the configset (-d) argument is required!"
- print_usage "$SCRIPT_CMD"
- exit 1
+ if [[ "$ZK_OP" == "upconfig" || "$ZK_OP" == "downconfig" ]]; then
+ if [ -z "$CONFIGSET_CONFDIR" ]; then
+ print_short_zk_usage "Local directory of the configset (-d) argument is required!"
+ fi
+
+ if [ -z "$CONFIGSET_CONFNAME" ]; then
+ print_short_zk_usage "Configset name on Zookeeper (-n) argument is required!"
+ fi
fi
- if [ -z "$CONFIGSET_CONFNAME" ]; then
- echo "Configset name on Zookeeper (-n) argument is required!"
- print_usage "$SCRIPT_CMD"
- exit 1
+ if [[ "$ZK_OP" == "cp" || "$ZK_OP" == "mv" ]]; then
+ if [[ -z "$ZK_SRC" || -z "$ZK_DST" ]]; then
+ print_short_zk_usage " and must be specified when using either the 'mv' or 'cp' commands."
+ fi
+ if [[ "$ZK_OP" == "cp" && "${ZK_SRC:0:3}" != "zk:" && "${ZK_DST:0:3}" != "zk:" ]]; then
+ print_short_zk_usage "One of the source or desintation paths must be prefixed by 'zk:' for the 'cp' command."
+ fi
fi
- if [ "$ZK_OP" == "upconfig" ]; then
- run_tool "$ZK_OP" -confname "$CONFIGSET_CONFNAME" -confdir "$CONFIGSET_CONFDIR" -zkHost "$ZK_HOST" -configsetsDir "$SOLR_TIP/server/solr/configsets"
+ if [[ "$ZK_OP" == "mkroot" ]]; then
+ if [[ -z "$ZK_SRC" ]]; then
+ print_short_zk_usage " must be specified when using the 'mkroot' command."
+ fi
+ fi
+
+
+ case "$ZK_OP" in
+ upconfig)
+ run_tool "$ZK_OP" -confname "$CONFIGSET_CONFNAME" -confdir "$CONFIGSET_CONFDIR" -zkHost "$ZK_HOST" -configsetsDir "$SOLR_TIP/server/solr/configsets"
+ ;;
+ downconfig)
+ run_tool "$ZK_OP" -confname "$CONFIGSET_CONFNAME" -confdir "$CONFIGSET_CONFDIR" -zkHost "$ZK_HOST"
+ ;;
+ rm)
+ if [ -z "$ZK_SRC" ]; then
+ print_short_zk_usage "Zookeeper path to remove must be specified when using the 'rm' command"
+ fi
+ run_tool "$ZK_OP" -path "$ZK_SRC" -zkHost "$ZK_HOST" -recurse "$ZK_RECURSE"
+ ;;
+ mv)
+ run_tool "$ZK_OP" -src "$ZK_SRC" -dst "$ZK_DST" -zkHost "$ZK_HOST"
+ ;;
+ cp)
+ run_tool "$ZK_OP" -src "$ZK_SRC" -dst "$ZK_DST" -zkHost "$ZK_HOST" -recurse "$ZK_RECURSE"
+ ;;
+ ls)
+ if [ -z "$ZK_SRC" ]; then
+ print_short_zk_usage "Zookeeper path to list must be specified when using the 'ls' command"
+ fi
+ run_tool "$ZK_OP" -path "$ZK_SRC" -recurse "$ZK_RECURSE" -zkHost "$ZK_HOST"
+ ;;
+ mkroot)
+ if [ -z "$ZK_SRC" ]; then
+ print_short_zk_usage "Zookeeper path to list must be specified when using the 'mkroot' command"
+ fi
+ run_tool "$ZK_OP" -path "$ZK_SRC" -zkHost "$ZK_HOST"
+ ;;
+ *)
+ print_short_zk_usage "Unrecognized Zookeeper operation $ZK_OP"
+ ;;
+ esac
+
+ exit $?
+fi
+
+if [[ "$SCRIPT_CMD" == "auth" ]]; then
+ declare -a AUTH_PARAMS
+ if [ $# -gt 0 ]; then
+ while true; do
+ case "$1" in
+ enable|disable)
+ AUTH_OP=$1
+ AUTH_PARAMS=("${AUTH_PARAMS[@]}" "$AUTH_OP")
+ shift
+ ;;
+ -z|-zkhost|zkHost)
+ ZK_HOST="$2"
+ AUTH_PARAMS=("${AUTH_PARAMS[@]}" "-zkHost" "$ZK_HOST")
+ shift 2
+ ;;
+ -t|-type)
+ AUTH_TYPE="$2"
+ AUTH_PARAMS=("${AUTH_PARAMS[@]}" "-type" "$AUTH_TYPE")
+ shift 2
+ ;;
+ -credentials)
+ AUTH_CREDENTIALS="$2"
+ AUTH_PARAMS=("${AUTH_PARAMS[@]}" "-credentials" "$AUTH_CREDENTIALS")
+ shift 2
+ ;;
+ -solrIncludeFile)
+ SOLR_INCLUDE="$2"
+ shift 2
+ ;;
+ -prompt)
+ AUTH_PARAMS=("${AUTH_PARAMS[@]}" "-prompt" "$2")
+ shift
+ ;;
+ -blockUnknown)
+ AUTH_PARAMS=("${AUTH_PARAMS[@]}" "-blockUnknown" "$2")
+ shift
+ break
+ ;;
+ -updateIncludeFileOnly)
+ AUTH_PARAMS=("${AUTH_PARAMS[@]}" "-updateIncludeFileOnly" "$2")
+ shift
+ break
+ ;;
+ -d|-dir)
+ if [[ -z "$2" || "${2:0:1}" == "-" ]]; then
+ print_usage "$SCRIPT_CMD" "Server directory is required when using the $1 option!"
+ exit 1
+ fi
+
+ if [[ "$2" == "." || "$2" == "./" || "$2" == ".." || "$2" == "../" ]]; then
+ SOLR_SERVER_DIR="$(pwd)/$2"
+ else
+ # see if the arg value is relative to the tip vs full path
+ if [[ "$2" != /* ]] && [[ -d "$SOLR_TIP/$2" ]]; then
+ SOLR_SERVER_DIR="$SOLR_TIP/$2"
+ else
+ SOLR_SERVER_DIR="$2"
+ fi
+ fi
+ # resolve it to an absolute path
+ SOLR_SERVER_DIR="$(cd "$SOLR_SERVER_DIR"; pwd)"
+ shift 2
+ ;;
+ -s|-solr.home)
+ if [[ -z "$2" || "${2:0:1}" == "-" ]]; then
+ print_usage "$SCRIPT_CMD" "Solr home directory is required when using the $1 option!"
+ exit 1
+ fi
+
+ SOLR_HOME="$2"
+ shift 2
+ ;;
+ -help|-usage|-h)
+ print_usage "$SCRIPT_CMD"
+ exit 0
+ ;;
+ --)
+ shift
+ break
+ ;;
+ *)
+ shift
+ break
+ ;;
+ esac
+ done
+ fi
+
+ if [ -z "$SOLR_SERVER_DIR" ]; then
+ SOLR_SERVER_DIR="$DEFAULT_SERVER_DIR"
+ fi
+ if [ ! -e "$SOLR_SERVER_DIR" ]; then
+ echo -e "\nSolr server directory $SOLR_SERVER_DIR not found!\n"
+ exit 1
+ fi
+ if [ -z "$SOLR_HOME" ]; then
+ SOLR_HOME="$SOLR_SERVER_DIR/solr"
else
- run_tool "$ZK_OP" -confname "$CONFIGSET_CONFNAME" -confdir "$CONFIGSET_CONFDIR" -zkHost "$ZK_HOST"
+ if [[ $SOLR_HOME != /* ]] && [[ -d "$SOLR_SERVER_DIR/$SOLR_HOME" ]]; then
+ SOLR_HOME="$SOLR_SERVER_DIR/$SOLR_HOME"
+ SOLR_PID_DIR="$SOLR_HOME"
+ elif [[ $SOLR_HOME != /* ]] && [[ -d "`pwd`/$SOLR_HOME" ]]; then
+ SOLR_HOME="$(pwd)/$SOLR_HOME"
+ fi
fi
+ if [ -z "$AUTH_OP" ]; then
+ print_usage "$SCRIPT_CMD"
+ exit 0
+ fi
+
+ AUTH_PARAMS=("${AUTH_PARAMS[@]}" "-solrIncludeFile" "$SOLR_INCLUDE")
+
+ if [ -z "$AUTH_PORT" ]; then
+ for ID in `ps auxww | grep java | grep start\.jar | awk '{print $2}' | sort -r`
+ do
+ port=`jetty_port "$ID"`
+ if [ "$port" != "" ]; then
+ AUTH_PORT=$port
+ break
+ fi
+ done
+ fi
+ run_tool auth ${AUTH_PARAMS[@]} -solrUrl "$SOLR_URL_SCHEME://$SOLR_TOOL_HOST:$AUTH_PORT/solr" -authConfDir "$SOLR_HOME"
exit $?
fi
+
# verify the command given is supported
-if [ "$SCRIPT_CMD" != "stop" ] && [ "$SCRIPT_CMD" != "start" ] && [ "$SCRIPT_CMD" != "restart" ] && [ "$SCRIPT_CMD" != "status" ]; then
+if [ "$SCRIPT_CMD" != "stop" ] && [ "$SCRIPT_CMD" != "start" ] && [ "$SCRIPT_CMD" != "restart" ] && [ "$SCRIPT_CMD" != "status" ] && [ "$SCRIPT_CMD" != "assert" ]; then
print_usage "" "$SCRIPT_CMD is not a valid command!"
exit 1
fi
# Run in foreground (default is to run in the background)
FG="false"
+FORCE=false
noprompt=false
SOLR_OPTS=($SOLR_OPTS)
PASS_TO_RUN_EXAMPLE=
@@ -1033,10 +1461,22 @@ if [ $# -gt 0 ]; then
PASS_TO_RUN_EXAMPLE+=" --verbose"
shift
;;
+ -v)
+ SOLR_LOG_LEVEL=DEBUG
+ shift
+ ;;
+ -q)
+ SOLR_LOG_LEVEL=WARN
+ shift
+ ;;
-all)
stop_all=true
shift
;;
+ -force)
+ FORCE=true
+ shift
+ ;;
--)
shift
break
@@ -1060,6 +1500,10 @@ if [ $# -gt 0 ]; then
done
fi
+if [[ $SOLR_LOG_LEVEL ]] ; then
+ SOLR_LOG_LEVEL_OPT="-Dsolr.log.level=$SOLR_LOG_LEVEL"
+fi
+
if [ -z "$SOLR_SERVER_DIR" ]; then
SOLR_SERVER_DIR="$DEFAULT_SERVER_DIR"
fi
@@ -1157,13 +1601,21 @@ if [ -z "$STOP_PORT" ]; then
STOP_PORT=`expr $SOLR_PORT - 1000`
fi
+if [ "$SCRIPT_CMD" == "start" ] || [ "$SCRIPT_CMD" == "restart" ] ; then
+ if [[ "$(whoami)" == "root" ]] && [[ "$FORCE" == "false" ]] ; then
+ echo "WARNING: Starting Solr as the root user is a security risk and not considered best practice. Exiting."
+ echo " Please consult the Reference Guide. To override this check, start with argument '-force'"
+ exit 1
+ fi
+fi
+
if [[ "$SCRIPT_CMD" == "start" ]]; then
# see if Solr is already running
SOLR_PID=`solr_pid_by_port "$SOLR_PORT"`
if [ -z "$SOLR_PID" ]; then
# not found using the pid file ... but use ps to ensure not found
- SOLR_PID=`ps auxww | grep start\.jar | grep -w $SOLR_PORT | grep -v grep | awk '{print $2}' | sort -r`
+ SOLR_PID=`ps auxww | grep start\.jar | grep -w "\-Djetty\.port=$SOLR_PORT" | grep -v grep | awk '{print $2}' | sort -r`
fi
if [ "$SOLR_PID" != "" ]; then
@@ -1176,7 +1628,7 @@ else
SOLR_PID=`solr_pid_by_port "$SOLR_PORT"`
if [ -z "$SOLR_PID" ]; then
# not found using the pid file ... but use ps to ensure not found
- SOLR_PID=`ps auxww | grep start\.jar | grep -w $SOLR_PORT | grep -v grep | awk '{print $2}' | sort -r`
+ SOLR_PID=`ps auxww | grep start\.jar | grep -w "\-Djetty\.port=$SOLR_PORT" | grep -v grep | awk '{print $2}' | sort -r`
fi
if [ "$SOLR_PID" != "" ]; then
stop_solr "$SOLR_SERVER_DIR" "$SOLR_PORT" "$STOP_KEY" "$SOLR_PID"
@@ -1226,38 +1678,49 @@ if [ ! -e "$SOLR_HOME" ]; then
echo -e "\nSolr home directory $SOLR_HOME not found!\n"
exit 1
fi
-
-# backup the log files before starting
-if [ -f "$SOLR_LOGS_DIR/solr.log" ]; then
- if $verbose ; then
- echo "Backing up $SOLR_LOGS_DIR/solr.log"
- fi
- mv "$SOLR_LOGS_DIR/solr.log" "$SOLR_LOGS_DIR/solr_log_$(date +"%Y%m%d_%H%M")"
+if $verbose ; then
+ q=""
+else
+ q="-q"
fi
-
-if [ -f "$SOLR_LOGS_DIR/solr_gc.log" ]; then
- if $verbose ; then
- echo "Backing up $SOLR_LOGS_DIR/solr_gc.log"
- fi
- mv "$SOLR_LOGS_DIR/solr_gc.log" "$SOLR_LOGS_DIR/solr_gc_log_$(date +"%Y%m%d_%H%M")"
+if [ "${SOLR_LOG_PRESTART_ROTATION:=true}" == "true" ]; then
+ run_tool utils -s "$DEFAULT_SERVER_DIR" -l "$SOLR_LOGS_DIR" $q -remove_old_solr_logs 7 || echo "Failed removing old solr logs"
+ run_tool utils -s "$DEFAULT_SERVER_DIR" -l "$SOLR_LOGS_DIR" $q -archive_gc_logs $q || echo "Failed archiving old GC logs"
+ run_tool utils -s "$DEFAULT_SERVER_DIR" -l "$SOLR_LOGS_DIR" $q -archive_console_logs || echo "Failed archiving old console logs"
+ run_tool utils -s "$DEFAULT_SERVER_DIR" -l "$SOLR_LOGS_DIR" $q -rotate_solr_logs 9 || echo "Failed rotating old solr logs"
fi
-java_ver_out=`echo "$("$JAVA" -version 2>&1)"`
-JAVA_VERSION=`echo $java_ver_out | grep "java version" | awk '{ print substr($3, 2, length($3)-2); }'`
-JAVA_VENDOR="Oracle"
-if [ "`echo $java_ver_out | grep -i "IBM J9"`" != "" ]; then
- JAVA_VENDOR="IBM J9"
+# Establish default GC logging opts if no env var set (otherwise init to sensible default)
+if [ -z ${GC_LOG_OPTS+x} ]; then
+ if [[ "$JAVA_VER_NUM" < "9" ]] ; then
+ GC_LOG_OPTS=('-verbose:gc' '-XX:+PrintHeapAtGC' '-XX:+PrintGCDetails' \
+ '-XX:+PrintGCDateStamps' '-XX:+PrintGCTimeStamps' '-XX:+PrintTenuringDistribution' \
+ '-XX:+PrintGCApplicationStoppedTime')
+ else
+ GC_LOG_OPTS=('-Xlog:gc*')
+ fi
+else
+ GC_LOG_OPTS=($GC_LOG_OPTS)
fi
-# if verbose gc logging enabled, setup the location of the log file
+# if verbose gc logging enabled, setup the location of the log file and rotation
if [ "$GC_LOG_OPTS" != "" ]; then
- gc_log_flag="-Xloggc"
- if [ "$JAVA_VENDOR" == "IBM J9" ]; then
- gc_log_flag="-Xverbosegclog"
+ if [[ "$JAVA_VER_NUM" < "9" ]] ; then
+ gc_log_flag="-Xloggc"
+ if [ "$JAVA_VENDOR" == "IBM J9" ]; then
+ gc_log_flag="-Xverbosegclog"
+ fi
+ GC_LOG_OPTS+=("$gc_log_flag:$SOLR_LOGS_DIR/solr_gc.log" '-XX:+UseGCLogFileRotation' '-XX:NumberOfGCLogFiles=9' '-XX:GCLogFileSize=20M')
+ else
+ # http://openjdk.java.net/jeps/158
+ for i in "${!GC_LOG_OPTS[@]}";
+ do
+ # for simplicity, we only look at the prefix '-Xlog:gc'
+ # (if 'all' or multiple tags are used starting with anything other then 'gc' the user is on their own)
+ # if a single additional ':' exists in param, then there is already an explicit output specifier
+ GC_LOG_OPTS[$i]=$(echo ${GC_LOG_OPTS[$i]} | sed "s|^\(-Xlog:gc[^:]*$\)|\1:file=$SOLR_LOGS_DIR/solr_gc.log:time,uptime:filecount=9,filesize=20000|")
+ done
fi
- GC_LOG_OPTS=($GC_LOG_OPTS "$gc_log_flag:$SOLR_LOGS_DIR/solr_gc.log")
-else
- GC_LOG_OPTS=()
fi
# If ZK_HOST is defined, the assume SolrCloud mode
@@ -1298,7 +1761,11 @@ fi
if [ "$ENABLE_REMOTE_JMX_OPTS" == "true" ]; then
if [ -z "$RMI_PORT" ]; then
- RMI_PORT="1$SOLR_PORT"
+ RMI_PORT=`expr $SOLR_PORT + 10000`
+ if [ $RMI_PORT -gt 65535 ]; then
+ echo -e "\nRMI_PORT is $RMI_PORT, which is invalid!\n"
+ exit 1
+ fi
fi
REMOTE_JMX_OPTS=('-Dcom.sun.management.jmxremote' \
@@ -1324,6 +1791,12 @@ else
JAVA_MEM_OPTS=("-Xms$SOLR_HEAP" "-Xmx$SOLR_HEAP")
fi
+# Pick default for Java thread stack size, and then add to SOLR_OPTS
+if [ -z ${SOLR_JAVA_STACK_SIZE+x} ]; then
+ SOLR_JAVA_STACK_SIZE='-Xss256k'
+fi
+SOLR_OPTS+=($SOLR_JAVA_STACK_SIZE)
+
if [ -z "$SOLR_TIMEZONE" ]; then
SOLR_TIMEZONE='UTC'
fi
@@ -1336,20 +1809,28 @@ function launch_solr() {
SOLR_ADDL_ARGS="$2"
- GC_TUNE=($GC_TUNE)
- # deal with Java version specific GC and other flags
- if [ "${JAVA_VERSION:0:3}" == "1.7" ]; then
- # Specific Java version hacking
- GC_TUNE+=('-XX:CMSFullGCsBeforeCompaction=1' '-XX:CMSTriggerPermRatio=80')
- if [ "$JAVA_VENDOR" != "IBM J9" ]; then
- JAVA_MINOR_VERSION=${JAVA_VERSION:(-2)}
- if [[ $JAVA_MINOR_VERSION -ge 40 && $JAVA_MINOR_VERSION -le 51 ]]; then
- GC_TUNE+=('-XX:-UseSuperWord')
- echo -e "\nWARNING: Java version $JAVA_VERSION has known bugs with Lucene and requires the -XX:-UseSuperWord flag. Please consider upgrading your JVM.\n"
- fi
- fi
+ # define default GC_TUNE
+ if [ -z ${GC_TUNE+x} ]; then
+ GC_TUNE=('-XX:NewRatio=3' \
+ '-XX:SurvivorRatio=4' \
+ '-XX:TargetSurvivorRatio=90' \
+ '-XX:MaxTenuringThreshold=8' \
+ '-XX:+UseConcMarkSweepGC' \
+ '-XX:+UseParNewGC' \
+ '-XX:ConcGCThreads=4' '-XX:ParallelGCThreads=4' \
+ '-XX:+CMSScavengeBeforeRemark' \
+ '-XX:PretenureSizeThreshold=64m' \
+ '-XX:+UseCMSInitiatingOccupancyOnly' \
+ '-XX:CMSInitiatingOccupancyFraction=50' \
+ '-XX:CMSMaxAbortablePrecleanTime=6000' \
+ '-XX:+CMSParallelRemarkEnabled' \
+ '-XX:+ParallelRefProcEnabled' \
+ '-XX:-OmitStackTraceInFastThrow')
+ else
+ GC_TUNE=($GC_TUNE)
fi
+
# If SSL-related system props are set, add them to SOLR_OPTS
if [ -n "$SOLR_SSL_OPTS" ]; then
# If using SSL and solr.jetty.https.port not set explicitly, use the jetty.port
@@ -1380,17 +1861,22 @@ function launch_solr() {
fi
if [ "$SOLR_OPTS" != "" ]; then
- echo -e " SOLR_OPTS = ${SOLR_OPTS[@]}"
+ echo -e " SOLR_OPTS = ${SOLR_OPTS[@]}"
fi
if [ "$SOLR_ADDL_ARGS" != "" ]; then
- echo -e " SOLR_ADDL_ARGS = $SOLR_ADDL_ARGS"
+ echo -e " SOLR_ADDL_ARGS = $SOLR_ADDL_ARGS"
fi
if [ "$ENABLE_REMOTE_JMX_OPTS" == "true" ]; then
echo -e " RMI_PORT = $RMI_PORT"
echo -e " REMOTE_JMX_OPTS = ${REMOTE_JMX_OPTS[@]}"
fi
+
+ if [ "$SOLR_LOG_LEVEL" != "" ]; then
+ echo -e " SOLR_LOG_LEVEL = $SOLR_LOG_LEVEL"
+ fi
+
echo -e "\n"
fi
@@ -1403,7 +1889,7 @@ function launch_solr() {
fi
SOLR_START_OPTS=('-server' "${JAVA_MEM_OPTS[@]}" "${GC_TUNE[@]}" "${GC_LOG_OPTS[@]}" \
- "${REMOTE_JMX_OPTS[@]}" "${CLOUD_MODE_OPTS[@]}" \
+ "${REMOTE_JMX_OPTS[@]}" "${CLOUD_MODE_OPTS[@]}" $SOLR_LOG_LEVEL_OPT -Dsolr.log.dir="$SOLR_LOGS_DIR" \
"-Djetty.port=$SOLR_PORT" "-DSTOP.PORT=$stop_port" "-DSTOP.KEY=$STOP_KEY" \
"${SOLR_HOST_ARG[@]}" "-Duser.timezone=$SOLR_TIMEZONE" \
"-Djetty.home=$SOLR_SERVER_DIR" "-Dsolr.solr.home=$SOLR_HOME" "-Dsolr.install.dir=$SOLR_TIP" \
@@ -1413,37 +1899,57 @@ function launch_solr() {
IN_CLOUD_MODE=" in SolrCloud mode"
fi
- mkdir -p "$SOLR_LOGS_DIR"
+ mkdir -p "$SOLR_LOGS_DIR" 2>/dev/null
+ if [ $? -ne 0 ]; then
+ echo -e "\nERROR: Logs directory $SOLR_LOGS_DIR could not be created. Exiting"
+ exit 1
+ fi
+ if [ ! -w "$SOLR_LOGS_DIR" ]; then
+ echo -e "\nERROR: Logs directory $SOLR_LOGS_DIR is not writable. Exiting"
+ exit 1
+ fi
+ case "$SOLR_LOGS_DIR" in
+ contexts|etc|lib|modules|resources|scripts|solr|solr-webapp)
+ echo -e "\nERROR: Logs directory $SOLR_LOGS_DIR is invalid. Reserved for the system. Exiting"
+ exit 1
+ ;;
+ esac
if [ "$run_in_foreground" == "true" ]; then
- echo -e "\nStarting Solr$IN_CLOUD_MODE on port $SOLR_PORT from $SOLR_SERVER_DIR\n"
- exec "$JAVA" "${SOLR_START_OPTS[@]}" $SOLR_ADDL_ARGS -jar start.jar "${SOLR_JETTY_CONFIG[@]}"
+ exec "$JAVA" "${SOLR_START_OPTS[@]}" $SOLR_ADDL_ARGS -Dsolr.kerberos.name.rules="$SOLR_KERB_NAME_RULES" -jar start.jar "${SOLR_JETTY_CONFIG[@]}"
else
# run Solr in the background
- nohup "$JAVA" "${SOLR_START_OPTS[@]}" $SOLR_ADDL_ARGS -Dsolr.kerberos.name.rules="$SOLR_KERB_NAME_RULES" \
+ nohup "$JAVA" "${SOLR_START_OPTS[@]}" $SOLR_ADDL_ARGS -Dsolr.kerberos.name.rules="$SOLR_KERB_NAME_RULES" -Dsolr.log.muteconsole \
"-XX:OnOutOfMemoryError=$SOLR_TIP/bin/oom_solr.sh $SOLR_PORT $SOLR_LOGS_DIR" \
-jar start.jar "${SOLR_JETTY_CONFIG[@]}" \
1>"$SOLR_LOGS_DIR/solr-$SOLR_PORT-console.log" 2>&1 & echo $! > "$SOLR_PID_DIR/solr-$SOLR_PORT.pid"
+ # check if /proc/sys/kernel/random/entropy_avail exists then check output of cat /proc/sys/kernel/random/entropy_avail to see if less than 300
+ if [[ -f /proc/sys/kernel/random/entropy_avail ]] && (( `cat /proc/sys/kernel/random/entropy_avail` < 300)); then
+ echo "Warning: Available entropy is low. As a result, use of the UUIDField, SSL, or any other features that require"
+ echo "RNG might not work properly. To check for the amount of available entropy, use 'cat /proc/sys/kernel/random/entropy_avail'."
+ echo ""
+ fi
# no lsof on cygwin though
if hash lsof 2>/dev/null ; then # hash returns true if lsof is on the path
- echo -n "Waiting up to 30 seconds to see Solr running on port $SOLR_PORT"
+ echo -n "Waiting up to $SOLR_STOP_WAIT seconds to see Solr running on port $SOLR_PORT"
# Launch in a subshell to show the spinner
(loops=0
while true
do
running=`lsof -PniTCP:$SOLR_PORT -sTCP:LISTEN`
if [ -z "$running" ]; then
- if [ $loops -lt 6 ]; then
- sleep 5
+ slept=$((loops * 2))
+ if [ $slept -lt $SOLR_STOP_WAIT ]; then
+ sleep 2
loops=$[$loops+1]
else
- echo -e "Still not seeing Solr listening on $SOLR_PORT after 30 seconds!"
+ echo -e "Still not seeing Solr listening on $SOLR_PORT after $SOLR_STOP_WAIT seconds!"
tail -30 "$SOLR_LOGS_DIR/solr.log"
exit # subshell!
fi
else
- SOLR_PID=`ps auxww | grep start\.jar | grep -w $SOLR_PORT | grep -v grep | awk '{print $2}' | sort -r`
+ SOLR_PID=`ps auxww | grep start\.jar | grep -w "\-Djetty\.port=$SOLR_PORT" | grep -v grep | awk '{print $2}' | sort -r`
echo -e "\nStarted Solr server on port $SOLR_PORT (pid=$SOLR_PID). Happy searching!\n"
exit # subshell!
fi
@@ -1452,7 +1958,7 @@ function launch_solr() {
else
echo -e "NOTE: Please install lsof as this script needs it to determine if Solr is listening on port $SOLR_PORT."
sleep 10
- SOLR_PID=`ps auxww | grep start\.jar | grep -w $SOLR_PORT | grep -v grep | awk '{print $2}' | sort -r`
+ SOLR_PID=`ps auxww | grep start\.jar | grep -w "\-Djetty\.port=$SOLR_PORT" | grep -v grep | awk '{print $2}' | sort -r`
echo -e "\nStarted Solr server on port $SOLR_PORT (pid=$SOLR_PID). Happy searching!\n"
return;
fi
diff --git a/ambari-infra-manager/.gitignore b/ambari-infra-manager/.gitignore
new file mode 100644
index 00000000..ba4e51d6
--- /dev/null
+++ b/ambari-infra-manager/.gitignore
@@ -0,0 +1,2 @@
+out/*
+*.pid
\ No newline at end of file
diff --git a/ambari-infra-manager/README.md b/ambari-infra-manager/README.md
new file mode 100644
index 00000000..4e38a69c
--- /dev/null
+++ b/ambari-infra-manager/README.md
@@ -0,0 +1,117 @@
+
+
+# Ambari Infra Manager
+
+## Overview
+
+Ambari Infra Manager is a REST based management application for Ambari Infra services (like Infra Solr). The API is built on top of [Spring Batch](http://docs.spring.io/spring-batch/reference/html/)
+
+### Architecture
+![batch-1](docs/images/batch-1.png)
+
+### Job execution overview
+![batch-2](docs/images/batch-2.png)
+
+### Job workflow
+![batch-3](docs/images/batch-3.png)
+
+### Step workflow
+![batch-4](docs/images/batch-4.png)
+
+(images originally from [here](http://docs.spring.io/spring-batch/reference/html/))
+
+## API documentation
+
+Infra Manager uses [Swagger](http://swagger.io/), generated yaml file can be downloaded from [here](docs/api/swagger.yaml)
+
+
+## Development guide
+
+### Adding a new custom job
+
+As Infra Manager is a Spring based application that uses Java configurations, any new custom Job, together with its Steps and Configurations, needs to be on the classpath. Spring beans are registered only in a specific package, so when writing a plugin, all added Java classes need to be placed inside the "org.apache.ambari.infra" package.
+
+The plugin will need all Spring & Spring Batch dependencies. To add a new Job, you will need to define a new Configuration object. There you can define your own jobs/steps/writers/readers/processors, as shown in the following example:
+```java
+@Configuration
+@EnableBatchProcessing
+public class MyJobConfig {
+
+ @Inject
+ private StepBuilderFactory steps;
+
+ @Inject
+ private JobBuilderFactory jobs;
+
+
+ @Bean(name = "dummyStep")
+ protected Step dummyStep(ItemReader reader,
+ ItemProcessor processor,
+ ItemWriter writer) {
+ return steps.get("dummyStep").listener(new DummyStepListener()). chunk(2)
+ .reader(reader).processor(processor).writer(writer).build();
+ }
+
+ @Bean(name = "dummyJob")
+ public Job job(@Qualifier("dummyStep") Step dummyStep) {
+ return jobs.get("dummyJob").listener(new DummyJobListener()).start(dummyStep).build();
+ }
+
+}
+```
+As you can see, you will need to implement [ItemWriter](https://docs.spring.io/spring-batch/apidocs/org/springframework/batch/item/ItemWriter.html), [ItemReader](http://docs.spring.io/spring-batch/trunk/apidocs/org/springframework/batch/item/ItemReader.html) and [ItemProcessor](https://docs.spring.io/spring-batch/apidocs/org/springframework/batch/item/ItemProcessor.html)
+
+### Schedule custom jobs
+
+Business requirements may call for scheduling jobs (e.g. daily) instead of running them manually through the REST API. This can be done by adding a custom bean to the "org.apache.ambari.infra" package using [@Scheduled](http://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/scheduling/annotation/Scheduled.html):
+```java
+@Named
+public class MySchedulerObject {
+
+ @Inject
+  private JobService jobService; // or JobOperator jobOperator if the spring-batch-admin-manager dependency is not included
+
+ @Value("${infra-manager.batch.my.param:defaultString}")
+ private String myParamFromLogSearchProperties;
+
+ @Scheduled(cron = "*/5 * * * * MON-FRI")
+ public void doSomething() {
+ // setup job params
+ jobService.launch(jobName, jobParameters, TimeZone.getDefault());
+ }
+
+ @Scheduled(cron = "${infra.manager.my.prop}")
+ public void doSomethingBasedOnInfraProperty() {
+ // do something ...
+ }
+}
+```
+
+You can put your cron expression inside the infra-manager.properties file to make it configurable.
+### Build & Run Application
+```bash
+mvn clean package exec:java
+```
+
+### Build & Run Application in docker container
+```bash
+cd docker
+./infra-manager-docker.sh
+```
\ No newline at end of file
diff --git a/ambari-infra-manager/build.xml b/ambari-infra-manager/build.xml
new file mode 100644
index 00000000..3d0f4da8
--- /dev/null
+++ b/ambari-infra-manager/build.xml
@@ -0,0 +1,54 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/ambari-infra-manager/docker/Dockerfile b/ambari-infra-manager/docker/Dockerfile
new file mode 100644
index 00000000..adb584ac
--- /dev/null
+++ b/ambari-infra-manager/docker/Dockerfile
@@ -0,0 +1,52 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM centos:centos6
+
+RUN echo root:changeme | chpasswd
+
+RUN yum clean all -y && yum update -y
+RUN yum -y install vim wget rpm-build sudo which telnet tar openssh-server openssh-clients ntp git httpd lsof
+RUN rpm -e --nodeps --justdb glibc-common
+RUN yum -y install glibc-common
+
+ENV HOME /root
+
+#Install JAVA
+ENV JAVA_VERSION 8u31
+ENV BUILD_VERSION b13
+RUN wget --no-cookies --no-check-certificate --header "Cookie: oraclelicense=accept-securebackup-cookie" "http://download.oracle.com/otn-pub/java/jdk/$JAVA_VERSION-$BUILD_VERSION/jdk-$JAVA_VERSION-linux-x64.rpm" -O jdk-8-linux-x64.rpm
+RUN rpm -ivh jdk-8-linux-x64.rpm
+ENV JAVA_HOME /usr/java/default/
+
+#Install Maven
+RUN mkdir -p /opt/maven
+WORKDIR /opt/maven
+RUN wget http://archive.apache.org/dist/maven/maven-3/3.3.1/binaries/apache-maven-3.3.1-bin.tar.gz
+RUN tar -xvzf /opt/maven/apache-maven-3.3.1-bin.tar.gz
+RUN rm -rf /opt/maven/apache-maven-3.3.1-bin.tar.gz
+
+ENV M2_HOME /opt/maven/apache-maven-3.3.1
+ENV MAVEN_OPTS -Xmx2048m
+ENV PATH $PATH:$JAVA_HOME/bin:$M2_HOME/bin
+
+# SSH key
+RUN ssh-keygen -f /root/.ssh/id_rsa -t rsa -N ''
+RUN cat /root/.ssh/id_rsa.pub > /root/.ssh/authorized_keys
+RUN chmod 600 /root/.ssh/authorized_keys
+RUN sed -ri 's/UsePAM yes/UsePAM no/g' /etc/ssh/sshd_config
+
+ADD bin/start.sh /root/start.sh
+RUN chmod +x /root/start.sh
+
+WORKDIR /root
+CMD /root/start.sh
\ No newline at end of file
diff --git a/ambari-infra-manager/docker/bin/start.sh b/ambari-infra-manager/docker/bin/start.sh
new file mode 100755
index 00000000..24027787
--- /dev/null
+++ b/ambari-infra-manager/docker/bin/start.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+export INFRA_MANAGER_OPTS="-Xdebug -Xrunjdwp:transport=dt_socket,address=5007,server=y,suspend=n"
+touch /root/infra-manager.log
+/root/ambari-infra-manager/infraManager.sh > /root/infra-manager.log
+tail -f /root/infra-manager.log
+
diff --git a/ambari-infra-manager/docker/infra-manager-docker.sh b/ambari-infra-manager/docker/infra-manager-docker.sh
new file mode 100755
index 00000000..87d6b8aa
--- /dev/null
+++ b/ambari-infra-manager/docker/infra-manager-docker.sh
@@ -0,0 +1,85 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+sdir="`dirname \"$0\"`"
+: ${1:?"argument is missing: (start|stop|build-and-run|build|build-docker-and-run|build-mvn-and-run|build-docker-only|build-mvn-only)"}
+command="$1"
+
+function build_infra_manager_container() {
+ pushd $sdir
+ docker build -t ambari-infra-manager:v1.0 .
+ popd
+}
+
+function build_infra_manager_project() {
+ pushd $sdir/../
+ mvn clean package -DskipTests
+ popd
+}
+
+function kill_infra_manager_container() {
+ echo "Try to remove infra manager container if exists ..."
+ docker rm -f infra-manager
+}
+
+function start_infra_manager_container() {
+ echo "Start infra manager container ..."
+ pushd $sdir/../
+ local AMBARI_INFRA_MANAGER_LOCATION=$(pwd)
+ popd
+ kill_infra_manager_container
+ docker run -d --name infra-manager --hostname infra-manager.apache.org \
+ -v $AMBARI_INFRA_MANAGER_LOCATION/target/package:/root/ambari-infra-manager -p 61890:61890 -p 5007:5007 \
+ ambari-infra-manager:v1.0
+ ip_address=$(docker inspect --format '{{ .NetworkSettings.IPAddress }}' logsearch)
+ echo "Ambari Infra Manager container started on $ip_address (for Mac OSX route to boot2docker/docker-machine VM address, e.g.: 'sudo route add -net 172.17.0.0/16 192.168.59.103')"
+ echo "You can follow Log Search logs with 'docker logs -f infra-manager' command"
+}
+
+case $command in
+ "build-and-run")
+ build_infra_manager_project
+ build_infra_manager_container
+ start_infra_manager_container
+ ;;
+ "build")
+ build_infra_manager_project
+ start_infra_manager_container
+ ;;
+ "build-docker-and-run")
+ build_infra_manager_container
+ start_infra_manager_container
+ ;;
+ "build-mvn-and-run")
+ build_infra_manager_project
+ build_infra_manager_container
+ ;;
+ "build-docker-only")
+ build_infra_manager_container
+ ;;
+ "build-mvn-only")
+ build_infra_manager_project
+ ;;
+ "start")
+ start_infra_manager_container
+ ;;
+ "stop")
+ kill_infra_manager_container
+ ;;
+ *)
+ echo "Available commands: (start|stop|build-and-run|build|build-docker-and-run|build-mvn-and-run|build-docker-only|build-mvn-only)"
+ ;;
+esac
\ No newline at end of file
diff --git a/ambari-infra-manager/docs/api/swagger.yaml b/ambari-infra-manager/docs/api/swagger.yaml
new file mode 100644
index 00000000..824629f2
--- /dev/null
+++ b/ambari-infra-manager/docs/api/swagger.yaml
@@ -0,0 +1,784 @@
+---
+swagger: "2.0"
+info:
+ description: "Manager component for Ambari Infra"
+ version: "1.0.0"
+ title: "Infra Manager REST API"
+ license:
+ name: "Apache 2.0"
+ url: "http://www.apache.org/licenses/LICENSE-2.0.html"
+basePath: "/api/v1"
+tags:
+- name: "jobs"
+schemes:
+- "http"
+- "https"
+paths:
+ /jobs:
+ get:
+ tags:
+ - "jobs"
+ summary: "Get all jobs"
+ description: ""
+ operationId: "getAllJobs"
+ produces:
+ - "application/json"
+ parameters:
+ - name: "page"
+ in: "query"
+ required: false
+ type: "integer"
+ default: 0
+ format: "int32"
+ - name: "size"
+ in: "query"
+ required: false
+ type: "integer"
+ default: 20
+ format: "int32"
+ responses:
+ 200:
+ description: "successful operation"
+ schema:
+ type: "array"
+ items:
+ $ref: "#/definitions/JobInfo"
+ /jobs/executions:
+ delete:
+ tags:
+ - "jobs"
+ summary: "Stop all job executions."
+ description: ""
+ operationId: "stopAll"
+ produces:
+ - "application/json"
+ parameters: []
+ responses:
+ 200:
+ description: "successful operation"
+ schema:
+ type: "integer"
+ format: "int32"
+ /jobs/executions/{jobExecutionId}:
+ get:
+ tags:
+ - "jobs"
+ summary: "Get job and step details for job execution instance."
+ description: ""
+ operationId: "getExectionInfo"
+ produces:
+ - "application/json"
+ parameters:
+ - name: "jobExecutionId"
+ in: "path"
+ required: true
+ type: "integer"
+ format: "int64"
+ responses:
+ 200:
+ description: "successful operation"
+ schema:
+ $ref: "#/definitions/JobExecutionDetailsResponse"
+ delete:
+ tags:
+ - "jobs"
+ summary: "Stop or abandon a running job execution."
+ description: ""
+ operationId: "stopOrAbandonJobExecution"
+ produces:
+ - "application/json"
+ parameters:
+ - name: "jobExecutionId"
+ in: "path"
+ required: true
+ type: "integer"
+ format: "int64"
+ - name: "operation"
+ in: "query"
+ required: true
+ type: "string"
+ enum:
+ - "STOP"
+ - "ABANDON"
+ responses:
+ 200:
+ description: "successful operation"
+ schema:
+ $ref: "#/definitions/JobExecutionInfoResponse"
+ /jobs/executions/{jobExecutionId}/context:
+ get:
+ tags:
+ - "jobs"
+ summary: "Get execution context for specific job."
+ description: ""
+ operationId: "getExecutionContextByJobExecId"
+ produces:
+ - "application/json"
+ parameters:
+ - name: "jobExecutionId"
+ in: "path"
+ required: true
+ type: "integer"
+ format: "int64"
+ responses:
+ 200:
+ description: "successful operation"
+ schema:
+ $ref: "#/definitions/ExecutionContextResponse"
+ /jobs/executions/{jobExecutionId}/steps/{stepExecutionId}:
+ get:
+ tags:
+ - "jobs"
+ summary: "Get step execution details."
+ description: ""
+ operationId: "getStepExecution"
+ produces:
+ - "application/json"
+ parameters:
+ - name: "jobExecutionId"
+ in: "path"
+ required: true
+ type: "integer"
+ format: "int64"
+ - name: "stepExecutionId"
+ in: "path"
+ required: true
+ type: "integer"
+ format: "int64"
+ responses:
+ 200:
+ description: "successful operation"
+ schema:
+ $ref: "#/definitions/StepExecutionInfoResponse"
+ /jobs/executions/{jobExecutionId}/steps/{stepExecutionId}/execution-context:
+ get:
+ tags:
+ - "jobs"
+ summary: "Get the execution context of step execution."
+ description: ""
+ operationId: "getStepExecutionContext"
+ produces:
+ - "application/json"
+ parameters:
+ - name: "jobExecutionId"
+ in: "path"
+ required: true
+ type: "integer"
+ format: "int64"
+ - name: "stepExecutionId"
+ in: "path"
+ required: true
+ type: "integer"
+ format: "int64"
+ responses:
+ 200:
+ description: "successful operation"
+ schema:
+ $ref: "#/definitions/StepExecutionContextResponse"
+ /jobs/executions/{jobExecutionId}/steps/{stepExecutionId}/progress:
+ get:
+ tags:
+ - "jobs"
+ summary: "Get progress of step execution."
+ description: ""
+ operationId: "getStepExecutionProgress"
+ produces:
+ - "application/json"
+ parameters:
+ - name: "jobExecutionId"
+ in: "path"
+ required: true
+ type: "integer"
+ format: "int64"
+ - name: "stepExecutionId"
+ in: "path"
+ required: true
+ type: "integer"
+ format: "int64"
+ responses:
+ 200:
+ description: "successful operation"
+ schema:
+ $ref: "#/definitions/StepExecutionProgressResponse"
+ /jobs/info/names:
+ get:
+ tags:
+ - "jobs"
+ summary: "Get all job names"
+ description: ""
+ operationId: "getAllJobNames"
+ produces:
+ - "application/json"
+ parameters: []
+ responses:
+ 200:
+ description: "successful operation"
+ schema:
+ type: "array"
+ uniqueItems: true
+ items:
+ type: "string"
+ /jobs/{jobName}:
+ post:
+ tags:
+ - "jobs"
+ summary: "Start a new job instance by job name."
+ description: ""
+ operationId: "startJob"
+ produces:
+ - "application/json"
+ parameters:
+ - name: "jobName"
+ in: "path"
+ required: true
+ type: "string"
+ - name: "params"
+ in: "query"
+ required: false
+ type: "string"
+ responses:
+ 200:
+ description: "successful operation"
+ schema:
+ $ref: "#/definitions/JobExecutionInfoResponse"
+ /jobs/{jobName}/executions:
+ get:
+ tags:
+ - "jobs"
+ summary: "Get the id values of all the running job instances."
+ description: ""
+ operationId: "getExecutionIdsByJobName"
+ produces:
+ - "application/json"
+ parameters:
+ - name: "jobName"
+ in: "path"
+ required: true
+ type: "string"
+ responses:
+ 200:
+ description: "successful operation"
+ schema:
+ type: "array"
+ uniqueItems: true
+ items:
+ type: "integer"
+ format: "int64"
+ /jobs/{jobName}/info:
+ get:
+ tags:
+ - "jobs"
+ summary: "Get job details by job name."
+ description: ""
+ operationId: "getJobDetails"
+ produces:
+ - "application/json"
+ parameters:
+ - name: "page"
+ in: "query"
+ required: false
+ type: "integer"
+ default: 0
+ format: "int32"
+ - name: "size"
+ in: "query"
+ required: false
+ type: "integer"
+ default: 20
+ format: "int32"
+ - name: "jobName"
+ in: "path"
+ required: true
+ type: "string"
+ responses:
+ 200:
+ description: "successful operation"
+ schema:
+ $ref: "#/definitions/JobDetailsResponse"
+ /jobs/{jobName}/{jobInstanceId}/executions:
+ get:
+ tags:
+ - "jobs"
+ summary: "Get execution for job instance."
+ description: ""
+ operationId: "getExecutionsForInstance"
+ produces:
+ - "application/json"
+ parameters:
+ - name: "jobName"
+ in: "path"
+ required: true
+ type: "string"
+ - name: "jobInstanceId"
+ in: "path"
+ required: true
+ type: "integer"
+ format: "int64"
+ responses:
+ 200:
+ description: "successful operation"
+ schema:
+ type: "array"
+ items:
+ $ref: "#/definitions/JobExecutionInfoResponse"
+ post:
+ tags:
+ - "jobs"
+ summary: "Restart job instance."
+ description: ""
+ operationId: "restartJobInstance"
+ produces:
+ - "application/json"
+ parameters:
+ - in: "body"
+ name: "body"
+ required: false
+ schema:
+ $ref: "#/definitions/JobExecutionRestartRequest"
+ responses:
+ 200:
+ description: "successful operation"
+ schema:
+ $ref: "#/definitions/JobExecutionInfoResponse"
+definitions:
+ JobExecutionData:
+ type: "object"
+ properties:
+ id:
+ type: "integer"
+ format: "int64"
+ executionContext:
+ $ref: "#/definitions/ExecutionContext"
+ jobInstance:
+ $ref: "#/definitions/JobInstance"
+ jobId:
+ type: "integer"
+ format: "int64"
+ jobParameters:
+ $ref: "#/definitions/JobParameters"
+ failureExceptions:
+ type: "array"
+ items:
+ $ref: "#/definitions/Throwable"
+ endTime:
+ type: "string"
+ format: "date-time"
+ exitStatus:
+ $ref: "#/definitions/ExitStatus"
+ createTime:
+ type: "string"
+ format: "date-time"
+ lastUpdated:
+ type: "string"
+ format: "date-time"
+ jobConfigurationName:
+ type: "string"
+ startTime:
+ type: "string"
+ format: "date-time"
+ status:
+ type: "string"
+ enum:
+ - "COMPLETED"
+ - "STARTING"
+ - "STARTED"
+ - "STOPPING"
+ - "STOPPED"
+ - "FAILED"
+ - "ABANDONED"
+ - "UNKNOWN"
+ stepExecutionDataList:
+ type: "array"
+ items:
+ $ref: "#/definitions/StepExecutionData"
+ JobInstance:
+ type: "object"
+ properties:
+ id:
+ type: "integer"
+ format: "int64"
+ version:
+ type: "integer"
+ format: "int32"
+ jobName:
+ type: "string"
+ instanceId:
+ type: "integer"
+ format: "int64"
+ StepExecutionData:
+ type: "object"
+ properties:
+ id:
+ type: "integer"
+ format: "int64"
+ jobExecutionId:
+ type: "integer"
+ format: "int64"
+ executionContext:
+ $ref: "#/definitions/ExecutionContext"
+ stepName:
+ type: "string"
+ terminateOnly:
+ type: "boolean"
+ default: false
+ failureExceptions:
+ type: "array"
+ items:
+ $ref: "#/definitions/Throwable"
+ endTime:
+ type: "string"
+ format: "date-time"
+ exitStatus:
+ $ref: "#/definitions/ExitStatus"
+ lastUpdated:
+ type: "string"
+ format: "date-time"
+ commitCount:
+ type: "integer"
+ format: "int32"
+ readCount:
+ type: "integer"
+ format: "int32"
+ filterCount:
+ type: "integer"
+ format: "int32"
+ writeCount:
+ type: "integer"
+ format: "int32"
+ readSkipCount:
+ type: "integer"
+ format: "int32"
+ writeSkipCount:
+ type: "integer"
+ format: "int32"
+ processSkipCount:
+ type: "integer"
+ format: "int32"
+ rollbackCount:
+ type: "integer"
+ format: "int32"
+ startTime:
+ type: "string"
+ format: "date-time"
+ status:
+ type: "string"
+ enum:
+ - "COMPLETED"
+ - "STARTING"
+ - "STARTED"
+ - "STOPPING"
+ - "STOPPED"
+ - "FAILED"
+ - "ABANDONED"
+ - "UNKNOWN"
+ StackTraceElement:
+ type: "object"
+ properties:
+ methodName:
+ type: "string"
+ fileName:
+ type: "string"
+ lineNumber:
+ type: "integer"
+ format: "int32"
+ className:
+ type: "string"
+ nativeMethod:
+ type: "boolean"
+ default: false
+ JobExecutionDetailsResponse:
+ type: "object"
+ properties:
+ jobExecutionInfoResponse:
+ $ref: "#/definitions/JobExecutionInfoResponse"
+ stepExecutionInfoList:
+ type: "array"
+ items:
+ $ref: "#/definitions/StepExecutionInfoResponse"
+ StepExecutionContextResponse:
+ type: "object"
+ properties:
+ executionContextMap:
+ type: "object"
+ additionalProperties:
+ type: "object"
+ jobExecutionId:
+ type: "integer"
+ format: "int64"
+ stepExecutionId:
+ type: "integer"
+ format: "int64"
+ stepName:
+ type: "string"
+ StepExecutionProgress:
+ type: "object"
+ properties:
+ estimatedPercentCompleteMessage:
+ $ref: "#/definitions/MessageSourceResolvable"
+ estimatedPercentComplete:
+ type: "number"
+ format: "double"
+ ExitStatus:
+ type: "object"
+ properties:
+ exitCode:
+ type: "string"
+ exitDescription:
+ type: "string"
+ running:
+ type: "boolean"
+ default: false
+ ExecutionContextResponse:
+ type: "object"
+ properties:
+ jobExecutionId:
+ type: "integer"
+ format: "int64"
+ executionContextMap:
+ type: "object"
+ additionalProperties:
+ type: "object"
+ StepExecutionHistory:
+ type: "object"
+ properties:
+ stepName:
+ type: "string"
+ count:
+ type: "integer"
+ format: "int32"
+ commitCount:
+ $ref: "#/definitions/CumulativeHistory"
+ rollbackCount:
+ $ref: "#/definitions/CumulativeHistory"
+ readCount:
+ $ref: "#/definitions/CumulativeHistory"
+ writeCount:
+ $ref: "#/definitions/CumulativeHistory"
+ filterCount:
+ $ref: "#/definitions/CumulativeHistory"
+ readSkipCount:
+ $ref: "#/definitions/CumulativeHistory"
+ writeSkipCount:
+ $ref: "#/definitions/CumulativeHistory"
+ processSkipCount:
+ $ref: "#/definitions/CumulativeHistory"
+ duration:
+ $ref: "#/definitions/CumulativeHistory"
+ durationPerRead:
+ $ref: "#/definitions/CumulativeHistory"
+ TimeZone:
+ type: "object"
+ properties:
+ displayName:
+ type: "string"
+ id:
+ type: "string"
+ dstsavings:
+ type: "integer"
+ format: "int32"
+ rawOffset:
+ type: "integer"
+ format: "int32"
+ MessageSourceResolvable:
+ type: "object"
+ properties:
+ arguments:
+ type: "array"
+ items:
+ type: "object"
+ codes:
+ type: "array"
+ items:
+ type: "string"
+ defaultMessage:
+ type: "string"
+ ExecutionContext:
+ type: "object"
+ properties:
+ dirty:
+ type: "boolean"
+ default: false
+ empty:
+ type: "boolean"
+ default: false
+ StepExecutionInfoResponse:
+ type: "object"
+ properties:
+ id:
+ type: "integer"
+ format: "int64"
+ jobExecutionId:
+ type: "integer"
+ format: "int64"
+ jobName:
+ type: "string"
+ name:
+ type: "string"
+ startDate:
+ type: "string"
+ startTime:
+ type: "string"
+ duration:
+ type: "string"
+ durationMillis:
+ type: "integer"
+ format: "int64"
+ exitCode:
+ type: "string"
+ status:
+ type: "string"
+ JobExecutionInfoResponse:
+ type: "object"
+ properties:
+ id:
+ type: "integer"
+ format: "int64"
+ stepExecutionCount:
+ type: "integer"
+ format: "int32"
+ jobId:
+ type: "integer"
+ format: "int64"
+ jobName:
+ type: "string"
+ startDate:
+ type: "string"
+ startTime:
+ type: "string"
+ duration:
+ type: "string"
+ jobExecutionData:
+ $ref: "#/definitions/JobExecutionData"
+ jobParameters:
+ type: "object"
+ additionalProperties:
+ type: "object"
+ jobParametersString:
+ type: "string"
+ restartable:
+ type: "boolean"
+ default: false
+ abandonable:
+ type: "boolean"
+ default: false
+ stoppable:
+ type: "boolean"
+ default: false
+ timeZone:
+ $ref: "#/definitions/TimeZone"
+ JobInfo:
+ type: "object"
+ properties:
+ name:
+ type: "string"
+ executionCount:
+ type: "integer"
+ format: "int32"
+ launchable:
+ type: "boolean"
+ default: false
+ incrementable:
+ type: "boolean"
+ default: false
+ jobInstanceId:
+ type: "integer"
+ format: "int64"
+ JobExecutionRestartRequest:
+ type: "object"
+ properties:
+ jobName:
+ type: "string"
+ jobInstanceId:
+ type: "integer"
+ format: "int64"
+ operation:
+ type: "string"
+ enum:
+ - "RESTART"
+ Throwable:
+ type: "object"
+ properties:
+ cause:
+ $ref: "#/definitions/Throwable"
+ stackTrace:
+ type: "array"
+ items:
+ $ref: "#/definitions/StackTraceElement"
+ message:
+ type: "string"
+ localizedMessage:
+ type: "string"
+ suppressed:
+ type: "array"
+ items:
+ $ref: "#/definitions/Throwable"
+ JobParameters:
+ type: "object"
+ properties:
+ parameters:
+ type: "object"
+ additionalProperties:
+ $ref: "#/definitions/JobParameter"
+ empty:
+ type: "boolean"
+ default: false
+ CumulativeHistory:
+ type: "object"
+ properties:
+ count:
+ type: "integer"
+ format: "int32"
+ min:
+ type: "number"
+ format: "double"
+ max:
+ type: "number"
+ format: "double"
+ standardDeviation:
+ type: "number"
+ format: "double"
+ mean:
+ type: "number"
+ format: "double"
+ JobInstanceDetailsResponse:
+ type: "object"
+ properties:
+ jobInstance:
+ $ref: "#/definitions/JobInstance"
+ jobExecutionInfoResponseList:
+ type: "array"
+ items:
+ $ref: "#/definitions/JobExecutionInfoResponse"
+ JobParameter:
+ type: "object"
+ properties:
+ identifying:
+ type: "boolean"
+ default: false
+ value:
+ type: "object"
+ type:
+ type: "string"
+ enum:
+ - "STRING"
+ - "DATE"
+ - "LONG"
+ - "DOUBLE"
+ StepExecutionProgressResponse:
+ type: "object"
+ properties:
+ stepExecutionProgress:
+ $ref: "#/definitions/StepExecutionProgress"
+ stepExecutionHistory:
+ $ref: "#/definitions/StepExecutionHistory"
+ stepExecutionInfoResponse:
+ $ref: "#/definitions/StepExecutionInfoResponse"
+ JobDetailsResponse:
+ type: "object"
+ properties:
+ jobInfo:
+ $ref: "#/definitions/JobInfo"
+ jobInstanceDetailsResponseList:
+ type: "array"
+ items:
+ $ref: "#/definitions/JobInstanceDetailsResponse"
diff --git a/ambari-infra-manager/docs/images/batch-1.png b/ambari-infra-manager/docs/images/batch-1.png
new file mode 100644
index 00000000..d763852c
Binary files /dev/null and b/ambari-infra-manager/docs/images/batch-1.png differ
diff --git a/ambari-infra-manager/docs/images/batch-2.png b/ambari-infra-manager/docs/images/batch-2.png
new file mode 100644
index 00000000..1de34795
Binary files /dev/null and b/ambari-infra-manager/docs/images/batch-2.png differ
diff --git a/ambari-infra-manager/docs/images/batch-3.png b/ambari-infra-manager/docs/images/batch-3.png
new file mode 100644
index 00000000..7f1123c7
Binary files /dev/null and b/ambari-infra-manager/docs/images/batch-3.png differ
diff --git a/ambari-infra-manager/docs/images/batch-4.png b/ambari-infra-manager/docs/images/batch-4.png
new file mode 100644
index 00000000..beb610ad
Binary files /dev/null and b/ambari-infra-manager/docs/images/batch-4.png differ
diff --git a/ambari-infra-manager/pom.xml b/ambari-infra-manager/pom.xml
new file mode 100644
index 00000000..aa86da82
--- /dev/null
+++ b/ambari-infra-manager/pom.xml
@@ -0,0 +1,422 @@
+
+
+
+
+ ambari-infra
+ org.apache.ambari
+ 2.0.0.0-SNAPSHOT
+
+ Ambari Infra Manager
+ http://maven.apache.org
+ 4.0.0
+
+ ambari-infra-manager
+
+
+ 4.3.10.RELEASE
+ 4.2.3.RELEASE
+ 2.0.4.RELEASE
+ 2.25.1
+ 9.4.6.v20170531
+ 3.0.7.RELEASE
+ 3.8.11.2
+ 2.0.2.RELEASE
+ 1.5.6.RELEASE
+ 1.5.16
+ 0.6.0
+
+
+
+ ambari-infra-manager_${project.version}
+
+
+ org.apache.maven.plugins
+ maven-compiler-plugin
+ 3.0
+
+ ${jdk.version}
+ ${jdk.version}
+
+
+
+ org.codehaus.mojo
+ exec-maven-plugin
+ 1.2.1
+
+
+
+ java
+
+
+
+
+ org.apache.ambari.infra.InfraManager
+
+
+
+ org.springframework.boot
+ spring-boot-maven-plugin
+ ${spring-boot.version}
+
+ exec
+
+
+
+
+ repackage
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-dependency-plugin
+ 2.8
+
+
+ copy-dependencies
+ package
+
+ copy-dependencies
+
+
+ true
+ ${basedir}/target/libs
+ false
+ false
+ true
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-antrun-plugin
+ 1.7
+
+
+ package
+
+
+
+
+
+
+
+
+ run
+
+
+
+
+
+
+
+
+
+ junit
+ junit
+ test
+
+
+ org.easymock
+ easymock
+ 3.4
+ test
+
+
+
+ org.springframework
+ spring-beans
+ ${spring.version}
+
+
+ org.springframework
+ spring-context
+ ${spring.version}
+
+
+ org.springframework
+ spring-test
+ ${spring.version}
+
+
+
+ org.springframework.security
+ spring-security-web
+ ${spring.security.version}
+
+
+ org.springframework.security
+ spring-security-core
+ ${spring.security.version}
+
+
+ org.springframework.security
+ spring-security-config
+ ${spring.security.version}
+
+
+ org.springframework.security
+ spring-security-ldap
+ ${spring.security.version}
+
+
+
+ org.springframework.boot
+ spring-boot-starter
+ ${spring-boot.version}
+
+
+ org.springframework.boot
+ spring-boot-starter-logging
+
+
+
+
+ org.springframework.boot
+ spring-boot-starter-log4j2
+ ${spring-boot.version}
+
+
+ org.springframework.boot
+ spring-boot-starter-web
+ ${spring-boot.version}
+
+
+ org.springframework.boot
+ spring-boot-starter-security
+ ${spring-boot.version}
+
+
+ org.springframework.boot
+ spring-boot-starter-actuator
+ ${spring-boot.version}
+
+
+ org.springframework.boot
+ spring-boot-starter-jetty
+ ${spring-boot.version}
+
+
+ org.springframework.boot
+ spring-boot-starter-jersey
+ ${spring-boot.version}
+
+
+ org.springframework.boot
+ spring-boot-starter-freemarker
+ ${spring-boot.version}
+
+
+ org.springframework.boot
+ spring-boot-autoconfigure
+ ${spring-boot.version}
+
+
+ org.springframework.boot
+ spring-boot-configuration-processor
+ ${spring-boot.version}
+
+
+ org.glassfish.jersey.media
+ jersey-media-json-jettison
+ ${jersey.version}
+
+
+
+ org.apache.solr
+ solr-solrj
+ ${solr.version}
+
+
+ org.apache.solr
+ solr-core
+ ${solr.version}
+
+
+ *
+ *
+
+
+
+
+ org.apache.lucene
+ lucene-core
+ ${solr.version}
+
+
+ org.apache.lucene
+ lucene-analyzers-common
+ ${solr.version}
+
+
+
+ org.apache.hadoop
+ hadoop-common
+ 2.7.0
+
+
+ javax.servlet
+ servlet-api
+
+
+ org.mortbay.jetty
+ jetty
+
+
+ org.mortbay.jetty
+ jetty-util
+
+
+ com.sun.jersey
+ jetty-util
+
+
+ com.sun.jersey
+ jersey-core
+
+
+ com.sun.jersey
+ jersey-json
+
+
+ com.sun.jersey
+ jersey-server
+
+
+ org.slf4j
+ slf4j-log4j12
+
+
+
+
+ commons-io
+ commons-io
+ 2.4
+
+
+ commons-cli
+ commons-cli
+ 1.3.1
+
+
+ commons-codec
+ commons-codec
+
+
+ commons-lang
+ commons-lang
+
+
+ org.springframework.security.kerberos
+ spring-security-kerberos-core
+ 1.0.1.RELEASE
+
+
+ org.springframework.security.kerberos
+ spring-security-kerberos-web
+ 1.0.1.RELEASE
+
+
+ org.springframework.security.kerberos
+ spring-security-kerberos-client
+ 1.0.1.RELEASE
+
+
+ cglib
+ cglib
+ 3.2.4
+
+
+ io.swagger
+ swagger-annotations
+ ${swagger.version}
+
+
+ io.swagger
+ swagger-core
+ ${swagger.version}
+
+
+ io.swagger
+ swagger-jersey2-jaxrs
+ ${swagger.version}
+
+
+ javax.ws.rs
+ jsr311-api
+
+
+
+
+ io.swagger
+ swagger-models
+ ${swagger.version}
+
+
+ org.webjars
+ swagger-ui
+ 2.1.0
+
+
+ org.springframework
+ spring-context-support
+ ${spring.version}
+
+
+ org.springframework.batch
+ spring-batch-core
+ ${spring-batch.version}
+
+
+ org.springframework
+ spring-jdbc
+ ${spring.version}
+
+
+ io.jsonwebtoken
+ jjwt
+ ${jjwt.version}
+
+
+ org.xerial
+ sqlite-jdbc
+ ${sqlite.version}
+
+
+ org.springframework.batch
+ spring-batch-admin-manager
+ 1.3.1.RELEASE
+
+
+ org.slf4j
+ slf4j-log4j12
+
+
+
+
+ guava
+ com.google.guava
+ 20.0
+
+
+
+
diff --git a/ambari-infra-manager/src/main/java/org/apache/ambari/infra/InfraManager.java b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/InfraManager.java
new file mode 100644
index 00000000..185e344f
--- /dev/null
+++ b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/InfraManager.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra;
+
+import org.springframework.boot.Banner;
+import org.springframework.boot.autoconfigure.SpringBootApplication;
+import org.springframework.boot.autoconfigure.batch.BatchAutoConfiguration;
+import org.springframework.boot.autoconfigure.data.rest.RepositoryRestMvcAutoConfiguration;
+import org.springframework.boot.autoconfigure.security.SecurityAutoConfiguration;
+import org.springframework.boot.autoconfigure.web.WebMvcAutoConfiguration;
+import org.springframework.boot.builder.SpringApplicationBuilder;
+import org.springframework.boot.system.ApplicationPidFileWriter;
+
+/**
+ * Entry point of the Ambari Infra Manager Spring Boot application.
+ *
+ * Web MVC, Spring Batch, security and repository-REST auto-configuration are excluded
+ * because the application wires those up explicitly in its own @Configuration classes.
+ */
+@SpringBootApplication(
+ scanBasePackages = {"org.apache.ambari.infra"},
+ exclude = {
+ RepositoryRestMvcAutoConfiguration.class,
+ WebMvcAutoConfiguration.class,
+ BatchAutoConfiguration.class,
+ SecurityAutoConfiguration.class
+ }
+)
+public class InfraManager {
+
+ public static void main(String[] args) {
+ // Read the environment once (was read twice); PID_FILE overrides the default pid file name.
+ String pidFile = System.getenv("PID_FILE");
+ if (pidFile == null) {
+ pidFile = "infra-manager.pid";
+ }
+ new SpringApplicationBuilder(InfraManager.class)
+ .bannerMode(Banner.Mode.OFF)
+ .listeners(new ApplicationPidFileWriter(pidFile))
+ .web(true)
+ .run(args);
+ }
+}
diff --git a/ambari-infra-manager/src/main/java/org/apache/ambari/infra/conf/InfraManagerApiDocConfig.java b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/conf/InfraManagerApiDocConfig.java
new file mode 100644
index 00000000..4c76742e
--- /dev/null
+++ b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/conf/InfraManagerApiDocConfig.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.conf;
+
+import io.swagger.jaxrs.config.BeanConfig;
+import io.swagger.jaxrs.listing.ApiListingResource;
+import io.swagger.jaxrs.listing.SwaggerSerializers;
+import io.swagger.models.Info;
+import io.swagger.models.License;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+
+/**
+ * Registers the swagger-core beans that publish API documentation for the
+ * Infra Manager REST resources under {@code /api/v1}.
+ */
+@Configuration
+public class InfraManagerApiDocConfig {
+
+ private static final String DESCRIPTION = "Manager component for Ambari Infra";
+ private static final String VERSION = "1.0.0";
+ private static final String TITLE = "Infra Manager REST API";
+ private static final String LICENSE = "Apache 2.0";
+ private static final String LICENSE_URL = "http://www.apache.org/licenses/LICENSE-2.0.html";
+ private static final String RESOURCE_PACKAGE = "org.apache.ambari.infra.rest";
+ private static final String BASE_PATH = "/api/v1";
+
+ /** Jersey resource serving the generated swagger.json/swagger.yaml listing. */
+ @Bean
+ public ApiListingResource apiListingResource() {
+ return new ApiListingResource();
+ }
+
+ /** Message body writers for the Swagger model objects. */
+ @Bean
+ public SwaggerSerializers swaggerSerializers() {
+ return new SwaggerSerializers();
+ }
+
+ /** Builds the swagger scanner configuration describing this API. */
+ @Bean
+ public BeanConfig swaggerConfig() {
+ License apacheLicense = new License();
+ apacheLicense.setName(LICENSE);
+ apacheLicense.setUrl(LICENSE_URL);
+
+ Info apiInfo = new Info();
+ apiInfo.setDescription(DESCRIPTION);
+ apiInfo.setTitle(TITLE);
+ apiInfo.setVersion(VERSION);
+ apiInfo.setLicense(apacheLicense);
+
+ // NOTE(review): the BeanConfig setter order is kept exactly as before, since
+ // setScan(true) may trigger resource scanning as a side effect — confirm before reordering.
+ BeanConfig config = new BeanConfig();
+ config.setSchemes(new String[]{"http", "https"});
+ config.setBasePath(BASE_PATH);
+ config.setTitle(TITLE);
+ config.setDescription(DESCRIPTION);
+ config.setLicense(LICENSE);
+ config.setLicenseUrl(LICENSE_URL);
+ config.setScan(true);
+ config.setVersion(VERSION);
+ config.setResourcePackage(RESOURCE_PACKAGE);
+ config.setInfo(apiInfo);
+ return config;
+ }
+}
diff --git a/ambari-infra-manager/src/main/java/org/apache/ambari/infra/conf/InfraManagerConfig.java b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/conf/InfraManagerConfig.java
new file mode 100644
index 00000000..86059a24
--- /dev/null
+++ b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/conf/InfraManagerConfig.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.conf;
+
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.ComponentScan;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.context.annotation.PropertySource;
+import org.springframework.context.support.PropertySourcesPlaceholderConfigurer;
+
+/**
+ * Root Spring configuration: scans the org.apache.ambari.infra packages and
+ * loads infra-manager.properties from the classpath as a property source.
+ */
+@Configuration
+@ComponentScan("org.apache.ambari.infra")
+@PropertySource(value = {"classpath:infra-manager.properties"})
+public class InfraManagerConfig {
+
+ // Declared static so it is registered early as a BeanFactoryPostProcessor,
+ // allowing ${...} placeholders to resolve in other bean definitions.
+ @Bean
+ public static PropertySourcesPlaceholderConfigurer propertyConfigurer() {
+ return new PropertySourcesPlaceholderConfigurer();
+ }
+}
diff --git a/ambari-infra-manager/src/main/java/org/apache/ambari/infra/conf/InfraManagerDataConfig.java b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/conf/InfraManagerDataConfig.java
new file mode 100644
index 00000000..b5b215e3
--- /dev/null
+++ b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/conf/InfraManagerDataConfig.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.conf;
+
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.context.annotation.Configuration;
+
+/**
+ * Holds the server data directory location, injected from the
+ * infra-manager.server.data.folder property (default: /opt/ambari-infra-manager/data).
+ */
+@Configuration
+public class InfraManagerDataConfig {
+
+ @Value("${infra-manager.server.data.folder:/opt/ambari-infra-manager/data}")
+ private String dataFolder;
+
+ public String getDataFolder() {
+ return dataFolder;
+ }
+
+ public void setDataFolder(String dataFolder) {
+ this.dataFolder = dataFolder;
+ }
+}
diff --git a/ambari-infra-manager/src/main/java/org/apache/ambari/infra/conf/InfraManagerJerseyResourceConfig.java b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/conf/InfraManagerJerseyResourceConfig.java
new file mode 100644
index 00000000..3a4c00f8
--- /dev/null
+++ b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/conf/InfraManagerJerseyResourceConfig.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.conf;
+
+import org.apache.ambari.infra.rest.JobResource;
+import org.glassfish.jersey.jackson.JacksonFeature;
+import org.glassfish.jersey.server.ResourceConfig;
+import org.glassfish.jersey.servlet.ServletProperties;
+
+import javax.ws.rs.ApplicationPath;
+
+/**
+ * Jersey application rooted at /api/v1: registers every resource in the
+ * package of JobResource, enables Jackson JSON (de)serialization, and
+ * forwards unmatched requests (404) on to the servlet chain so that
+ * non-Jersey content (e.g. static resources) can still be served.
+ */
+@ApplicationPath("/api/v1")
+public class InfraManagerJerseyResourceConfig extends ResourceConfig {
+
+ public InfraManagerJerseyResourceConfig() {
+ packages(JobResource.class.getPackage().getName());
+ register(JacksonFeature.class);
+ property(ServletProperties.FILTER_FORWARD_ON_404, true);
+ }
+
+}
diff --git a/ambari-infra-manager/src/main/java/org/apache/ambari/infra/conf/InfraManagerServletConfig.java b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/conf/InfraManagerServletConfig.java
new file mode 100644
index 00000000..06aea79b
--- /dev/null
+++ b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/conf/InfraManagerServletConfig.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.conf;
+
+import org.eclipse.jetty.server.Server;
+import org.eclipse.jetty.servlet.DefaultServlet;
+import org.glassfish.jersey.servlet.ServletContainer;
+import org.glassfish.jersey.servlet.ServletProperties;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.boot.autoconfigure.web.ServerProperties;
+import org.springframework.boot.context.embedded.EmbeddedServletContainerFactory;
+import org.springframework.boot.context.embedded.jetty.JettyEmbeddedServletContainer;
+import org.springframework.boot.context.embedded.jetty.JettyEmbeddedServletContainerFactory;
+import org.springframework.boot.web.servlet.ServletRegistrationBean;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+
+import javax.inject.Inject;
+
+/**
+ * Configures the embedded Jetty container and the servlets of the Infra Manager web app:
+ * the Jersey REST API under /api/v1 and a DefaultServlet exposing the data folder under /files.
+ */
+@Configuration
+public class InfraManagerServletConfig {
+
+ // Session timeout in seconds (30 minutes).
+ private static final Integer SESSION_TIMEOUT = 60 * 30;
+ private static final String INFRA_MANAGER_SESSIONID = "INFRAMANAGER_SESSIONID";
+ private static final String INFRA_MANAGER_APPLICATION_NAME = "infra-manager";
+
+ // HTTP port of the embedded container; overridable via infra-manager.server.port.
+ @Value("${infra-manager.server.port:61890}")
+ private int port;
+
+ @Inject
+ private ServerProperties serverProperties;
+
+ @Inject
+ private InfraManagerDataConfig infraManagerDataConfig;
+
+
+ /** Jersey servlet for the REST API; the resource config class is passed as an init parameter. */
+ @Bean
+ public ServletRegistrationBean jerseyServlet() {
+ ServletRegistrationBean jerseyServletBean = new ServletRegistrationBean(new ServletContainer(), "/api/v1/*");
+ jerseyServletBean.addInitParameter(ServletProperties.JAXRS_APPLICATION_CLASS, InfraManagerJerseyResourceConfig.class.getName());
+ return jerseyServletBean;
+ }
+
+ /** Static file servlet exposing the manager's data folder with directory listing enabled. */
+ @Bean
+ public ServletRegistrationBean dataServlet() {
+ ServletRegistrationBean dataServletBean = new ServletRegistrationBean(new DefaultServlet(), "/files/*");
+ dataServletBean.addInitParameter("dirAllowed","true");
+ dataServletBean.addInitParameter("pathInfoOnly","true");
+ dataServletBean.addInitParameter("resourceBase", infraManagerDataConfig.getDataFolder());
+ return dataServletBean;
+ }
+
+ /** Jetty container factory applying the configured port, session timeout and cookie name. */
+ @Bean
+ public EmbeddedServletContainerFactory containerFactory() {
+ final JettyEmbeddedServletContainerFactory jettyEmbeddedServletContainerFactory = new JettyEmbeddedServletContainerFactory() {
+ @Override
+ protected JettyEmbeddedServletContainer getJettyEmbeddedServletContainer(Server server) {
+ // Use the server exactly as prepared by Spring Boot; no extra wrapping needed.
+ return new JettyEmbeddedServletContainer(server);
+ }
+ };
+ jettyEmbeddedServletContainerFactory.setSessionTimeout(SESSION_TIMEOUT);
+ serverProperties.getSession().getCookie().setName(INFRA_MANAGER_SESSIONID);
+ serverProperties.setDisplayName(INFRA_MANAGER_APPLICATION_NAME);
+ jettyEmbeddedServletContainerFactory.setPort(port);
+ return jettyEmbeddedServletContainerFactory;
+ }
+}
diff --git a/ambari-infra-manager/src/main/java/org/apache/ambari/infra/conf/StaticResourceConfiguration.java b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/conf/StaticResourceConfiguration.java
new file mode 100644
index 00000000..f0cd3cf3
--- /dev/null
+++ b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/conf/StaticResourceConfiguration.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.conf;
+
+import org.springframework.context.annotation.Configuration;
+import org.springframework.web.servlet.config.annotation.EnableWebMvc;
+import org.springframework.web.servlet.config.annotation.ResourceHandlerRegistry;
+import org.springframework.web.servlet.config.annotation.ViewControllerRegistry;
+import org.springframework.web.servlet.config.annotation.WebMvcConfigurerAdapter;
+
+/**
+ * Spring MVC configuration for the static web UI: resolves every request against the
+ * bundled classpath locations and forwards "/" and "/docs" to the shipped HTML pages.
+ */
+@EnableWebMvc
+@Configuration
+public class StaticResourceConfiguration extends WebMvcConfigurerAdapter {
+
+ // Locations searched (in this order) when resolving a static resource request.
+ private static final String[] CLASSPATH_RESOURCE_LOCATIONS = {
+ "classpath:/static/", "classpath:/swagger/","classpath:META-INF/resources/webjars/"
+ };
+
+ @Override
+ public void addResourceHandlers(ResourceHandlerRegistry registry) {
+ registry.addResourceHandler("/**")
+ .addResourceLocations(CLASSPATH_RESOURCE_LOCATIONS);
+ }
+
+ @Override
+ public void addViewControllers(ViewControllerRegistry registry) {
+ // "/" serves the UI entry page, "/docs" the Swagger documentation page.
+ registry.addViewController("/").setViewName(
+ "forward:/index.html");
+ registry.addViewController("/docs").setViewName(
+ "forward:/swagger.html");
+ }
+
+}
diff --git a/ambari-infra-manager/src/main/java/org/apache/ambari/infra/conf/batch/InfraManagerBatchConfig.java b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/conf/batch/InfraManagerBatchConfig.java
new file mode 100644
index 00000000..b1169b4f
--- /dev/null
+++ b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/conf/batch/InfraManagerBatchConfig.java
@@ -0,0 +1,207 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.conf.batch;
+
+import org.springframework.batch.admin.service.JdbcSearchableJobExecutionDao;
+import org.springframework.batch.admin.service.JdbcSearchableJobInstanceDao;
+import org.springframework.batch.admin.service.JdbcSearchableStepExecutionDao;
+import org.springframework.batch.admin.service.JobService;
+import org.springframework.batch.admin.service.SearchableJobExecutionDao;
+import org.springframework.batch.admin.service.SearchableJobInstanceDao;
+import org.springframework.batch.admin.service.SearchableStepExecutionDao;
+import org.springframework.batch.admin.service.SimpleJobService;
+import org.springframework.batch.core.configuration.JobRegistry;
+import org.springframework.batch.core.configuration.annotation.EnableBatchProcessing;
+import org.springframework.batch.core.configuration.support.JobRegistryBeanPostProcessor;
+import org.springframework.batch.core.explore.JobExplorer;
+import org.springframework.batch.core.launch.JobLauncher;
+import org.springframework.batch.core.launch.JobOperator;
+import org.springframework.batch.core.launch.support.SimpleJobLauncher;
+import org.springframework.batch.core.launch.support.SimpleJobOperator;
+import org.springframework.batch.core.repository.ExecutionContextSerializer;
+import org.springframework.batch.core.repository.JobRepository;
+import org.springframework.batch.core.repository.dao.ExecutionContextDao;
+import org.springframework.batch.core.repository.dao.Jackson2ExecutionContextStringSerializer;
+import org.springframework.batch.core.repository.dao.JdbcExecutionContextDao;
+import org.springframework.batch.core.repository.support.JobRepositoryFactoryBean;
+import org.springframework.batch.support.transaction.ResourcelessTransactionManager;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.core.io.Resource;
+import org.springframework.core.task.SimpleAsyncTaskExecutor;
+import org.springframework.jdbc.core.JdbcTemplate;
+import org.springframework.jdbc.datasource.DriverManagerDataSource;
+import org.springframework.jdbc.datasource.init.DataSourceInitializer;
+import org.springframework.jdbc.datasource.init.ResourceDatabasePopulator;
+import org.springframework.scheduling.annotation.EnableAsync;
+import org.springframework.scheduling.annotation.EnableScheduling;
+import org.springframework.transaction.PlatformTransactionManager;
+
+import javax.inject.Inject;
+import javax.sql.DataSource;
+import java.net.MalformedURLException;
+
+@Configuration
+@EnableBatchProcessing
+@EnableScheduling
+@EnableAsync
+public class InfraManagerBatchConfig {
+
+ @Value("classpath:org/springframework/batch/core/schema-drop-sqlite.sql")
+ private Resource dropRepositoryTables;
+
+ @Value("classpath:org/springframework/batch/core/schema-sqlite.sql")
+ private Resource dataRepositorySchema;
+
+ @Value("${infra-manager.batch.db.init:false}")
+ private boolean dropDatabaseOnStartup;
+
+ @Value("${infra-manager.batch.db.file:/etc/ambari-inra-manager/conf/repository.db}")
+ private String sqliteDbFileLocation;
+
+ @Value("${infra-manager.batch.db.username}")
+ private String databaseUsername;
+
+ @Value("${infra-manager.batch.db.password}")
+ private String databasePassword;
+
+ @Inject
+ private JobRegistry jobRegistry;
+
+ @Inject
+ private JobExplorer jobExplorer;
+
+ @Bean
+ public DataSource dataSource() {
+ DriverManagerDataSource dataSource = new DriverManagerDataSource();
+ dataSource.setDriverClassName("org.sqlite.JDBC");
+ dataSource.setUrl("jdbc:sqlite:" + sqliteDbFileLocation);
+ dataSource.setUsername(databaseUsername);
+ dataSource.setPassword(databasePassword);
+ return dataSource;
+ }
+
+ @Bean
+ public DataSourceInitializer dataSourceInitializer(DataSource dataSource)
+ throws MalformedURLException {
+ ResourceDatabasePopulator databasePopulator = new ResourceDatabasePopulator();
+ if (dropDatabaseOnStartup) {
+ databasePopulator.addScript(dropRepositoryTables);
+ databasePopulator.setIgnoreFailedDrops(true);
+ }
+ databasePopulator.addScript(dataRepositorySchema);
+ databasePopulator.setContinueOnError(true);
+
+ DataSourceInitializer initializer = new DataSourceInitializer();
+ initializer.setDataSource(dataSource);
+ initializer.setDatabasePopulator(databasePopulator);
+
+ return initializer;
+ }
+
+ @Bean
+ public ExecutionContextSerializer executionContextSerializer() {
+ return new Jackson2ExecutionContextStringSerializer();
+ }
+
+ @Bean
+ public JobRepository jobRepository() throws Exception {
+ JobRepositoryFactoryBean factory = new JobRepositoryFactoryBean();
+ factory.setDataSource(dataSource());
+ factory.setTransactionManager(getTransactionManager());
+ factory.setSerializer(executionContextSerializer());
+ factory.afterPropertiesSet();
+ return factory.getObject();
+ }
+
+ @Bean
+ public PlatformTransactionManager getTransactionManager() {
+ return new ResourcelessTransactionManager();
+ }
+
+ @Bean(name = "jobLauncher")
+ public JobLauncher jobLauncher() throws Exception {
+ SimpleJobLauncher jobLauncher = new SimpleJobLauncher();
+ jobLauncher.setJobRepository(jobRepository());
+ jobLauncher.setTaskExecutor(new SimpleAsyncTaskExecutor());
+ jobLauncher.afterPropertiesSet();
+ return jobLauncher;
+ }
+
+ @Bean
+ public JobOperator jobOperator() throws Exception {
+ SimpleJobOperator jobOperator = new SimpleJobOperator();
+ jobOperator.setJobExplorer(jobExplorer);
+ jobOperator.setJobLauncher(jobLauncher());
+ jobOperator.setJobRegistry(jobRegistry);
+ jobOperator.setJobRepository(jobRepository());
+ return jobOperator;
+ }
+
+ @Bean
+ public JobRegistryBeanPostProcessor jobRegistryBeanPostProcessor() {
+ JobRegistryBeanPostProcessor jobRegistryBeanPostProcessor = new JobRegistryBeanPostProcessor();
+ jobRegistryBeanPostProcessor.setJobRegistry(jobRegistry);
+ return jobRegistryBeanPostProcessor;
+ }
+
+ @Bean
+ public JdbcTemplate jdbcTemplate() {
+ return new JdbcTemplate(dataSource());
+ }
+
+ @Bean
+ public SearchableJobInstanceDao searchableJobInstanceDao() {
+ JdbcSearchableJobInstanceDao dao = new JdbcSearchableJobInstanceDao();
+ dao.setJdbcTemplate(jdbcTemplate());
+ return dao;
+ }
+
+ @Bean
+ public SearchableJobExecutionDao searchableJobExecutionDao() {
+ JdbcSearchableJobExecutionDao dao = new JdbcSearchableJobExecutionDao();
+ dao.setJdbcTemplate(jdbcTemplate());
+ dao.setDataSource(dataSource());
+ return dao;
+ }
+
+ @Bean
+ public SearchableStepExecutionDao searchableStepExecutionDao() {
+ JdbcSearchableStepExecutionDao dao = new JdbcSearchableStepExecutionDao();
+ dao.setDataSource(dataSource());
+ dao.setJdbcTemplate(jdbcTemplate());
+ return dao;
+ }
+
+ @Bean
+ public ExecutionContextDao executionContextDao() {
+ JdbcExecutionContextDao dao = new JdbcExecutionContextDao();
+ dao.setSerializer(executionContextSerializer());
+ dao.setJdbcTemplate(jdbcTemplate());
+ return dao;
+ }
+
+ @Bean
+ public JobService jobService() throws Exception {
+ return new
+ SimpleJobService(searchableJobInstanceDao(), searchableJobExecutionDao(), searchableStepExecutionDao(),
+ jobRepository(), jobLauncher(), jobRegistry, executionContextDao());
+ }
+}
diff --git a/ambari-infra-manager/src/main/java/org/apache/ambari/infra/doc/InfraManagerApiDocStorage.java b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/doc/InfraManagerApiDocStorage.java
new file mode 100644
index 00000000..e536d9a3
--- /dev/null
+++ b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/doc/InfraManagerApiDocStorage.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.doc;
+
+import io.swagger.jaxrs.config.BeanConfig;
+import io.swagger.models.Swagger;
+import io.swagger.util.Yaml;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.annotation.PostConstruct;
+import javax.inject.Inject;
+import javax.inject.Named;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+@Named
+public class InfraManagerApiDocStorage {
+
+ private static final Logger LOG = LoggerFactory.getLogger(InfraManagerApiDocStorage.class);
+
+ private final Map swaggerMap = new ConcurrentHashMap<>();
+
+ @Inject
+ private BeanConfig beanConfig;
+
+ @PostConstruct
+ private void postConstruct() {
+ Thread loadApiDocThread = new Thread("load_swagger_api_doc") {
+ @Override
+ public void run() {
+ LOG.info("Start thread to scan REST API doc from endpoints.");
+ Swagger swagger = beanConfig.getSwagger();
+ beanConfig.configure(swagger);
+ beanConfig.scanAndRead();
+ setSwagger(swagger);
+ try {
+ if (swagger != null) {
+ String yaml = Yaml.mapper().writeValueAsString(swagger);
+ StringBuilder b = new StringBuilder();
+ String[] parts = yaml.split("\n");
+ for (String part : parts) {
+ b.append(part);
+ b.append("\n");
+ }
+ setSwaggerYaml(b.toString());
+ }
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ LOG.info("Scanning REST API endpoints and generating docs has been successful.");
+ }
+ };
+ loadApiDocThread.setDaemon(true);
+ loadApiDocThread.start();
+ }
+
+ public Swagger getSwagger() {
+ return (Swagger) swaggerMap.get("swaggerObject");
+ }
+
+ public void setSwagger(final Swagger swagger) {
+ swaggerMap.put("swaggerObject", swagger);
+ }
+
+ public void setSwaggerYaml(final String swaggerYaml) {
+ swaggerMap.put("swaggerYaml", swaggerYaml);
+ }
+
+ public String getSwaggerYaml() {
+ return (String) swaggerMap.get("swaggerYaml");
+ }
+
+}
diff --git a/ambari-infra-manager/src/main/java/org/apache/ambari/infra/job/dummy/DummyItemProcessor.java b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/job/dummy/DummyItemProcessor.java
new file mode 100644
index 00000000..a124e4d1
--- /dev/null
+++ b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/job/dummy/DummyItemProcessor.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.job.dummy;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.batch.item.ItemProcessor;
+
+/**
+ * Example processor: logs the input, sleeps 10 seconds to simulate work and renders the
+ * two fields into one comma separated line. (Restored the stripped generic parameters.)
+ */
+public class DummyItemProcessor implements ItemProcessor<DummyObject, String> {
+
+ private static final Logger LOG = LoggerFactory.getLogger(DummyItemProcessor.class);
+
+ @Override
+ public String process(DummyObject input) throws Exception {
+ LOG.info("Dummy processing, f1: {}, f2: {}. wait 10 seconds", input.getF1(), input.getF2());
+ Thread.sleep(10000);
+ return String.format("%s, %s", input.getF1(), input.getF2());
+ }
+
+}
diff --git a/ambari-infra-manager/src/main/java/org/apache/ambari/infra/job/dummy/DummyItemWriter.java b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/job/dummy/DummyItemWriter.java
new file mode 100644
index 00000000..89ad0130
--- /dev/null
+++ b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/job/dummy/DummyItemWriter.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.job.dummy;
+
+import org.apache.ambari.infra.conf.InfraManagerDataConfig;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.batch.core.StepExecution;
+import org.springframework.batch.core.annotation.BeforeStep;
+import org.springframework.batch.item.ItemWriter;
+
+import javax.inject.Inject;
+import java.io.File;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Date;
+import java.util.List;
+
+/**
+ * Example writer: writes each chunk of processed lines into a fresh timestamped directory
+ * below the configured data folder and records the output file location in both the step
+ * and the job execution context. (Restored the stripped generic parameters.)
+ */
+public class DummyItemWriter implements ItemWriter<String> {
+
+ private static final Logger LOG = LoggerFactory.getLogger(DummyItemWriter.class);
+
+ // Captured in saveStepExecution() so write() can publish the output location.
+ private StepExecution stepExecution;
+
+ @Inject
+ private InfraManagerDataConfig infraManagerDataConfig;
+
+ @Override
+ public void write(List<? extends String> values) throws Exception {
+ LOG.info("DummyItem writer called (values: {})... wait 1 seconds", values.toString());
+ Thread.sleep(1000);
+ String outputDirectoryLocation = String.format("%s%s%s%s", infraManagerDataConfig.getDataFolder(), File.separator, "dummyOutput-", new Date().getTime());
+ Path pathToDirectory = Paths.get(outputDirectoryLocation);
+ Path pathToFile = Paths.get(String.format("%s%s%s", outputDirectoryLocation, File.separator, "dummyOutput.txt"));
+ Files.createDirectories(pathToDirectory);
+ LOG.info("Write location to step execution context...");
+ stepExecution.getExecutionContext().put("stepOutputLocation", pathToFile.toAbsolutePath().toString());
+ LOG.info("Write location to job execution context...");
+ stepExecution.getJobExecution().getExecutionContext().put("jobOutputLocation", pathToFile.toAbsolutePath().toString());
+ LOG.info("Write to file: {}", pathToFile.toAbsolutePath());
+ // Explicit charset instead of the platform default to make the output file portable.
+ Files.write(pathToFile, values.toString().getBytes(java.nio.charset.StandardCharsets.UTF_8));
+ }
+
+ @BeforeStep
+ public void saveStepExecution(StepExecution stepExecution) {
+ this.stepExecution = stepExecution;
+ }
+}
diff --git a/ambari-infra-manager/src/main/java/org/apache/ambari/infra/job/dummy/DummyJobConfiguration.java b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/job/dummy/DummyJobConfiguration.java
new file mode 100644
index 00000000..a4f53696
--- /dev/null
+++ b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/job/dummy/DummyJobConfiguration.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.infra.job.dummy;
+
+import javax.inject.Inject;
+
+import org.springframework.batch.core.Job;
+import org.springframework.batch.core.Step;
+import org.springframework.batch.core.configuration.annotation.JobBuilderFactory;
+import org.springframework.batch.core.configuration.annotation.StepBuilderFactory;
+import org.springframework.batch.item.ItemProcessor;
+import org.springframework.batch.item.ItemReader;
+import org.springframework.batch.item.ItemWriter;
+import org.springframework.batch.item.file.FlatFileItemReader;
+import org.springframework.batch.item.file.LineMapper;
+import org.springframework.batch.item.file.mapping.BeanWrapperFieldSetMapper;
+import org.springframework.batch.item.file.mapping.DefaultLineMapper;
+import org.springframework.batch.item.file.mapping.FieldSetMapper;
+import org.springframework.batch.item.file.transform.DelimitedLineTokenizer;
+import org.springframework.batch.item.file.transform.LineTokenizer;
+import org.springframework.beans.factory.annotation.Qualifier;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.core.io.ClassPathResource;
+
+@Configuration
+public class DummyJobConfiguration {
+ @Inject
+ private StepBuilderFactory steps;
+
+ @Inject
+ private JobBuilderFactory jobs;
+
+ @Bean(name = "dummyStep")
+ protected Step dummyStep(ItemReader reader,
+ ItemProcessor processor,
+ @Qualifier("dummyItemWriter") ItemWriter writer) {
+ return steps.get("dummyStep").listener(new DummyStepListener()). chunk(2)
+ .reader(reader).processor(processor).writer(writer).build();
+ }
+
+ @Bean(name = "dummyJob")
+ public Job job(@Qualifier("dummyStep") Step dummyStep) {
+ return jobs.get("dummyJob").listener(new DummyJobListener()).start(dummyStep).build();
+ }
+
+ @Bean
+ public ItemReader dummyItemReader() {
+ FlatFileItemReader csvFileReader = new FlatFileItemReader<>();
+ csvFileReader.setResource(new ClassPathResource("dummy/dummy.txt"));
+ csvFileReader.setLinesToSkip(1);
+ LineMapper lineMapper = dummyLineMapper();
+ csvFileReader.setLineMapper(lineMapper);
+ return csvFileReader;
+ }
+
+ @Bean
+ public ItemProcessor dummyItemProcessor() {
+ return new DummyItemProcessor();
+ }
+
+ @Bean(name = "dummyItemWriter")
+ public ItemWriter dummyItemWriter() {
+ return new DummyItemWriter();
+ }
+
+ private LineMapper dummyLineMapper() {
+ DefaultLineMapper lineMapper = new DefaultLineMapper<>();
+
+ LineTokenizer dummyTokenizer = dummyTokenizer();
+ lineMapper.setLineTokenizer(dummyTokenizer);
+
+ FieldSetMapper dummyFieldSetMapper = dummyFieldSetMapper();
+ lineMapper.setFieldSetMapper(dummyFieldSetMapper);
+
+ return lineMapper;
+ }
+
+ private FieldSetMapper dummyFieldSetMapper() {
+ BeanWrapperFieldSetMapper studentInformationMapper = new BeanWrapperFieldSetMapper<>();
+ studentInformationMapper.setTargetType(DummyObject.class);
+ return studentInformationMapper;
+ }
+
+ private LineTokenizer dummyTokenizer() {
+ DelimitedLineTokenizer studentLineTokenizer = new DelimitedLineTokenizer();
+ studentLineTokenizer.setDelimiter(",");
+ studentLineTokenizer.setNames(new String[]{"f1", "f2"});
+ return studentLineTokenizer;
+ }
+}
diff --git a/ambari-infra-manager/src/main/java/org/apache/ambari/infra/job/dummy/DummyJobListener.java b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/job/dummy/DummyJobListener.java
new file mode 100644
index 00000000..99c50e80
--- /dev/null
+++ b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/job/dummy/DummyJobListener.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.job.dummy;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.batch.core.ExitStatus;
+import org.springframework.batch.core.JobExecution;
+import org.springframework.batch.core.JobExecutionListener;
+
+/**
+ * Job-level listener for the dummy job: logs the lifecycle and, when the writer recorded a
+ * "jobOutputLocation" in the job execution context, surfaces it as the exit description.
+ */
+public class DummyJobListener implements JobExecutionListener {
+
+ private static final Logger LOG = LoggerFactory.getLogger(DummyJobListener.class);
+
+ @Override
+ public void beforeJob(JobExecution jobExecution) {
+ LOG.info("Dummy - before job execution");
+ }
+
+ @Override
+ public void afterJob(JobExecution jobExecution) {
+ LOG.info("Dummy - after job execution");
+ if (jobExecution.getExecutionContext().get("jobOutputLocation") != null) {
+ String jobOutputLocation = (String) jobExecution.getExecutionContext().get("jobOutputLocation");
+ // Expose the generated output file as a file:// URL in the exit status description.
+ String exitDescription = "file://" + jobOutputLocation;
+ LOG.info("Add exit description '{}'", exitDescription);
+ jobExecution.setExitStatus(new ExitStatus(ExitStatus.COMPLETED.getExitCode(), exitDescription));
+ }
+ }
+}
diff --git a/ambari-infra-manager/src/main/java/org/apache/ambari/infra/job/dummy/DummyObject.java b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/job/dummy/DummyObject.java
new file mode 100644
index 00000000..ce087dd1
--- /dev/null
+++ b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/job/dummy/DummyObject.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.job.dummy;
+
+/**
+ * Simple two-field bean representing one line of the dummy CSV input; fields are
+ * populated by name (f1, f2) via BeanWrapperFieldSetMapper.
+ */
+public class DummyObject {
+ private String f1;
+ private String f2;
+
+ public String getF1() {
+ return f1;
+ }
+
+ public void setF1(String f1) {
+ this.f1 = f1;
+ }
+
+ public String getF2() {
+ return f2;
+ }
+
+ public void setF2(String f2) {
+ this.f2 = f2;
+ }
+}
diff --git a/ambari-infra-manager/src/main/java/org/apache/ambari/infra/job/dummy/DummyStepListener.java b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/job/dummy/DummyStepListener.java
new file mode 100644
index 00000000..548e6504
--- /dev/null
+++ b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/job/dummy/DummyStepListener.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.job.dummy;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.batch.core.ExitStatus;
+import org.springframework.batch.core.StepExecution;
+import org.springframework.batch.core.StepExecutionListener;
+
+/**
+ * Step-level listener for the dummy job: logs before/after execution and passes the
+ * step's own exit status through unchanged.
+ */
+public class DummyStepListener implements StepExecutionListener {
+
+ private static final Logger LOG = LoggerFactory.getLogger(DummyStepListener.class);
+
+ @Override
+ public void beforeStep(StepExecution stepExecution) {
+ LOG.info("Dummy step - before step execution");
+ }
+
+ @Override
+ public ExitStatus afterStep(StepExecution stepExecution) {
+ LOG.info("Dummy step - after step execution");
+ // Do not alter the outcome; return the status the step already computed.
+ return stepExecution.getExitStatus();
+ }
+}
diff --git a/ambari-infra-manager/src/main/java/org/apache/ambari/infra/manager/JobManager.java b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/manager/JobManager.java
new file mode 100644
index 00000000..fc0a4f71
--- /dev/null
+++ b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/manager/JobManager.java
@@ -0,0 +1,274 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.manager;
+
+import com.google.common.collect.Lists;
+import org.apache.ambari.infra.model.ExecutionContextResponse;
+import org.apache.ambari.infra.model.JobDetailsResponse;
+import org.apache.ambari.infra.model.JobExecutionDetailsResponse;
+import org.apache.ambari.infra.model.JobExecutionInfoResponse;
+import org.apache.ambari.infra.model.JobInstanceDetailsResponse;
+import org.apache.ambari.infra.model.JobOperationParams;
+import org.apache.ambari.infra.model.StepExecutionContextResponse;
+import org.apache.ambari.infra.model.StepExecutionInfoResponse;
+import org.apache.ambari.infra.model.StepExecutionProgressResponse;
+import org.springframework.batch.admin.history.StepExecutionHistory;
+import org.springframework.batch.admin.service.JobService;
+import org.springframework.batch.admin.service.NoSuchStepExecutionException;
+import org.springframework.batch.admin.web.JobInfo;
+import org.springframework.batch.admin.web.StepExecutionProgress;
+import org.springframework.batch.core.JobExecution;
+import org.springframework.batch.core.JobInstance;
+import org.springframework.batch.core.JobParametersBuilder;
+import org.springframework.batch.core.JobParametersInvalidException;
+import org.springframework.batch.core.StepExecution;
+import org.springframework.batch.core.launch.JobExecutionNotRunningException;
+import org.springframework.batch.core.launch.JobInstanceAlreadyExistsException;
+import org.springframework.batch.core.launch.JobOperator;
+import org.springframework.batch.core.launch.NoSuchJobException;
+import org.springframework.batch.core.launch.NoSuchJobExecutionException;
+import org.springframework.batch.core.launch.NoSuchJobInstanceException;
+import org.springframework.batch.core.repository.JobExecutionAlreadyRunningException;
+import org.springframework.batch.core.repository.JobInstanceAlreadyCompleteException;
+import org.springframework.batch.core.repository.JobRestartException;
+
+import javax.inject.Inject;
+import javax.inject.Named;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TimeZone;
+
+@Named
+public class JobManager {
+
+ @Inject
+ private JobService jobService;
+
+ @Inject
+ private JobOperator jobOperator;
+
+ private TimeZone timeZone = TimeZone.getDefault();
+
+ public Set getAllJobNames() {
+ return jobOperator.getJobNames();
+ }
+
+ /**
+ * Launch a new job instance (based on job name) and applies customized parameters to it.
+ * Also add a new date parameter to make sure the job instance will be unique
+ */
+ public JobExecutionInfoResponse launchJob(String jobName, String params)
+ throws JobParametersInvalidException, JobInstanceAlreadyExistsException, NoSuchJobException,
+ JobExecutionAlreadyRunningException, JobRestartException, JobInstanceAlreadyCompleteException {
+ // TODO: handle params
+ JobParametersBuilder jobParametersBuilder = new JobParametersBuilder();
+ jobParametersBuilder.addDate("date", new Date());
+ return new JobExecutionInfoResponse(jobService.launch(jobName, jobParametersBuilder.toJobParameters()), timeZone);
+ }
+
+ /**
+ * Get all executions ids that mapped to specific job name,
+ */
+ public Set getExecutionIdsByJobName(String jobName) throws NoSuchJobException {
+ return jobOperator.getRunningExecutions(jobName);
+ }
+
+ /**
+ * Stop all running job executions and returns with the number of stopped jobs.
+ */
+ public Integer stopAllJobs() {
+ return jobService.stopAll();
+ }
+
+ /**
+ * Gather job execution details by job execution id.
+ */
+ public JobExecutionDetailsResponse getExectionInfo(Long jobExecutionId) throws NoSuchJobExecutionException {
+ JobExecution jobExecution = jobService.getJobExecution(jobExecutionId);
+ List stepExecutionInfos = new ArrayList();
+ for (StepExecution stepExecution : jobExecution.getStepExecutions()) {
+ stepExecutionInfos.add(new StepExecutionInfoResponse(stepExecution, timeZone));
+ }
+ Collections.sort(stepExecutionInfos, new Comparator() {
+ @Override
+ public int compare(StepExecutionInfoResponse o1, StepExecutionInfoResponse o2) {
+ return o1.getId().compareTo(o2.getId());
+ }
+ });
+ return new JobExecutionDetailsResponse(new JobExecutionInfoResponse(jobExecution, timeZone), stepExecutionInfos);
+ }
+
+ /**
+ * Stop or abandon a running job execution by job execution id
+ */
+ public JobExecutionInfoResponse stopOrAbandonJobByExecutionId(Long jobExecutionId, JobOperationParams.JobStopOrAbandonOperationParam operation)
+ throws NoSuchJobExecutionException, JobExecutionNotRunningException, JobExecutionAlreadyRunningException {
+ JobExecution jobExecution;
+ if (JobOperationParams.JobStopOrAbandonOperationParam.STOP.equals(operation)) {
+ jobExecution = jobService.stop(jobExecutionId);
+ } else if (JobOperationParams.JobStopOrAbandonOperationParam.ABANDON.equals(operation)) {
+ jobExecution = jobService.abandon(jobExecutionId);
+ } else {
+ throw new UnsupportedOperationException("Unsupported operaration");
+ }
+ return new JobExecutionInfoResponse(jobExecution, timeZone);
+ }
+
+ /**
+ * Get execution context for a job execution instance. (context can be shipped between job executions)
+ */
+ public ExecutionContextResponse getExecutionContextByJobExecutionId(Long executionId) throws NoSuchJobExecutionException {
+ JobExecution jobExecution = jobService.getJobExecution(executionId);
+ Map executionMap = new HashMap<>();
+ for (Map.Entry entry : jobExecution.getExecutionContext().entrySet()) {
+ executionMap.put(entry.getKey(), entry.getValue());
+ }
+ return new ExecutionContextResponse(executionId, executionMap);
+ }
+
+ /**
+ * Restart a specific job instance with the same parameters. (only restart operation is supported here)
+ */
+ public JobExecutionInfoResponse restart(Long jobInstanceId, String jobName,
+ JobOperationParams.JobRestartOperationParam operation) throws NoSuchJobException, JobParametersInvalidException,
+ JobExecutionAlreadyRunningException, JobRestartException, JobInstanceAlreadyCompleteException, NoSuchJobExecutionException {
+ if (JobOperationParams.JobRestartOperationParam.RESTART.equals(operation)) {
+ Collection jobExecutions = jobService.getJobExecutionsForJobInstance(jobName, jobInstanceId);
+ JobExecution jobExecution = jobExecutions.iterator().next();
+ Long jobExecutionId = jobExecution.getId();
+ return new JobExecutionInfoResponse(jobService.restart(jobExecutionId), timeZone);
+ } else {
+ throw new UnsupportedOperationException("Unsupported operation (try: RESTART)");
+ }
+ }
+
+ /**
+ * Get all job details. (paged)
+ */
+ public List getAllJobs(int start, int pageSize) {
+ List jobs = new ArrayList<>();
+ Collection names = jobService.listJobs(start, pageSize);
+ for (String name : names) {
+ int count = 0;
+ try {
+ count = jobService.countJobExecutionsForJob(name);
+ }
+ catch (NoSuchJobException e) {
+ // shouldn't happen
+ }
+ boolean launchable = jobService.isLaunchable(name);
+ boolean incrementable = jobService.isIncrementable(name);
+ jobs.add(new JobInfo(name, count, null, launchable, incrementable));
+ }
+ return jobs;
+ }
+
+ /**
+ * Get all executions for unique job instance.
+ */
+ public List getExecutionsForJobInstance(String jobName, Long jobInstanceId) throws NoSuchJobInstanceException, NoSuchJobException {
+ List result = Lists.newArrayList();
+ JobInstance jobInstance = jobService.getJobInstance(jobInstanceId);
+ Collection jobExecutions = jobService.getJobExecutionsForJobInstance(jobName, jobInstance.getInstanceId());
+ for (JobExecution jobExecution : jobExecutions) {
+ result.add(new JobExecutionInfoResponse(jobExecution, timeZone));
+ }
+ return result;
+ }
+
+ /**
+ * Get job details for a specific job. (paged)
+ */
+ public JobDetailsResponse getJobDetails(String jobName, int page, int size) throws NoSuchJobException {
+ List jobInstanceResponses = Lists.newArrayList();
+ Collection jobInstances = jobService.listJobInstances(jobName, page, size);
+
+ int count = jobService.countJobExecutionsForJob(jobName);
+ boolean launchable = jobService.isLaunchable(jobName);
+ boolean isIncrementable = jobService.isIncrementable(jobName);
+
+ for (JobInstance jobInstance: jobInstances) {
+ List executionInfos = Lists.newArrayList();
+ Collection jobExecutions = jobService.getJobExecutionsForJobInstance(jobName, jobInstance.getId());
+ if (jobExecutions != null) {
+ for (JobExecution jobExecution : jobExecutions) {
+ executionInfos.add(new JobExecutionInfoResponse(jobExecution, timeZone));
+ }
+ }
+ jobInstanceResponses.add(new JobInstanceDetailsResponse(jobInstance, executionInfos));
+ }
+ return new JobDetailsResponse(new JobInfo(jobName, count, launchable, isIncrementable), jobInstanceResponses);
+ }
+
+ /**
+ * Get step execution details based for job execution id and step execution id.
+ */
+ public StepExecutionInfoResponse getStepExecution(Long jobExecutionId, Long stepExecutionId) throws NoSuchStepExecutionException, NoSuchJobExecutionException {
+ StepExecution stepExecution = jobService.getStepExecution(jobExecutionId, stepExecutionId);
+ return new StepExecutionInfoResponse(stepExecution, timeZone);
+ }
+
+ /**
+ * Get step execution context details. (execution context can be shipped between steps)
+ */
+ public StepExecutionContextResponse getStepExecutionContext(Long jobExecutionId, Long stepExecutionId) throws NoSuchStepExecutionException, NoSuchJobExecutionException {
+ StepExecution stepExecution = jobService.getStepExecution(jobExecutionId, stepExecutionId);
+ Map executionMap = new HashMap<>();
+ for (Map.Entry entry : stepExecution.getExecutionContext().entrySet()) {
+ executionMap.put(entry.getKey(), entry.getValue());
+ }
+ return new StepExecutionContextResponse(executionMap, jobExecutionId, stepExecutionId, stepExecution.getStepName());
+ }
+
+ /**
+ * Get step execution progress status detauls.
+ */
+ public StepExecutionProgressResponse getStepExecutionProgress(Long jobExecutionId, Long stepExecutionId) throws NoSuchStepExecutionException, NoSuchJobExecutionException {
+ StepExecution stepExecution = jobService.getStepExecution(jobExecutionId, stepExecutionId);
+ StepExecutionInfoResponse stepExecutionInfoResponse = new StepExecutionInfoResponse(stepExecution, timeZone);
+ String stepName = stepExecution.getStepName();
+ if (stepName.contains(":partition")) {
+ stepName = stepName.replaceAll("(:partition).*", "$1*");
+ }
+ String jobName = stepExecution.getJobExecution().getJobInstance().getJobName();
+ StepExecutionHistory stepExecutionHistory = computeHistory(jobName, stepName);
+ StepExecutionProgress stepExecutionProgress = new StepExecutionProgress(stepExecution, stepExecutionHistory);
+
+ return new StepExecutionProgressResponse(stepExecutionProgress, stepExecutionHistory, stepExecutionInfoResponse);
+
+ }
+
+ private StepExecutionHistory computeHistory(String jobName, String stepName) {
+ int total = jobService.countStepExecutionsForStep(jobName, stepName);
+ StepExecutionHistory stepExecutionHistory = new StepExecutionHistory(stepName);
+ for (int i = 0; i < total; i += 1000) {
+ for (StepExecution stepExecution : jobService.listStepExecutionsForStep(jobName, stepName, i, 1000)) {
+ stepExecutionHistory.append(stepExecution);
+ }
+ }
+ return stepExecutionHistory;
+ }
+}
diff --git a/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/ExecutionContextResponse.java b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/ExecutionContextResponse.java
new file mode 100644
index 00000000..2d46c547
--- /dev/null
+++ b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/ExecutionContextResponse.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import java.util.Map;
+
/**
 * Response DTO holding the execution context (key/value pairs) of a single
 * job execution, keyed by the job execution id it belongs to.
 */
public class ExecutionContextResponse {

  private final Long jobExecutionId;
  private final Map<String, Object> executionContextMap;

  /**
   * @param jobExecutionId      id of the job execution the context belongs to
   * @param executionContextMap snapshot of the execution context entries
   */
  public ExecutionContextResponse(Long jobExecutionId, Map<String, Object> executionContextMap) {
    this.jobExecutionId = jobExecutionId;
    this.executionContextMap = executionContextMap;
  }

  public Long getJobExecutionId() {
    return jobExecutionId;
  }

  public Map<String, Object> getExecutionContextMap() {
    return executionContextMap;
  }
}
diff --git a/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobDetailsResponse.java b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobDetailsResponse.java
new file mode 100644
index 00000000..cd34fefd
--- /dev/null
+++ b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobDetailsResponse.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import org.springframework.batch.admin.web.JobInfo;
+
+import java.util.List;
+
+public class JobDetailsResponse {
+
+ private JobInfo jobInfo;
+ private List jobInstanceDetailsResponseList;
+
+ public JobDetailsResponse() {
+ }
+
+ public JobDetailsResponse(JobInfo jobInfo, List jobInstanceDetailsResponseList) {
+ this.jobInfo = jobInfo;
+ this.jobInstanceDetailsResponseList = jobInstanceDetailsResponseList;
+ }
+
+ public JobInfo getJobInfo() {
+ return jobInfo;
+ }
+
+ public void setJobInfo(JobInfo jobInfo) {
+ this.jobInfo = jobInfo;
+ }
+
+ public List getJobInstanceDetailsResponseList() {
+ return jobInstanceDetailsResponseList;
+ }
+
+ public void setJobInstanceDetailsResponseList(List jobInstanceDetailsResponseList) {
+ this.jobInstanceDetailsResponseList = jobInstanceDetailsResponseList;
+ }
+}
diff --git a/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionDetailsResponse.java b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionDetailsResponse.java
new file mode 100644
index 00000000..695b57f0
--- /dev/null
+++ b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionDetailsResponse.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import java.util.List;
+
+public class JobExecutionDetailsResponse {
+
+ private JobExecutionInfoResponse jobExecutionInfoResponse;
+
+ private List stepExecutionInfoList;
+
+ public JobExecutionDetailsResponse(JobExecutionInfoResponse jobExecutionInfoResponse, List stepExecutionInfoList) {
+ this.jobExecutionInfoResponse = jobExecutionInfoResponse;
+ this.stepExecutionInfoList = stepExecutionInfoList;
+ }
+
+ public JobExecutionInfoResponse getJobExecutionInfoResponse() {
+ return jobExecutionInfoResponse;
+ }
+
+ public void setJobExecutionInfoResponse(JobExecutionInfoResponse jobExecutionInfoResponse) {
+ this.jobExecutionInfoResponse = jobExecutionInfoResponse;
+ }
+
+ public List getStepExecutionInfoList() {
+ return stepExecutionInfoList;
+ }
+
+ public void setStepExecutionInfoList(List stepExecutionInfoList) {
+ this.stepExecutionInfoList = stepExecutionInfoList;
+ }
+}
diff --git a/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionInfoResponse.java b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionInfoResponse.java
new file mode 100644
index 00000000..a7e4a4f0
--- /dev/null
+++ b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionInfoResponse.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import org.apache.ambari.infra.model.wrapper.JobExecutionData;
+import org.springframework.batch.admin.web.JobParametersExtractor;
+import org.springframework.batch.core.BatchStatus;
+import org.springframework.batch.core.JobExecution;
+import org.springframework.batch.core.JobInstance;
+import org.springframework.batch.core.converter.DefaultJobParametersConverter;
+import org.springframework.batch.core.converter.JobParametersConverter;
+
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.Properties;
+import java.util.TimeZone;
+
+public class JobExecutionInfoResponse {
+ private Long id;
+ private int stepExecutionCount;
+ private Long jobId;
+ private String jobName;
+ private String startDate = "";
+ private String startTime = "";
+ private String duration = "";
+ private JobExecutionData jobExecutionData;
+ private Properties jobParameters;
+ private String jobParametersString;
+ private boolean restartable = false;
+ private boolean abandonable = false;
+ private boolean stoppable = false;
+ private final TimeZone timeZone;
+
+
+ public JobExecutionInfoResponse(JobExecution jobExecution, TimeZone timeZone) {
+ JobParametersConverter converter = new DefaultJobParametersConverter();
+ this.jobExecutionData = new JobExecutionData(jobExecution);
+ this.timeZone = timeZone;
+ this.id = jobExecutionData.getId();
+ this.jobId = jobExecutionData.getJobId();
+ this.stepExecutionCount = jobExecutionData.getStepExecutions().size();
+ this.jobParameters = converter.getProperties(jobExecutionData.getJobParameters());
+ this.jobParametersString = (new JobParametersExtractor()).fromJobParameters(jobExecutionData.getJobParameters());
+ JobInstance jobInstance = jobExecutionData.getJobInstance();
+ if(jobInstance != null) {
+ this.jobName = jobInstance.getJobName();
+ BatchStatus endTime = jobExecutionData.getStatus();
+ this.restartable = endTime.isGreaterThan(BatchStatus.STOPPING) && endTime.isLessThan(BatchStatus.ABANDONED);
+ this.abandonable = endTime.isGreaterThan(BatchStatus.STARTED) && endTime != BatchStatus.ABANDONED;
+ this.stoppable = endTime.isLessThan(BatchStatus.STOPPING);
+ } else {
+ this.jobName = "?";
+ }
+
+ SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd");
+ SimpleDateFormat timeFormat = new SimpleDateFormat("HH:mm:ss");
+ SimpleDateFormat durationFormat = new SimpleDateFormat("HH:mm:ss");
+
+ durationFormat.setTimeZone(TimeZone.getTimeZone("GMT"));
+ timeFormat.setTimeZone(timeZone);
+ dateFormat.setTimeZone(timeZone);
+ if(jobExecutionData.getStartTime() != null) {
+ this.startDate = dateFormat.format(jobExecutionData.getStartTime());
+ this.startTime = timeFormat.format(jobExecutionData.getStartTime());
+ Date endTime1 = jobExecutionData.getEndTime() != null? jobExecutionData.getEndTime():new Date();
+ this.duration = durationFormat.format(new Date(endTime1.getTime() - jobExecutionData.getStartTime().getTime()));
+ }
+ }
+
+ public Long getId() {
+ return id;
+ }
+
+ public int getStepExecutionCount() {
+ return stepExecutionCount;
+ }
+
+ public Long getJobId() {
+ return jobId;
+ }
+
+ public String getJobName() {
+ return jobName;
+ }
+
+ public String getStartDate() {
+ return startDate;
+ }
+
+ public String getStartTime() {
+ return startTime;
+ }
+
+ public String getDuration() {
+ return duration;
+ }
+
+ public JobExecutionData getJobExecutionData() {
+ return jobExecutionData;
+ }
+
+ public Properties getJobParameters() {
+ return jobParameters;
+ }
+
+ public String getJobParametersString() {
+ return jobParametersString;
+ }
+
+ public boolean isRestartable() {
+ return restartable;
+ }
+
+ public boolean isAbandonable() {
+ return abandonable;
+ }
+
+ public boolean isStoppable() {
+ return stoppable;
+ }
+
+ public TimeZone getTimeZone() {
+ return timeZone;
+ }
+}
diff --git a/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionRequest.java b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionRequest.java
new file mode 100644
index 00000000..b4c20e9f
--- /dev/null
+++ b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionRequest.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import javax.ws.rs.PathParam;
+
+public class JobExecutionRequest {
+
+ @PathParam("jobName")
+ private String jobName;
+
+ @PathParam("jobInstanceId")
+ private Long jobInstanceId;
+
+ public String getJobName() {
+ return jobName;
+ }
+
+ public Long getJobInstanceId() {
+ return jobInstanceId;
+ }
+
+ public void setJobName(String jobName) {
+ this.jobName = jobName;
+ }
+
+ public void setJobInstanceId(Long jobInstanceId) {
+ this.jobInstanceId = jobInstanceId;
+ }
+}
diff --git a/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionRestartRequest.java b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionRestartRequest.java
new file mode 100644
index 00000000..88687e72
--- /dev/null
+++ b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionRestartRequest.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+public class JobExecutionRestartRequest {
+
+ private String jobName;
+
+ private Long jobInstanceId;
+
+ private JobOperationParams.JobRestartOperationParam operation;
+
+ public String getJobName() {
+ return jobName;
+ }
+
+ public void setJobName(String jobName) {
+ this.jobName = jobName;
+ }
+
+ public Long getJobInstanceId() {
+ return jobInstanceId;
+ }
+
+ public void setJobExecutionId(Long jobExecutionId) {
+ this.jobInstanceId = jobExecutionId;
+ }
+
+ public JobOperationParams.JobRestartOperationParam getOperation() {
+ return operation;
+ }
+
+ public void setOperation(JobOperationParams.JobRestartOperationParam operation) {
+ this.operation = operation;
+ }
+}
diff --git a/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionStopRequest.java b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionStopRequest.java
new file mode 100644
index 00000000..b176f125
--- /dev/null
+++ b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionStopRequest.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import javax.validation.constraints.NotNull;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.QueryParam;
+
+public class JobExecutionStopRequest {
+
+ @PathParam("jobExecutionId")
+ @NotNull
+ private Long jobExecutionId;
+
+ @QueryParam("operation")
+ @NotNull
+ private JobOperationParams.JobStopOrAbandonOperationParam operation;
+
+ public Long getJobExecutionId() {
+ return jobExecutionId;
+ }
+
+ public void setJobExecutionId(Long jobExecutionId) {
+ this.jobExecutionId = jobExecutionId;
+ }
+
+ public JobOperationParams.JobStopOrAbandonOperationParam getOperation() {
+ return operation;
+ }
+
+ public void setOperation(JobOperationParams.JobStopOrAbandonOperationParam operation) {
+ this.operation = operation;
+ }
+}
diff --git a/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobInstanceDetailsResponse.java b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobInstanceDetailsResponse.java
new file mode 100644
index 00000000..af886545
--- /dev/null
+++ b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobInstanceDetailsResponse.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import org.springframework.batch.core.JobInstance;
+
+import java.util.List;
+
+public class JobInstanceDetailsResponse {
+
+ private JobInstance jobInstance;
+
+ private List jobExecutionInfoResponseList;
+
+ public JobInstanceDetailsResponse() {
+ }
+
+ public JobInstanceDetailsResponse(JobInstance jobInstance, List jobExecutionInfoResponseList) {
+ this.jobInstance = jobInstance;
+ this.jobExecutionInfoResponseList = jobExecutionInfoResponseList;
+ }
+
+ public JobInstance getJobInstance() {
+ return jobInstance;
+ }
+
+ public void setJobInstance(JobInstance jobInstance) {
+ this.jobInstance = jobInstance;
+ }
+
+ public List getJobExecutionInfoResponseList() {
+ return jobExecutionInfoResponseList;
+ }
+
+ public void setJobExecutionInfoResponseList(List jobExecutionInfoResponseList) {
+ this.jobExecutionInfoResponseList = jobExecutionInfoResponseList;
+ }
+}
diff --git a/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobInstanceStartRequest.java b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobInstanceStartRequest.java
new file mode 100644
index 00000000..905a4fa6
--- /dev/null
+++ b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobInstanceStartRequest.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import javax.validation.constraints.NotNull;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.QueryParam;
+
+public class JobInstanceStartRequest {
+
+ @PathParam("jobName")
+ @NotNull
+ private String jobName;
+
+ @QueryParam("params")
+ String params;
+
+ public String getJobName() {
+ return jobName;
+ }
+
+ public void setJobName(String jobName) {
+ this.jobName = jobName;
+ }
+
+ public String getParams() {
+ return params;
+ }
+
+ public void setParams(String params) {
+ this.params = params;
+ }
+}
diff --git a/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobOperationParams.java b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobOperationParams.java
new file mode 100644
index 00000000..e286debe
--- /dev/null
+++ b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobOperationParams.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
/**
 * Container for the enum types naming the operations accepted by the
 * job-operation REST endpoints.
 */
public class JobOperationParams {

  /** Operations that terminate a running job execution. */
  public enum JobStopOrAbandonOperationParam {
    STOP,
    ABANDON
  }

  /** Operation that re-runs a stopped or failed job execution. */
  public enum JobRestartOperationParam {
    RESTART
  }
}
diff --git a/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobRequest.java b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobRequest.java
new file mode 100644
index 00000000..b4fd4785
--- /dev/null
+++ b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobRequest.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import javax.validation.constraints.NotNull;
+import javax.ws.rs.PathParam;
+
+public class JobRequest extends PageRequest {
+
+ @NotNull
+ @PathParam("jobName")
+ private String jobName;
+
+ public String getJobName() {
+ return jobName;
+ }
+
+ public void setJobName(String jobName) {
+ this.jobName = jobName;
+ }
+}
diff --git a/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/PageRequest.java b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/PageRequest.java
new file mode 100644
index 00000000..679d4fd8
--- /dev/null
+++ b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/PageRequest.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import javax.ws.rs.DefaultValue;
+import javax.ws.rs.QueryParam;
+
+public class PageRequest {
+
+ @QueryParam("page")
+ @DefaultValue("0")
+ private int page;
+
+ @QueryParam("size")
+ @DefaultValue("20")
+ private int size;
+
+ public int getPage() {
+ return page;
+ }
+
+ public void setPage(int page) {
+ this.page = page;
+ }
+
+ public int getSize() {
+ return size;
+ }
+
+ public void setSize(int size) {
+ this.size = size;
+ }
+}
diff --git a/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/StepExecutionContextResponse.java b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/StepExecutionContextResponse.java
new file mode 100644
index 00000000..0e67a879
--- /dev/null
+++ b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/StepExecutionContextResponse.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import java.util.Map;
+
/**
 * REST response carrying the execution context of one step execution,
 * identified by its job execution id, step execution id and step name.
 */
public class StepExecutionContextResponse {

  // Generified from raw Map: Spring Batch execution contexts are keyed by
  // String with arbitrary Object values; erasure-compatible for callers.
  private Map<String, Object> executionContextMap;

  private Long jobExecutionId;

  private Long stepExecutionId;

  private String stepName;

  /** Default constructor for JSON (de)serialization. */
  public StepExecutionContextResponse() {
  }

  public StepExecutionContextResponse(Map<String, Object> executionContextMap, Long jobExecutionId, Long stepExecutionId, String stepName) {
    this.executionContextMap = executionContextMap;
    this.jobExecutionId = jobExecutionId;
    this.stepExecutionId = stepExecutionId;
    this.stepName = stepName;
  }

  public Map<String, Object> getExecutionContextMap() {
    return executionContextMap;
  }

  public Long getJobExecutionId() {
    return jobExecutionId;
  }

  public Long getStepExecutionId() {
    return stepExecutionId;
  }

  public String getStepName() {
    return stepName;
  }
}
diff --git a/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/StepExecutionInfoResponse.java b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/StepExecutionInfoResponse.java
new file mode 100644
index 00000000..ed04767b
--- /dev/null
+++ b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/StepExecutionInfoResponse.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import org.apache.ambari.infra.model.wrapper.StepExecutionData;
+import org.springframework.batch.core.JobExecution;
+import org.springframework.batch.core.StepExecution;
+
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.TimeZone;
+
/**
 * REST response describing one step execution: identity plus start date/time
 * and duration rendered as display strings in the supplied time zone.
 */
public class StepExecutionInfoResponse {
  private Long id;
  private Long jobExecutionId;
  private String jobName;
  private String name;
  // Display fields default to "-" until a start time is known.
  private String startDate = "-";
  private String startTime = "-";
  private String duration = "-";
  private StepExecutionData stepExecutionData;
  private long durationMillis;

  /**
   * Creates a placeholder response for a step with no persisted execution,
   * backed by a freshly constructed StepExecution.
   * NOTE(review): the timeZone parameter is unused in this constructor — confirm intentional.
   */
  public StepExecutionInfoResponse(String jobName, Long jobExecutionId, String name, TimeZone timeZone) {
    this.jobName = jobName;
    this.jobExecutionId = jobExecutionId;
    this.name = name;
    this.stepExecutionData = new StepExecutionData(new StepExecution(name, new JobExecution(jobExecutionId)));
  }

  /**
   * Builds the response from a persisted StepExecution, formatting start
   * date/time in the given zone and the elapsed duration in GMT.
   *
   * @param stepExecution the step execution to describe
   * @param timeZone zone used to render the start date and time
   */
  public StepExecutionInfoResponse(StepExecution stepExecution, TimeZone timeZone) {
    this.stepExecutionData = new StepExecutionData(stepExecution);
    this.id = stepExecutionData.getId();
    this.name = stepExecutionData.getStepName();
    // "?" when the execution has no resolvable job instance.
    this.jobName = stepExecutionData.getJobExecution() != null && stepExecutionData.getJobExecution().getJobInstance() != null? stepExecutionData.getJobExecution().getJobInstance().getJobName():"?";
    this.jobExecutionId = stepExecutionData.getJobExecutionId();
    SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd");
    SimpleDateFormat timeFormat = new SimpleDateFormat("HH:mm:ss");
    SimpleDateFormat durationFormat = new SimpleDateFormat("HH:mm:ss");

    // Duration is rendered by formatting the epoch offset in GMT; note this
    // wraps for durations of 24 hours or more (HH:mm:ss only).
    durationFormat.setTimeZone(TimeZone.getTimeZone("GMT"));
    timeFormat.setTimeZone(timeZone);
    dateFormat.setTimeZone(timeZone);
    if(stepExecutionData.getStartTime() != null) {
      this.startDate = dateFormat.format(stepExecutionData.getStartTime());
      this.startTime = timeFormat.format(stepExecutionData.getStartTime());
      // Still-running steps use "now" as the provisional end time.
      Date endTime = stepExecutionData.getEndTime() != null? stepExecutionData.getEndTime():new Date();
      this.durationMillis = endTime.getTime() - stepExecutionData.getStartTime().getTime();
      this.duration = durationFormat.format(new Date(this.durationMillis));
    }

  }

  public Long getId() {
    return this.id;
  }

  public Long getJobExecutionId() {
    return this.jobExecutionId;
  }

  public String getName() {
    return this.name;
  }

  public String getJobName() {
    return this.jobName;
  }

  public String getStartDate() {
    return this.startDate;
  }

  public String getStartTime() {
    return this.startTime;
  }

  public String getDuration() {
    return this.duration;
  }

  public long getDurationMillis() {
    return this.durationMillis;
  }

  // "NONE" indicates a placeholder (no persisted execution, id == null).
  public String getStatus() {
    return this.id != null?this.stepExecutionData.getStatus().toString():"NONE";
  }

  public String getExitCode() {
    return this.id != null?this.stepExecutionData.getExitStatus().getExitCode():"NONE";
  }

  /** Underlying wrapped step execution; excluded from JSON serialization. */
  @JsonIgnore
  public StepExecutionData getStepExecution() {
    return this.stepExecutionData;
  }
}
diff --git a/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/StepExecutionProgressResponse.java b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/StepExecutionProgressResponse.java
new file mode 100644
index 00000000..26f9ed4f
--- /dev/null
+++ b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/StepExecutionProgressResponse.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import org.springframework.batch.admin.history.StepExecutionHistory;
+import org.springframework.batch.admin.web.StepExecutionProgress;
+
+public class StepExecutionProgressResponse {
+
+ private StepExecutionProgress stepExecutionProgress;
+
+ private StepExecutionHistory stepExecutionHistory;
+
+ private StepExecutionInfoResponse stepExecutionInfoResponse;
+
+ public StepExecutionProgressResponse() {
+ }
+
+ public StepExecutionProgressResponse(StepExecutionProgress stepExecutionProgress, StepExecutionHistory stepExecutionHistory,
+ StepExecutionInfoResponse stepExecutionInfoResponse) {
+ this.stepExecutionProgress = stepExecutionProgress;
+ this.stepExecutionHistory = stepExecutionHistory;
+ this.stepExecutionInfoResponse = stepExecutionInfoResponse;
+ }
+
+ public StepExecutionProgress getStepExecutionProgress() {
+ return stepExecutionProgress;
+ }
+
+ public StepExecutionHistory getStepExecutionHistory() {
+ return stepExecutionHistory;
+ }
+
+ public StepExecutionInfoResponse getStepExecutionInfoResponse() {
+ return stepExecutionInfoResponse;
+ }
+}
diff --git a/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/StepExecutionRequest.java b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/StepExecutionRequest.java
new file mode 100644
index 00000000..22281712
--- /dev/null
+++ b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/StepExecutionRequest.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import javax.validation.constraints.NotNull;
+import javax.ws.rs.PathParam;
+
+public class StepExecutionRequest {
+
+ @PathParam("jobExecutionId")
+ @NotNull
+ private Long jobExecutionId;
+
+ @PathParam("stepExecutionId")
+ @NotNull
+ private Long stepExecutionId;
+
+ public Long getJobExecutionId() {
+ return jobExecutionId;
+ }
+
+ public void setJobExecutionId(Long jobExecutionId) {
+ this.jobExecutionId = jobExecutionId;
+ }
+
+ public Long getStepExecutionId() {
+ return stepExecutionId;
+ }
+
+ public void setStepExecutionId(Long stepExecutionId) {
+ this.stepExecutionId = stepExecutionId;
+ }
+}
diff --git a/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/wrapper/JobExecutionData.java b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/wrapper/JobExecutionData.java
new file mode 100644
index 00000000..28e262ae
--- /dev/null
+++ b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/wrapper/JobExecutionData.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model.wrapper;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.google.common.collect.Lists;
+import org.springframework.batch.core.BatchStatus;
+import org.springframework.batch.core.ExitStatus;
+import org.springframework.batch.core.JobExecution;
+import org.springframework.batch.core.JobInstance;
+import org.springframework.batch.core.JobParameters;
+import org.springframework.batch.core.StepExecution;
+import org.springframework.batch.item.ExecutionContext;
+
+import java.util.Collection;
+import java.util.Date;
+import java.util.List;
+
+/**
+ * Wrapper for #{{@link JobExecution}}
+ */
+public class JobExecutionData {
+
+ private JobExecution jobExecution;
+
+ public JobExecutionData(JobExecution jobExecution) {
+ this.jobExecution = jobExecution;
+ }
+
+ @JsonIgnore
+ public JobExecution getJobExecution() {
+ return jobExecution;
+ }
+
+ @JsonIgnore
+ public Collection getStepExecutions() {
+ return jobExecution.getStepExecutions();
+ }
+
+ public JobParameters getJobParameters() {
+ return jobExecution.getJobParameters();
+ }
+
+ public JobInstance getJobInstance() {
+ return jobExecution.getJobInstance();
+ }
+
+ public Collection getStepExecutionDataList() {
+ List stepExecutionDataList = Lists.newArrayList();
+ Collection stepExecutions = getStepExecutions();
+ if (stepExecutions != null) {
+ for (StepExecution stepExecution : stepExecutions) {
+ stepExecutionDataList.add(new StepExecutionData(stepExecution));
+ }
+ }
+ return stepExecutionDataList;
+ }
+
+ public BatchStatus getStatus() {
+ return jobExecution.getStatus();
+ }
+
+ public Date getStartTime() {
+ return jobExecution.getStartTime();
+ }
+
+ public Date getCreateTime() {
+ return jobExecution.getCreateTime();
+ }
+
+ public Date getEndTime() {
+ return jobExecution.getEndTime();
+ }
+
+ public Date getLastUpdated() {
+ return jobExecution.getLastUpdated();
+ }
+
+ public ExitStatus getExitStatus() {
+ return jobExecution.getExitStatus();
+ }
+
+ public ExecutionContext getExecutionContext() {
+ return jobExecution.getExecutionContext();
+ }
+
+ public List getFailureExceptions() {
+ return jobExecution.getFailureExceptions();
+ }
+
+ public String getJobConfigurationName() {
+ return jobExecution.getJobConfigurationName();
+ }
+
+ public Long getId() {
+ return jobExecution.getId();
+ }
+
+ public Long getJobId() {
+ return jobExecution.getJobId();
+ }
+}
diff --git a/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/wrapper/StepExecutionData.java b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/wrapper/StepExecutionData.java
new file mode 100644
index 00000000..26552ae6
--- /dev/null
+++ b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/wrapper/StepExecutionData.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model.wrapper;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import org.springframework.batch.core.BatchStatus;
+import org.springframework.batch.core.ExitStatus;
+import org.springframework.batch.core.JobExecution;
+import org.springframework.batch.core.StepExecution;
+import org.springframework.batch.item.ExecutionContext;
+
+import java.util.Date;
+import java.util.List;
+
+/**
+ * Wrapper for #{{@link StepExecution}}
+ */
+public class StepExecutionData {
+
+ @JsonIgnore
+ private final JobExecution jobExecution;
+
+ @JsonIgnore
+ private final StepExecution stepExecution;
+
+
+ public StepExecutionData(StepExecution stepExecution) {
+ this.stepExecution = stepExecution;
+ this.jobExecution = stepExecution.getJobExecution();
+ }
+
+ @JsonIgnore
+ public JobExecution getJobExecution() {
+ return jobExecution;
+ }
+
+ @JsonIgnore
+ public StepExecution getStepExecution() {
+ return stepExecution;
+ }
+
+ public String getStepName() {
+ return stepExecution.getStepName();
+ }
+
+ public int getReadCount() {
+ return stepExecution.getReadCount();
+ }
+
+ public BatchStatus getStatus() {
+ return stepExecution.getStatus();
+ }
+
+ public int getWriteCount() {
+ return stepExecution.getWriteCount();
+ }
+
+ public int getCommitCount() {
+ return stepExecution.getCommitCount();
+ }
+
+ public int getRollbackCount() {
+ return stepExecution.getRollbackCount();
+ }
+
+ public int getReadSkipCount() {
+ return stepExecution.getReadSkipCount();
+ }
+
+ public int getProcessSkipCount() {
+ return stepExecution.getProcessSkipCount();
+ }
+
+ public Date getStartTime() {
+ return stepExecution.getStartTime();
+ }
+
+ public int getWriteSkipCount() {
+ return stepExecution.getWriteSkipCount();
+ }
+
+ public Date getEndTime() {
+ return stepExecution.getEndTime();
+ }
+
+ public Date getLastUpdated() {
+ return stepExecution.getLastUpdated();
+ }
+
+ public ExecutionContext getExecutionContext() {
+ return stepExecution.getExecutionContext();
+ }
+
+ public ExitStatus getExitStatus() {
+ return stepExecution.getExitStatus();
+ }
+
+ public boolean isTerminateOnly() {
+ return stepExecution.isTerminateOnly();
+ }
+
+ public int getFilterCount() {
+ return stepExecution.getFilterCount();
+ }
+
+ public List getFailureExceptions() {
+ return stepExecution.getFailureExceptions();
+ }
+
+ public Long getId() {
+ return stepExecution.getId();
+ }
+
+ public Long getJobExecutionId() {
+ return stepExecution.getJobExecutionId();
+ }
+}
diff --git a/ambari-infra-manager/src/main/java/org/apache/ambari/infra/rest/ApiDocResource.java b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/rest/ApiDocResource.java
new file mode 100644
index 00000000..18dfdd9d
--- /dev/null
+++ b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/rest/ApiDocResource.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.rest;
+
+import io.swagger.annotations.ApiOperation;
+import org.apache.ambari.infra.doc.InfraManagerApiDocStorage;
+import org.springframework.context.annotation.Scope;
+
+import javax.inject.Inject;
+import javax.inject.Named;
+import javax.ws.rs.GET;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+
+@Path("swagger.{type:json|yaml}")
+@Named
+@Scope("request")
+public class ApiDocResource {
+
+ @Inject
+ private InfraManagerApiDocStorage infraManagerApiDocStorage;
+
+ @GET
+ @Produces({MediaType.APPLICATION_JSON, "application/yaml"})
+ @ApiOperation(value = "The swagger definition in either JSON or YAML", hidden = true)
+ public Response swaggerDefinitionResponse(@PathParam("type") String type) {
+ Response response = Response.status(404).build();
+ if (infraManagerApiDocStorage.getSwagger() != null) {
+ if ("yaml".equalsIgnoreCase(type)) {
+ response = Response.ok().entity(infraManagerApiDocStorage.getSwaggerYaml()).type("application/yaml").build();
+ } else {
+ response = Response.ok().entity(infraManagerApiDocStorage.getSwagger()).build();
+ }
+ }
+ return response;
+ }
+}
diff --git a/ambari-infra-manager/src/main/java/org/apache/ambari/infra/rest/JobExceptionMapper.java b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/rest/JobExceptionMapper.java
new file mode 100644
index 00000000..079cce3e
--- /dev/null
+++ b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/rest/JobExceptionMapper.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.rest;
+
+
+import com.google.common.collect.Maps;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.batch.admin.service.NoSuchStepExecutionException;
+import org.springframework.batch.core.JobParametersInvalidException;
+import org.springframework.batch.core.launch.JobExecutionNotFailedException;
+import org.springframework.batch.core.launch.JobExecutionNotRunningException;
+import org.springframework.batch.core.launch.JobExecutionNotStoppedException;
+import org.springframework.batch.core.launch.JobInstanceAlreadyExistsException;
+import org.springframework.batch.core.launch.JobParametersNotFoundException;
+import org.springframework.batch.core.launch.NoSuchJobException;
+import org.springframework.batch.core.launch.NoSuchJobExecutionException;
+import org.springframework.batch.core.launch.NoSuchJobInstanceException;
+import org.springframework.batch.core.repository.JobExecutionAlreadyRunningException;
+import org.springframework.batch.core.repository.JobInstanceAlreadyCompleteException;
+import org.springframework.batch.core.repository.JobRestartException;
+import org.springframework.batch.core.step.NoSuchStepException;
+import org.springframework.web.bind.MethodArgumentNotValidException;
+
+import javax.batch.operations.JobExecutionAlreadyCompleteException;
+import javax.inject.Named;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.ext.ExceptionMapper;
+import javax.ws.rs.ext.Provider;
+import java.util.Map;
+
+@Named
+@Provider
+public class JobExceptionMapper implements ExceptionMapper {
+
+ private static final Logger LOG = LoggerFactory.getLogger(JobExceptionMapper.class);
+
+ private static final Map exceptionStatusCodeMap = Maps.newHashMap();
+
+ static {
+ exceptionStatusCodeMap.put(MethodArgumentNotValidException.class, Response.Status.BAD_REQUEST);
+ exceptionStatusCodeMap.put(NoSuchJobException.class, Response.Status.NOT_FOUND);
+ exceptionStatusCodeMap.put(NoSuchStepException.class, Response.Status.NOT_FOUND);
+ exceptionStatusCodeMap.put(NoSuchStepExecutionException.class, Response.Status.NOT_FOUND);
+ exceptionStatusCodeMap.put(NoSuchJobExecutionException.class, Response.Status.NOT_FOUND);
+ exceptionStatusCodeMap.put(NoSuchJobInstanceException.class, Response.Status.NOT_FOUND);
+ exceptionStatusCodeMap.put(JobExecutionNotRunningException.class, Response.Status.INTERNAL_SERVER_ERROR);
+ exceptionStatusCodeMap.put(JobExecutionNotStoppedException.class, Response.Status.INTERNAL_SERVER_ERROR);
+ exceptionStatusCodeMap.put(JobInstanceAlreadyExistsException.class, Response.Status.ACCEPTED);
+ exceptionStatusCodeMap.put(JobInstanceAlreadyCompleteException.class, Response.Status.ACCEPTED);
+ exceptionStatusCodeMap.put(JobExecutionAlreadyRunningException.class, Response.Status.ACCEPTED);
+ exceptionStatusCodeMap.put(JobExecutionAlreadyCompleteException.class, Response.Status.ACCEPTED);
+ exceptionStatusCodeMap.put(JobParametersNotFoundException.class, Response.Status.NOT_FOUND);
+ exceptionStatusCodeMap.put(JobExecutionNotFailedException.class, Response.Status.INTERNAL_SERVER_ERROR);
+ exceptionStatusCodeMap.put(JobRestartException.class, Response.Status.INTERNAL_SERVER_ERROR);
+ exceptionStatusCodeMap.put(JobParametersInvalidException.class, Response.Status.BAD_REQUEST);
+ }
+
+ @Override
+ public Response toResponse(Throwable throwable) {
+ LOG.error("REST Exception occurred:", throwable);
+ Response.Status status = Response.Status.INTERNAL_SERVER_ERROR;
+
+ for (Map.Entry entry : exceptionStatusCodeMap.entrySet()) {
+ if (throwable.getClass().isAssignableFrom(entry.getKey())) {
+ status = entry.getValue();
+ LOG.info("Exception mapped to: {} with status code: {}", entry.getKey().getCanonicalName(), entry.getValue().getStatusCode());
+ break;
+ }
+ }
+
+ return Response.status(status).entity(new StatusMessage(throwable.getMessage(), status.getStatusCode()))
+ .type(MediaType.APPLICATION_JSON_TYPE).build();
+ }
+
+ private class StatusMessage {
+ private String message;
+ private int statusCode;
+
+ StatusMessage(String message, int statusCode) {
+ this.message = message;
+ this.statusCode = statusCode;
+ }
+
+ public String getMessage() {
+ return message;
+ }
+
+ public int getStatusCode() {
+ return statusCode;
+ }
+ }
+}
diff --git a/ambari-infra-manager/src/main/java/org/apache/ambari/infra/rest/JobResource.java b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/rest/JobResource.java
new file mode 100644
index 00000000..0e20b54a
--- /dev/null
+++ b/ambari-infra-manager/src/main/java/org/apache/ambari/infra/rest/JobResource.java
@@ -0,0 +1,191 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.rest;
+
+import io.swagger.annotations.Api;
+import io.swagger.annotations.ApiOperation;
+import org.apache.ambari.infra.manager.JobManager;
+import org.apache.ambari.infra.model.ExecutionContextResponse;
+import org.apache.ambari.infra.model.JobDetailsResponse;
+import org.apache.ambari.infra.model.JobExecutionDetailsResponse;
+import org.apache.ambari.infra.model.JobExecutionInfoResponse;
+import org.apache.ambari.infra.model.JobExecutionRequest;
+import org.apache.ambari.infra.model.JobExecutionRestartRequest;
+import org.apache.ambari.infra.model.JobExecutionStopRequest;
+import org.apache.ambari.infra.model.JobInstanceStartRequest;
+import org.apache.ambari.infra.model.JobRequest;
+import org.apache.ambari.infra.model.PageRequest;
+import org.apache.ambari.infra.model.StepExecutionContextResponse;
+import org.apache.ambari.infra.model.StepExecutionInfoResponse;
+import org.apache.ambari.infra.model.StepExecutionProgressResponse;
+import org.apache.ambari.infra.model.StepExecutionRequest;
+import org.springframework.batch.admin.service.NoSuchStepExecutionException;
+import org.springframework.batch.admin.web.JobInfo;
+import org.springframework.batch.core.JobParametersInvalidException;
+import org.springframework.batch.core.launch.JobExecutionNotRunningException;
+import org.springframework.batch.core.launch.JobInstanceAlreadyExistsException;
+import org.springframework.batch.core.launch.NoSuchJobException;
+import org.springframework.batch.core.launch.NoSuchJobExecutionException;
+import org.springframework.batch.core.launch.NoSuchJobInstanceException;
+import org.springframework.batch.core.repository.JobExecutionAlreadyRunningException;
+import org.springframework.batch.core.repository.JobInstanceAlreadyCompleteException;
+import org.springframework.batch.core.repository.JobRestartException;
+import org.springframework.context.annotation.Scope;
+
+import javax.inject.Inject;
+import javax.inject.Named;
+import javax.validation.Valid;
+import javax.validation.constraints.NotNull;
+import javax.ws.rs.BeanParam;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import java.util.List;
+import java.util.Set;
+
+@Api(value = "jobs", description = "Job operations")
+@Path("jobs")
+@Named
+@Scope("request")
+public class JobResource {
+
+ @Inject
+ private JobManager jobManager;
+
+ @GET
+ @Produces({"application/json"})
+ @ApiOperation("Get all jobs")
+ public List getAllJobs(@BeanParam @Valid PageRequest request) {
+ return jobManager.getAllJobs(request.getPage(), request.getSize());
+ }
+
+ @POST
+ @Produces({"application/json"})
+ @Path("{jobName}")
+ @ApiOperation("Start a new job instance by job name.")
+ public JobExecutionInfoResponse startJob(@BeanParam @Valid JobInstanceStartRequest request)
+ throws JobParametersInvalidException, JobInstanceAlreadyExistsException, NoSuchJobException, JobExecutionAlreadyRunningException,
+ JobRestartException, JobInstanceAlreadyCompleteException {
+ return jobManager.launchJob(request.getJobName(), request.getParams());
+ }
+
+ @GET
+ @Produces({"application/json"})
+ @Path("/info/names")
+ @ApiOperation("Get all job names")
+ public Set getAllJobNames() {
+ return jobManager.getAllJobNames();
+ }
+
+ @GET
+ @Produces({"application/json"})
+ @Path("{jobName}/info")
+ @ApiOperation("Get job details by job name.")
+ public JobDetailsResponse getJobDetails(@BeanParam @Valid JobRequest jobRequest) throws NoSuchJobException {
+ return jobManager.getJobDetails(jobRequest.getJobName(), jobRequest.getPage(), jobRequest.getSize());
+ }
+
+ @GET
+ @Path("{jobName}/executions")
+ @Produces({"application/json"})
+ @ApiOperation("Get the id values of all the running job instances.")
+ public Set getExecutionIdsByJobName(@PathParam("jobName") @NotNull @Valid String jobName) throws NoSuchJobException {
+ return jobManager.getExecutionIdsByJobName(jobName);
+ }
+
+ @GET
+ @Produces({"application/json"})
+ @Path("/executions/{jobExecutionId}")
+ @ApiOperation("Get job and step details for job execution instance.")
+ public JobExecutionDetailsResponse getExectionInfo(@PathParam("jobExecutionId") @Valid Long jobExecutionId) throws NoSuchJobExecutionException {
+ return jobManager.getExectionInfo(jobExecutionId);
+ }
+
+ @GET
+ @Produces({"application/json"})
+ @Path("/executions/{jobExecutionId}/context")
+ @ApiOperation("Get execution context for specific job.")
+ public ExecutionContextResponse getExecutionContextByJobExecId(@PathParam("jobExecutionId") Long executionId) throws NoSuchJobExecutionException {
+ return jobManager.getExecutionContextByJobExecutionId(executionId);
+ }
+
+
+ @DELETE
+ @Produces({"application/json"})
+ @Path("/executions/{jobExecutionId}")
+ @ApiOperation("Stop or abandon a running job execution.")
+ public JobExecutionInfoResponse stopOrAbandonJobExecution(@BeanParam @Valid JobExecutionStopRequest request)
+ throws NoSuchJobExecutionException, JobExecutionNotRunningException, JobExecutionAlreadyRunningException {
+ return jobManager.stopOrAbandonJobByExecutionId(request.getJobExecutionId(), request.getOperation());
+ }
+
+ @DELETE
+ @Produces({"application/json"})
+ @Path("/executions")
+ @ApiOperation("Stop all job executions.")
+ public Integer stopAll() {
+ return jobManager.stopAllJobs();
+ }
+
+ @GET
+ @Produces({"application/json"})
+ @Path("/{jobName}/{jobInstanceId}/executions")
+ @ApiOperation("Get execution for job instance.")
+ public List getExecutionsForInstance(@BeanParam @Valid JobExecutionRequest request) throws JobInstanceAlreadyCompleteException,
+ NoSuchJobExecutionException, JobExecutionAlreadyRunningException, JobParametersInvalidException, JobRestartException, NoSuchJobException, NoSuchJobInstanceException {
+ return jobManager.getExecutionsForJobInstance(request.getJobName(), request.getJobInstanceId());
+ }
+
+ @POST
+ @Produces({"application/json"})
+ @Path("/{jobName}/{jobInstanceId}/executions")
+ @ApiOperation("Restart job instance.")
+ public JobExecutionInfoResponse restartJobInstance(@BeanParam @Valid JobExecutionRestartRequest request) throws JobInstanceAlreadyCompleteException,
+ NoSuchJobExecutionException, JobExecutionAlreadyRunningException, JobParametersInvalidException, JobRestartException, NoSuchJobException {
+ return jobManager.restart(request.getJobInstanceId(), request.getJobName(), request.getOperation());
+ }
+
+ @GET
+ @Produces({"application/json"})
+ @Path("/executions/{jobExecutionId}/steps/{stepExecutionId}")
+ @ApiOperation("Get step execution details.")
+ public StepExecutionInfoResponse getStepExecution(@BeanParam @Valid StepExecutionRequest request) throws NoSuchStepExecutionException, NoSuchJobExecutionException {
+ return jobManager.getStepExecution(request.getJobExecutionId(), request.getStepExecutionId());
+ }
+
+ @GET
+ @Produces({"application/json"})
+ @Path("/executions/{jobExecutionId}/steps/{stepExecutionId}/execution-context")
+ @ApiOperation("Get the execution context of step execution.")
+ public StepExecutionContextResponse getStepExecutionContext(@BeanParam @Valid StepExecutionRequest request) throws NoSuchStepExecutionException, NoSuchJobExecutionException {
+ return jobManager.getStepExecutionContext(request.getJobExecutionId(), request.getStepExecutionId());
+ }
+
+ @GET
+ @Produces({"application/json"})
+ @Path("/executions/{jobExecutionId}/steps/{stepExecutionId}/progress")
+ @ApiOperation("Get progress of step execution.")
+ public StepExecutionProgressResponse getStepExecutionProgress(@BeanParam @Valid StepExecutionRequest request) throws NoSuchStepExecutionException, NoSuchJobExecutionException {
+ return jobManager.getStepExecutionProgress(request.getJobExecutionId(), request.getStepExecutionId());
+ }
+
+}
diff --git a/ambari-infra-manager/src/main/resources/dummy/dummy.txt b/ambari-infra-manager/src/main/resources/dummy/dummy.txt
new file mode 100644
index 00000000..41da7250
--- /dev/null
+++ b/ambari-infra-manager/src/main/resources/dummy/dummy.txt
@@ -0,0 +1,3 @@
+f1,f2
+v1,v2
+v3,v4
\ No newline at end of file
diff --git a/ambari-infra-manager/src/main/resources/infra-manager-env.sh b/ambari-infra-manager/src/main/resources/infra-manager-env.sh
new file mode 100644
index 00000000..c7e11c32
--- /dev/null
+++ b/ambari-infra-manager/src/main/resources/infra-manager-env.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Extend with java options or system properties. e.g.: INFRA_MANAGER_OPTS="-Xdebug -Xrunjdwp:transport=dt_socket,address=5007,server=y,suspend=n"
+export INFRA_MANAGER_OPTS=""
\ No newline at end of file
diff --git a/ambari-infra-manager/src/main/resources/infra-manager.properties b/ambari-infra-manager/src/main/resources/infra-manager.properties
new file mode 100644
index 00000000..81623764
--- /dev/null
+++ b/ambari-infra-manager/src/main/resources/infra-manager.properties
@@ -0,0 +1,20 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+infra-manager.batch.db.file=job-repository.db
+infra-manager.batch.db.init=true
+infra-manager.batch.db.username=admin
+infra-manager.batch.db.password=admin
+management.security.enabled=false
+management.health.solr.enabled=false
diff --git a/ambari-infra-manager/src/main/resources/infraManager.sh b/ambari-infra-manager/src/main/resources/infraManager.sh
new file mode 100644
index 00000000..65287b26
--- /dev/null
+++ b/ambari-infra-manager/src/main/resources/infraManager.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+JVM="java"
+sdir="`dirname \"$0\"`"
+
+PATH=$JAVA_HOME/bin:$PATH nohup $JVM -classpath "/etc/ambari-infra-manager/conf:$sdir:$sdir/libs/*" $INFRA_MANAGER_OPTS org.apache.ambari.infra.InfraManager ${1+"$@"} &
\ No newline at end of file
diff --git a/ambari-infra-manager/src/main/resources/log4j2.xml b/ambari-infra-manager/src/main/resources/log4j2.xml
new file mode 100644
index 00000000..ad1adcdc
--- /dev/null
+++ b/ambari-infra-manager/src/main/resources/log4j2.xml
@@ -0,0 +1,41 @@
+
+
+
+
+ out/infra-manager.log
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/ambari-infra-manager/src/main/resources/static/index.html b/ambari-infra-manager/src/main/resources/static/index.html
new file mode 100644
index 00000000..3e648674
--- /dev/null
+++ b/ambari-infra-manager/src/main/resources/static/index.html
@@ -0,0 +1,24 @@
+
+
+
+
+
+
+ Welcome!
+
+
\ No newline at end of file
diff --git a/ambari-infra-manager/src/main/resources/swagger/swagger.html b/ambari-infra-manager/src/main/resources/swagger/swagger.html
new file mode 100644
index 00000000..52ebf615
--- /dev/null
+++ b/ambari-infra-manager/src/main/resources/swagger/swagger.html
@@ -0,0 +1,115 @@
+
+
+
+
+ Infra Manager REST API
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/ambari-infra-solr-client/build.xml b/ambari-infra-solr-client/build.xml
index a54e336b..25ff0cb3 100644
--- a/ambari-infra-solr-client/build.xml
+++ b/ambari-infra-solr-client/build.xml
@@ -35,6 +35,8 @@
+
+
diff --git a/ambari-infra-solr-client/pom.xml b/ambari-infra-solr-client/pom.xml
index 8cb22483..0d323743 100644
--- a/ambari-infra-solr-client/pom.xml
+++ b/ambari-infra-solr-client/pom.xml
@@ -35,6 +35,16 @@
solr-solrj
${solr.version}
+
+ org.apache.lucene
+ lucene-core
+ ${solr.version}
+
+
+ org.apache.lucene
+ lucene-backward-codecs
+ ${solr.version}
+
org.apache.zookeeper
zookeeper
@@ -60,15 +70,22 @@
org.slf4j
slf4j-api
+ 1.7.20
org.slf4j
slf4j-log4j12
+ 1.7.20
log4j
log4j
+
+ com.amazonaws
+ aws-java-sdk-s3
+ 1.11.5
+
junit
junit
diff --git a/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/AmbariSolrCloudCLI.java b/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/AmbariSolrCloudCLI.java
index e3a1e797..95708d70 100644
--- a/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/AmbariSolrCloudCLI.java
+++ b/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/AmbariSolrCloudCLI.java
@@ -52,19 +52,21 @@ public class AmbariSolrCloudCLI {
private static final String UNSECURE_ZNODE_COMMAND = "unsecure-znode";
private static final String SECURE_SOLR_ZNODE_COMMAND = "secure-solr-znode";
private static final String SECURITY_JSON_LOCATION = "security-json-location";
+ private static final String REMOVE_ADMIN_HANDLERS = "remove-admin-handlers";
private static final String CMD_LINE_SYNTAX =
"\n./solrCloudCli.sh --create-collection -z host1:2181,host2:2181/ambari-solr -c collection -cs conf_set"
+ "\n./solrCloudCli.sh --upload-config -z host1:2181,host2:2181/ambari-solr -d /tmp/myconfig_dir -cs config_set"
+ "\n./solrCloudCli.sh --download-config -z host1:2181,host2:2181/ambari-solr -cs config_set -d /tmp/myonfig_dir"
+ "\n./solrCloudCli.sh --check-config -z host1:2181,host2:2181/ambari-solr -cs config_set"
+ "\n./solrCloudCli.sh --create-shard -z host1:2181,host2:2181/ambari-solr -c collection -sn myshard"
+ + "\n./solrCloudCli.sh --remove-admin-handlers -z host1:2181,host2:2181/ambari-solr -c collection"
+ "\n./solrCloudCli.sh --create-znode -z host1:2181,host2:2181 -zn /ambari-solr"
+ "\n./solrCloudCli.sh --check-znode -z host1:2181,host2:2181 -zn /ambari-solr"
+ "\n./solrCloudCli.sh --cluster-prop -z host1:2181,host2:2181/ambari-solr -cpn urlScheme -cpn http"
+ "\n./solrCloudCli.sh --secure-znode -z host1:2181,host2:2181 -zn /ambari-solr -su logsearch,atlas,ranger --jaas-file /etc/myconf/jaas_file"
+ "\n./solrCloudCli.sh --unsecure-znode -z host1:2181,host2:2181 -zn /ambari-solr --jaas-file /etc/myconf/jaas_file"
+ "\n./solrCloudCli.sh --secure-solr-znode -z host1:2181,host2:2181 -zn /ambari-solr -su logsearch,atlas,ranger --jaas-file /etc/myconf/jaas_file"
- + "\n./solrCloudCli.sh --setup-kerberos-plugin -z host1:2181,host2:2181 -zn /ambari-solr --security-json-location /etc/infra-solr/conf/security.json\n";
+ + "\n./solrCloudCli.sh --setup-kerberos-plugin -z host1:2181,host2:2181 -zn /ambari-solr --security-json-location /etc/infra-solr/conf/security.json\n ";
public static void main(String[] args) {
Options options = new Options();
@@ -124,17 +126,22 @@ public static void main(String[] args) {
final Option secureSolrZnodeOption = Option.builder("ssz")
.longOpt(SECURE_SOLR_ZNODE_COMMAND)
- .desc("Set acls for solr znode")
+ .desc("Set acls for solr znode (command)")
.build();
final Option secureZnodeOption = Option.builder("sz")
.longOpt(SECURE_ZNODE_COMMAND)
- .desc("Set acls for znode")
+ .desc("Set acls for znode (command)")
.build();
final Option unsecureZnodeOption = Option.builder("uz")
.longOpt(UNSECURE_ZNODE_COMMAND)
- .desc("Disable security for znode")
+ .desc("Disable security for znode (command)")
+ .build();
+
+ final Option removeAdminHandlerOption = Option.builder("rah")
+ .longOpt(REMOVE_ADMIN_HANDLERS)
+ .desc("Remove AdminHandlers request handler from solrconfig.xml (command)")
.build();
final Option shardNameOption = Option.builder("sn")
@@ -328,6 +335,7 @@ public static void main(String[] args) {
options.addOption(helpOption);
options.addOption(retryOption);
+ options.addOption(removeAdminHandlerOption);
options.addOption(intervalOption);
options.addOption(zkConnectStringOption);
options.addOption(configSetOption);
@@ -414,10 +422,13 @@ public static void main(String[] args) {
} else if (cli.hasOption("uz")) {
command = UNSECURE_ZNODE_COMMAND;
validateRequiredOptions(cli, command, zkConnectStringOption, znodeOption, jaasFileOption);
+ } else if (cli.hasOption("rah")) {
+ command = REMOVE_ADMIN_HANDLERS;
+ validateRequiredOptions(cli, command, zkConnectStringOption, collectionOption);
} else {
List commands = Arrays.asList(CREATE_COLLECTION_COMMAND, CREATE_SHARD_COMMAND, UPLOAD_CONFIG_COMMAND,
DOWNLOAD_CONFIG_COMMAND, CONFIG_CHECK_COMMAND, SET_CLUSTER_PROP, CREATE_ZNODE, SECURE_ZNODE_COMMAND, UNSECURE_ZNODE_COMMAND,
- SECURE_SOLR_ZNODE_COMMAND, CHECK_ZNODE, SETUP_KERBEROS_PLUGIN);
+ SECURE_SOLR_ZNODE_COMMAND, CHECK_ZNODE, SETUP_KERBEROS_PLUGIN, REMOVE_ADMIN_HANDLERS);
helpFormatter.printHelp(CMD_LINE_SYNTAX, options);
exit(1, String.format("One of the supported commands is required (%s)", StringUtils.join(commands, "|")));
}
@@ -539,6 +550,9 @@ public static void main(String[] args) {
case SECURE_SOLR_ZNODE_COMMAND:
solrCloudClient = clientBuilder.build();
solrCloudClient.secureSolrZnode();
+ case REMOVE_ADMIN_HANDLERS:
+ solrCloudClient = clientBuilder.build();
+ solrCloudClient.removeAdminHandlerFromCollectionConfig();
break;
default:
throw new AmbariSolrCloudClientException(String.format("Not found command: '%s'", command));
diff --git a/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/AmbariSolrCloudClient.java b/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/AmbariSolrCloudClient.java
index 94796793..96c07a35 100644
--- a/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/AmbariSolrCloudClient.java
+++ b/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/AmbariSolrCloudClient.java
@@ -27,6 +27,7 @@
import org.apache.ambari.infra.solr.commands.GetShardsCommand;
import org.apache.ambari.infra.solr.commands.GetSolrHostsCommand;
import org.apache.ambari.infra.solr.commands.ListCollectionCommand;
+import org.apache.ambari.infra.solr.commands.RemoveAdminHandlersCommand;
import org.apache.ambari.infra.solr.commands.SecureSolrZNodeZkCommand;
import org.apache.ambari.infra.solr.commands.SecureZNodeZkCommand;
import org.apache.ambari.infra.solr.commands.SetClusterPropertyZkCommand;
@@ -257,6 +258,13 @@ public Collection getSolrHosts() throws Exception {
return new GetSolrHostsCommand(getRetryTimes(), getInterval()).run(this);
}
+ /**
+ * Remove the solr.admin.AdminHandlers requestHandler entry from the collection's
+ * solrconfig.xml stored in ZooKeeper, retrying per the client's retry settings.
+ */
+ public boolean removeAdminHandlerFromCollectionConfig() throws Exception {
+ return new RemoveAdminHandlersCommand(getRetryTimes(), getInterval()).run(this);
+ }
+
public String getZkConnectString() {
return zkConnectString;
}
diff --git a/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/S3Uploader.java b/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/S3Uploader.java
new file mode 100644
index 00000000..60b4e0af
--- /dev/null
+++ b/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/S3Uploader.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.infra.solr;
+
+import java.io.File;
+
+import org.apache.commons.io.FileUtils;
+
+import com.amazonaws.auth.BasicAWSCredentials;
+import com.amazonaws.services.s3.AmazonS3Client;
+
+/**
+ * Uploads a file to S3, meant to be used by solrDataManager.py
+ */
+public class S3Uploader {
+  /**
+   * Uploads a single file to S3. Arguments:
+   *   args[0] - path of a file containing "accessKey,secretKey"
+   *   args[1] - bucket name
+   *   args[2] - key prefix; the uploaded object's key is prefix + file name
+   *   args[3] - path of the file to upload
+   * Exits 0 on success or when the object already exists, 1 on any failure.
+   */
+  public static void main(String[] args) {
+    try {
+      // Fail fast with a usage message instead of an ArrayIndexOutOfBoundsException.
+      if (args.length < 4) {
+        System.err.println("Usage: S3Uploader <keyFilePath> <bucketName> <keyPrefix> <filePath>");
+        System.exit(1);
+      }
+      String keyFilePath = args[0];
+      String bucketName = args[1];
+      String keyPrefix = args[2];
+      String filePath = args[3];
+
+      // NOTE(review): reads the key file with the platform default charset — the
+      // credentials are ASCII so this is harmless, but confirm if that ever changes.
+      String keyFileContent = FileUtils.readFileToString(new File(keyFilePath)).trim();
+      String[] keys = keyFileContent.split(",");
+      if (keys.length < 2) {
+        System.err.println("Key file must contain 'accessKey,secretKey': " + keyFilePath);
+        System.exit(1);
+      }
+      String accessKey = keys[0];
+      String secretKey = keys[1];
+
+      BasicAWSCredentials credentials = new BasicAWSCredentials(accessKey, secretKey);
+      AmazonS3Client client = new AmazonS3Client(credentials);
+
+      File file = new File(filePath);
+      String key = keyPrefix + file.getName();
+
+      // Idempotent: skip (and succeed) if the object was uploaded by a previous run.
+      if (client.doesObjectExist(bucketName, key)) {
+        System.out.println("Object '" + key + "' already exists");
+        System.exit(0);
+      }
+
+      client.putObject(bucketName, key, file);
+    } catch (Exception e) {
+      e.printStackTrace(System.err);
+      System.exit(1);
+    }
+
+    System.exit(0);
+  }
+}
diff --git a/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/commands/RemoveAdminHandlersCommand.java b/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/commands/RemoveAdminHandlersCommand.java
new file mode 100644
index 00000000..32fae7b1
--- /dev/null
+++ b/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/commands/RemoveAdminHandlersCommand.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.solr.commands;
+
+import org.apache.ambari.infra.solr.AmbariSolrCloudClient;
+import org.apache.solr.common.cloud.SolrZkClient;
+import org.apache.solr.common.cloud.SolrZooKeeper;
+import org.apache.zookeeper.data.Stat;
+
+/**
+ * ZooKeeper retry command that strips the solr.admin.AdminHandlers requestHandler
+ * from a collection's solrconfig.xml stored under /configs/&lt;collection&gt;.
+ */
+public class RemoveAdminHandlersCommand extends AbstractZookeeperRetryCommand<Boolean> {
+
+  public RemoveAdminHandlersCommand(int maxRetries, int interval) {
+    super(maxRetries, interval);
+  }
+
+  @Override
+  protected Boolean executeZkCommand(AmbariSolrCloudClient client, SolrZkClient zkClient, SolrZooKeeper solrZooKeeper) throws Exception {
+    String solrConfigXmlPath = String.format("/configs/%s/solrconfig.xml", client.getCollection());
+    if (zkClient.exists(solrConfigXmlPath, true)) {
+      Stat stat = new Stat();
+      byte[] solrConfigXmlBytes = zkClient.getData(solrConfigXmlPath, null, stat, true);
+      String solrConfigStr = new String(solrConfigXmlBytes);
+      if (solrConfigStr.contains("class=\"solr.admin.AdminHandlers\"")) {
+        // Drop the whole <requestHandler ... class="solr.admin.AdminHandlers" .../> element.
+        // NOTE(review): the original regex was garbled in this patch; this pattern was
+        // reconstructed — confirm it matches the element shape in shipped solrconfig.xml.
+        byte[] newSolrConfigXmlBytes = solrConfigStr
+          .replaceAll("(?s)<requestHandler[^>]*class=\"solr.admin.AdminHandlers\"[^>]*/>", "")
+          .getBytes();
+        // ZooKeeper's conditional setData expects the CURRENT version from the read Stat;
+        // passing stat.getVersion() + 1 always fails with BadVersionException.
+        zkClient.setData(solrConfigXmlPath, newSolrConfigXmlBytes, stat.getVersion(), true);
+      }
+    }
+    return true;
+  }
+}
diff --git a/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/commands/SetClusterPropertyZkCommand.java b/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/commands/SetClusterPropertyZkCommand.java
index 34597c6d..e79773e5 100644
--- a/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/commands/SetClusterPropertyZkCommand.java
+++ b/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/commands/SetClusterPropertyZkCommand.java
@@ -19,9 +19,9 @@
package org.apache.ambari.infra.solr.commands;
import org.apache.ambari.infra.solr.AmbariSolrCloudClient;
+import org.apache.solr.common.cloud.ClusterProperties;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.SolrZooKeeper;
-import org.apache.solr.common.cloud.ZkStateReader;
public class SetClusterPropertyZkCommand extends AbstractZookeeperRetryCommand{
@@ -33,8 +33,8 @@ public SetClusterPropertyZkCommand(int maxRetries, int interval) {
protected String executeZkCommand(AmbariSolrCloudClient client, SolrZkClient zkClient, SolrZooKeeper solrZooKeeper) throws Exception {
String propertyName = client.getPropName();
String propertyValue = client.getPropValue();
- ZkStateReader reader = new ZkStateReader(zkClient);
- reader.setClusterProperty(propertyName, propertyValue);
+ ClusterProperties clusterProperties = new ClusterProperties(zkClient);
+ clusterProperties.setClusterProperty(propertyName, propertyValue);
return propertyValue;
}
}
diff --git a/ambari-infra-solr-client/src/main/python/solrDataManager.py b/ambari-infra-solr-client/src/main/python/solrDataManager.py
new file mode 100644
index 00000000..e0356bbc
--- /dev/null
+++ b/ambari-infra-solr-client/src/main/python/solrDataManager.py
@@ -0,0 +1,725 @@
+#!/usr/bin/python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import hashlib
+import json
+import logging
+import optparse
+import os
+import time
+import signal
+import sys
+
+from datetime import datetime, timedelta
+from subprocess import call, Popen, PIPE
+from urllib import quote, unquote
+from zipfile import ZipFile, ZIP_DEFLATED
+import tarfile
+import gzip
+import shutil
+
+VERSION = "1.0"
+
+logger = logging.getLogger()
+handler = logging.StreamHandler()
+formatter = logging.Formatter("%(asctime)s - %(message)s")
+handler.setFormatter(formatter)
+logger.addHandler(handler)
+verbose = False
+
+def parse_arguments():
+ parser = optparse.OptionParser("usage: %prog [options]", version="Solr Data Manager {0}".format(VERSION))
+
+ parser.add_option("-m", "--mode", dest="mode", type="string", help="delete | save")
+ parser.add_option("-s", "--solr-url", dest="solr_url", type="string", help="the url of the solr server including the port")
+ parser.add_option("-c", "--collection", dest="collection", type="string", help="the name of the solr collection")
+ parser.add_option("-f", "--filter-field", dest="filter_field", type="string", help="the name of the field to filter on")
+ parser.add_option("-r", "--read-block-size", dest="read_block_size", type="int", help="block size to use for reading from solr",
+ default=1000)
+ parser.add_option("-w", "--write-block-size", dest="write_block_size", type="int", help="number of records in the output files",
+ default=100000)
+ parser.add_option("-i", "--id-field", dest="id_field", type="string", help="the name of the id field", default="id")
+
+ end_group = optparse.OptionGroup(parser, "specifying the end of the range")
+ end_group.add_option("-e", "--end", dest="end", type="string", help="end of the range")
+ end_group.add_option("-d", "--days", dest="days", type="int", help="number of days to keep")
+ parser.add_option_group(end_group)
+
+ parser.add_option("-o", "--date-format", dest="date_format", type="string", help="the date format to use for --days",
+ default="%Y-%m-%dT%H:%M:%S.%fZ")
+
+ parser.add_option("-q", "--additional-filter", dest="additional_filter", type="string", help="additional solr filter")
+ parser.add_option("-j", "--name", dest="name", type="string", help="name included in result files")
+
+ parser.add_option("-g", "--ignore-unfinished-uploading", dest="ignore_unfinished_uploading", action="store_true", default=False)
+
+ parser.add_option("--json-file", dest="json_file", help="create a json file instead of line delimited json", action="store_true", default=False)
+ parser.add_option("-z", "--compression", dest="compression", help="none | tar.gz | tar.bz2 | zip | gz", default="gz")
+
+ parser.add_option("-k", "--solr-keytab", dest="solr_keytab", type="string", help="the keytab for a kerberized solr")
+ parser.add_option("-n", "--solr-principal", dest="solr_principal", type="string", help="the principal for a kerberized solr")
+
+ parser.add_option("-a", "--hdfs-keytab", dest="hdfs_keytab", type="string", help="the keytab for a kerberized hdfs")
+ parser.add_option("-l", "--hdfs-principal", dest="hdfs_principal", type="string", help="the principal for a kerberized hdfs")
+
+ parser.add_option("-u", "--hdfs-user", dest="hdfs_user", type="string", help="the user for accessing hdfs")
+ parser.add_option("-p", "--hdfs-path", dest="hdfs_path", type="string", help="the hdfs path to upload to")
+
+ parser.add_option("-t", "--key-file-path", dest="key_file_path", type="string", help="the file that contains the S3 <access key>,<secret key>")
+ parser.add_option("-b", "--bucket", dest="bucket", type="string", help="the bucket name for S3 upload")
+ parser.add_option("-y", "--key-prefix", dest="key_prefix", type="string", help="the key prefix for S3 upload")
+
+ parser.add_option("-x", "--local-path", dest="local_path", type="string", help="the local path to save the files to")
+
+ parser.add_option("-v", "--verbose", dest="verbose", action="store_true", default=False)
+
+ (options, args) = parser.parse_args()
+
+ for r in ["mode", "solr_url", "collection", "filter_field"]:
+ if options.__dict__[r] is None:
+ print "argument '{0}' is mandatory".format(r)
+ parser.print_help()
+ sys.exit()
+
+ mode_values = ["delete", "save"]
+ if options.mode not in mode_values:
+ print "mode must be one of {0}".format(" | ".join(mode_values))
+ parser.print_help()
+ sys.exit()
+
+ if options.mode == "delete":
+ for r in ["name", "compression", "hdfs_keytab", "hdfs_principal", "hdfs_user", "hdfs_path", "key_file_path", "bucket", "key_prefix", "local_path"]:
+ if options.__dict__[r] is not None:
+ print "argument '{0}' may not be specified in delete mode".format(r)
+ parser.print_help()
+ sys.exit()
+
+ if options.__dict__["end"] is None and options.__dict__["days"] is None or \
+ options.__dict__["end"] is not None and options.__dict__["days"] is not None:
+ print "exactly one of 'end' or 'days' must be specified"
+ parser.print_help()
+ sys.exit()
+
+ is_any_solr_kerberos_property = options.__dict__["solr_keytab"] is not None or options.__dict__["solr_principal"] is not None
+ is_all_solr_kerberos_property = options.__dict__["solr_keytab"] is not None and options.__dict__["solr_principal"] is not None
+ if is_any_solr_kerberos_property and not is_all_solr_kerberos_property:
+ print "either both 'solr-keytab' and 'solr-principal' must be specified, or neither of them"
+ parser.print_help()
+ sys.exit()
+
+ compression_values = ["none", "tar.gz", "tar.bz2", "zip", "gz"]
+ if options.compression not in compression_values:
+ print "compression must be one of {0}".format(" | ".join(compression_values))
+ parser.print_help()
+ sys.exit()
+
+ is_any_hdfs_kerberos_property = options.__dict__["hdfs_keytab"] is not None or options.__dict__["hdfs_principal"] is not None
+ is_all_hdfs_kerberos_property = options.__dict__["hdfs_keytab"] is not None and options.__dict__["hdfs_principal"] is not None
+ if is_any_hdfs_kerberos_property and not is_all_hdfs_kerberos_property:
+ print "either both 'hdfs_keytab' and 'hdfs_principal' must be specified, or neither of them"
+ parser.print_help()
+ sys.exit()
+
+ is_any_hdfs_property = options.__dict__["hdfs_user"] is not None or options.__dict__["hdfs_path"] is not None
+ is_all_hdfs_property = options.__dict__["hdfs_user"] is not None and options.__dict__["hdfs_path"] is not None
+ if is_any_hdfs_property and not is_all_hdfs_property:
+ print "either both 'hdfs_user' and 'hdfs_path' must be specified, or neither of them"
+ parser.print_help()
+ sys.exit()
+
+ is_any_s3_property = options.__dict__["key_file_path"] is not None or options.__dict__["bucket"] is not None or \
+ options.__dict__["key_prefix"] is not None
+ is_all_s3_property = options.__dict__["key_file_path"] is not None and options.__dict__["bucket"] is not None and \
+ options.__dict__["key_prefix"] is not None
+ if is_any_s3_property and not is_all_s3_property:
+ print "either all the S3 arguments ('key_file_path', 'bucket', 'key_prefix') must be specified, or none of them"
+ parser.print_help()
+ sys.exit()
+
+ if options.mode == "save":
+ count = (1 if is_any_hdfs_property else 0) + (1 if is_any_s3_property else 0) + \
+ (1 if options.__dict__["local_path"] is not None else 0)
+ if count != 1:
+ print "exactly one of the HDFS arguments ('hdfs_user', 'hdfs_path') or the S3 arguments ('key_file_path', 'bucket', 'key_prefix') or the 'local_path' argument must be specified"
+ parser.print_help()
+ sys.exit()
+
+ if options.__dict__["hdfs_keytab"] is not None and options.__dict__["hdfs_user"] is None:
+ print "HDFS kerberos keytab and principal may only be specified if the upload target is HDFS"
+ parser.print_help()
+ sys.exit()
+
+ print("You are running Solr Data Manager {0} with arguments:".format(VERSION))
+ print(" mode: " + options.mode)
+ print(" solr-url: " + options.solr_url)
+ print(" collection: " + options.collection)
+ print(" filter-field: " + options.filter_field)
+ if options.mode == "save":
+ print(" id-field: " + options.id_field)
+ if options.__dict__["end"] is not None:
+ print(" end: " + options.end)
+ else:
+ print(" days: " + str(options.days))
+ print(" date-format: " + options.date_format)
+ if options.__dict__["additional_filter"] is not None:
+ print(" additional-filter: " + str(options.additional_filter))
+ if options.__dict__["name"] is not None:
+ print(" name: " + str(options.name))
+ if options.mode == "save":
+ print(" read-block-size: " + str(options.read_block_size))
+ print(" write-block-size: " + str(options.write_block_size))
+ print(" ignore-unfinished-uploading: " + str(options.ignore_unfinished_uploading))
+ if (options.__dict__["solr_keytab"] is not None):
+ print(" solr-keytab: " + options.solr_keytab)
+ print(" solr-principal: " + options.solr_principal)
+ if options.mode == "save":
+ print(" output: " + ("json" if options.json_file else "line-delimited-json"))
+ print(" compression: " + options.compression)
+ if (options.__dict__["hdfs_keytab"] is not None):
+ print(" hdfs-keytab: " + options.hdfs_keytab)
+ print(" hdfs-principal: " + options.hdfs_principal)
+ if (options.__dict__["hdfs_user"] is not None):
+ print(" hdfs-user: " + options.hdfs_user)
+ print(" hdfs-path: " + options.hdfs_path)
+ if (options.__dict__["key_file_path"] is not None):
+ print(" key-file-path: " + options.key_file_path)
+ print(" bucket: " + options.bucket)
+ print(" key-prefix: " + options.key_prefix)
+ if (options.__dict__["local_path"] is not None):
+ print(" local-path: " + options.local_path)
+ print(" verbose: " + str(options.verbose))
+ print
+
+ if options.__dict__["additional_filter"] is not None and options.__dict__["name"] is None:
+ go = False
+ while not go:
+ sys.stdout.write("It is recommended to set --name in case of any additional filter is set.\n")
+ sys.stdout.write("Are you sure that you want to proceed without a name (yes/no)? ")
+ choice = raw_input().lower()
+ if choice in ['yes', 'ye', 'y']:
+ go = True
+ elif choice in ['no', 'n']:
+ sys.exit()
+
+ return options
+
+def set_log_level():
+ if verbose:
+ logger.setLevel(logging.DEBUG)
+ else:
+ logger.setLevel(logging.INFO)
+
+def get_end(options):
+ if options.end:
+ return options.end
+ else:
+ d = datetime.now() - timedelta(days=options.days)
+ end = d.strftime(options.date_format)
+ logger.info("The end date will be: %s", end)
+ return end
+
+def delete(solr_url, collection, filter_field, end, solr_keytab, solr_principal):
+ logger.info("Deleting data where %s <= %s", filter_field, end)
+ solr_kinit_command = None
+ if solr_keytab:
+ solr_kinit_command = "kinit -kt {0} {1}".format(solr_keytab, solr_principal)
+ curl_prefix = "curl -k --negotiate -u : "
+ else:
+ curl_prefix = "curl -k"
+
+ delete_range = "{0}:[*+TO+\"{1}\"]".format(filter_field, end)
+ delete_query = quote("{0}:[*+TO+\"{1}\"]".format(filter_field, end), safe="/+\"*")
+ delete_command = "{0}/{1}/update?stream.body=<delete><query>{2}</query></delete>&commit=true&wt=json" \
+ .format(solr_url, collection, delete_query)
+
+ query_solr(solr_kinit_command, delete_command, "{0} {1}".format(curl_prefix, delete_command), "Deleting")
+
+def save(solr_url, collection, filter_field, id_field, range_end, read_block_size, write_block_size,
+ ignore_unfinished_uploading, additional_filter, name, solr_keytab, solr_principal, json_file,
+ compression, hdfs_keytab, hdfs_principal, hdfs_user, hdfs_path, key_file_path, bucket, key_prefix, local_path):
+ solr_kinit_command = None
+ if solr_keytab:
+ solr_kinit_command = "kinit -kt {0} {1}".format(solr_keytab, solr_principal)
+ curl_prefix = "curl -k --negotiate -u : "
+ else:
+ curl_prefix = "curl -k"
+
+ hdfs_kinit_command = None
+ if hdfs_keytab:
+ hdfs_kinit_command = "sudo -u {0} kinit -kt {1} {2}".format(hdfs_user, hdfs_keytab, hdfs_principal)
+
+ if options.hdfs_path:
+ ensure_hdfs_path(hdfs_kinit_command, hdfs_user, hdfs_path)
+
+ working_dir = get_working_dir(solr_url, collection)
+ handle_unfinished_uploading(solr_kinit_command, hdfs_kinit_command, curl_prefix, working_dir, ignore_unfinished_uploading)
+ save_data(solr_kinit_command, hdfs_kinit_command, curl_prefix, solr_url, collection, filter_field, id_field, range_end,
+ read_block_size, write_block_size, working_dir, additional_filter, name, json_file, compression,
+ hdfs_user, hdfs_path, key_file_path, bucket, key_prefix, local_path)
+
+def ensure_hdfs_path(hdfs_kinit_command, hdfs_user, hdfs_path):
+ if hdfs_kinit_command:
+ run_kinit(hdfs_kinit_command, "HDFS")
+
+ try:
+ hdfs_create_dir_command = "sudo -u {0} hadoop fs -mkdir -p {1}".format(hdfs_user, hdfs_path)
+ logger.debug("Ensuring that the HDFS path %s exists:\n%s", hdfs_path, hdfs_create_dir_command)
+ result = call(hdfs_create_dir_command.split())
+ except Exception as e:
+ print
+ logger.warn("Could not execute hdfs ensure dir command:\n%s", hdfs_create_dir_command)
+ logger.warn(str(e))
+ sys.exit()
+
+ if result != 0:
+ print
+ logger.warn("Could not ensure HDFS dir command:\n%s", hdfs_create_dir_command)
+ logger.warn("The command exited with code %d", result)
+ sys.exit()
+
+def get_working_dir(solr_url, collection):
+ md5 = hashlib.md5()
+ md5.update(solr_url)
+ md5.update(collection)
+ hash = md5.hexdigest()
+ working_dir = "/tmp/solrDataManager/{0}".format(hash)
+
+ if not(os.path.isdir(working_dir)):
+ os.makedirs(working_dir)
+
+ logger.debug("Working directory is %s", working_dir)
+ return working_dir
+
+def handle_unfinished_uploading(solr_kinit_command, hdfs_kinit_command, curl_prefix, working_dir, ignore_unfinished_uploading):
+ command_json_path = "{0}/command.json".format(working_dir)
+ if os.path.isfile(command_json_path):
+ with open(command_json_path) as command_file:
+ command = json.load(command_file)
+
+ if "upload" in command.keys() and ignore_unfinished_uploading:
+ logger.info("Ignoring unfinished uploading left by previous run")
+ os.remove(command_json_path)
+ return
+
+ if "upload" in command.keys():
+ logger.info("Previous run has left unfinished uploading")
+ logger.info("You may try to run the program with '-g' or '--ignore-unfinished-uploading' to ignore it if it keeps on failing")
+
+ if command["upload"]["type"] == "hdfs":
+ upload_file_hdfs(hdfs_kinit_command, command["upload"]["command"], command["upload"]["upload_file_path"],
+ command["upload"]["hdfs_path"], command["upload"]["hdfs_user"])
+ elif command["upload"]["type"] == "s3":
+ upload_file_s3(command["upload"]["command"], command["upload"]["upload_file_path"], command["upload"]["bucket"],
+ command["upload"]["key_prefix"])
+ elif command["upload"]["type"] == "local":
+ upload_file_local(command["upload"]["command"], command["upload"]["upload_file_path"], command["upload"]["local_path"])
+ else:
+ logger.warn("Unknown upload type: %s", command["upload"]["type"])
+ sys.exit()
+
+ if "delete" in command.keys():
+ delete_data(solr_kinit_command, curl_prefix, command["delete"]["command"], command["delete"]["collection"],
+ command["delete"]["filter_field"], command["delete"]["id_field"], command["delete"]["prev_lot_end_value"],
+ command["delete"]["prev_lot_end_id"])
+
+ os.remove(command_json_path)
+
+def save_data(solr_kinit_command, hdfs_kinit_command, curl_prefix, solr_url, collection, filter_field, id_field,
+ range_end, read_block_size, write_block_size, working_dir, additional_filter, name, json_file,
+ compression, hdfs_user, hdfs_path, key_file_path, bucket, key_prefix, local_path):
+ logger.info("Starting to save data")
+
+ tmp_file_path = "{0}/tmp.json".format(working_dir)
+
+ prev_lot_end_value = None
+ prev_lot_end_id = None
+
+ if additional_filter:
+ q = quote("{0}+AND+{1}:[*+TO+\"{2}\"]".format(additional_filter, filter_field, range_end), safe="/+\"*")
+ else:
+ q = quote("{0}:[*+TO+\"{1}\"]".format(filter_field, range_end), safe="/+\"*")
+
+ sort = quote("{0}+asc,{1}+asc".format(filter_field, id_field), safe="/+\"*")
+ solr_query_url_prefix = "{0}/{1}/select?q={2}&sort={3}&rows={4}&wt=json".format(solr_url, collection, q, sort, read_block_size)
+
+ done = False
+ total_records = 0
+ while not done:
+ results = create_block(tmp_file_path, solr_kinit_command, curl_prefix, solr_query_url_prefix, filter_field,
+ id_field, range_end, write_block_size, prev_lot_end_value, prev_lot_end_id, json_file)
+ done = results[0]
+ records = results[1]
+ prev_lot_end_value = results[2]
+ prev_lot_end_id = results[3]
+
+ if records > 0:
+ upload_block(solr_kinit_command, hdfs_kinit_command, curl_prefix, solr_url, collection, filter_field, id_field,
+ working_dir, tmp_file_path, name, prev_lot_end_value, prev_lot_end_id, hdfs_user, hdfs_path,
+ key_file_path, bucket, key_prefix, local_path, compression)
+ total_records += records
+ logger.info("A total of %d records are saved", total_records)
+
+def create_block(tmp_file_path, solr_kinit_command, curl_prefix, solr_query_url_prefix, filter_field, id_field, range_end,
+ write_block_size, prev_lot_end_value, prev_lot_end_id, json_file):
+ if os.path.exists(tmp_file_path):
+ os.remove(tmp_file_path)
+ tmp_file = open(tmp_file_path, 'w')
+ logger.debug("Created tmp file %s", tmp_file_path)
+
+ init_file(tmp_file, json_file)
+ records = 0
+ done = False
+ while records < write_block_size:
+ if prev_lot_end_value:
+ fq_prev_end_rest = "({0}:\"{1}\"+AND+{2}:{{\"{3}\"+TO+*])".format(filter_field, prev_lot_end_value, id_field,
+ prev_lot_end_id)
+ fq_new = "{0}:{{\"{1}\"+TO+\"{2}\"]".format(filter_field, prev_lot_end_value, range_end)
+ fq = "{0}+OR+{1}".format(fq_prev_end_rest, fq_new)
+ else:
+ fq = "{0}:[*+TO+\"{1}\"]".format(filter_field, range_end)
+
+ url = "{0}&fq={1}".format(solr_query_url_prefix, quote(fq, safe="/+\"*"))
+ curl_command = "{0} {1}".format(curl_prefix, url)
+
+ rsp = query_solr(solr_kinit_command, url, curl_command, "Obtaining")
+
+ if rsp['response']['numFound'] == 0:
+ done = True
+ break
+
+ for doc in rsp['response']['docs']:
+ last_doc = doc
+ add_line(tmp_file, doc, json_file, records)
+ records += 1
+ if records == write_block_size:
+ break
+
+ prev_lot_end_value = last_doc[filter_field]
+ prev_lot_end_id = last_doc[id_field]
+ sys.stdout.write("\r{0} records are written".format(records))
+ sys.stdout.flush()
+ if verbose and records < write_block_size:
+ print
+ logger.debug("Collecting next lot of data")
+
+ finish_file(tmp_file, json_file)
+ sys.stdout.write("\n")
+ logger.debug("Finished data collection")
+ return [done, records, prev_lot_end_value, prev_lot_end_id]
+
+def init_file(tmp_file, json_file):
+ if json_file:
+ tmp_file.write("{\n")
+
+def add_line(tmp_file, doc, json_file, records):
+ if records > 0:
+ if json_file:
+ tmp_file.write(",\n")
+ else:
+ tmp_file.write("\n")
+
+ tmp_file.write(json.dumps(doc))
+
+def finish_file(tmp_file, json_file):
+ if json_file:
+ tmp_file.write("\n}")
+
+def upload_block(solr_kinit_command, hdfs_kinit_command, curl_prefix, solr_url, collection, filter_field, id_field,
+ working_dir, tmp_file_path, name, prev_lot_end_value, prev_lot_end_id, hdfs_user, hdfs_path,
+ key_file_path, bucket, key_prefix, local_path, compression):
+ if name:
+ file_name = "{0}_-_{1}_-_{2}_-_{3}".format(collection, name, prev_lot_end_value, prev_lot_end_id).replace(':', '_')
+ else:
+ file_name = "{0}_-_{1}_-_{2}".format(collection, prev_lot_end_value, prev_lot_end_id).replace(':', '_')
+
+ upload_file_path = compress_file(working_dir, tmp_file_path, file_name, compression)
+
+ upload_command = create_command_file(True, working_dir, upload_file_path, solr_url, collection, filter_field, id_field,
+ prev_lot_end_value, prev_lot_end_id, hdfs_user, hdfs_path, key_file_path, bucket,
+ key_prefix, local_path)
+ if hdfs_user:
+ upload_file_hdfs(hdfs_kinit_command, upload_command, upload_file_path, hdfs_path, hdfs_user)
+ elif key_file_path:
+ upload_file_s3(upload_command, upload_file_path, bucket, key_prefix)
+ elif local_path:
+ upload_file_local(upload_command, upload_file_path, local_path)
+ else:
+ logger.warn("Unknown upload destination")
+ sys.exit()
+
+ delete_command = create_command_file(False, working_dir, upload_file_path, solr_url, collection, filter_field, id_field,
+ prev_lot_end_value, prev_lot_end_id, None, None, None, None, None, None)
+ delete_data(solr_kinit_command, curl_prefix, delete_command, collection, filter_field, id_field, prev_lot_end_value, prev_lot_end_id)
+
+ os.remove("{0}/command.json".format(working_dir))
+
+def compress_file(working_dir, tmp_file_path, file_name, compression):
+ data_file_name = "{0}.json".format(file_name)
+ if compression == "none":
+ upload_file_path = "{0}/{1}.json".format(working_dir, file_name)
+ os.rename(tmp_file_path, upload_file_path)
+ elif compression == "tar.gz":
+ upload_file_path = "{0}/{1}.json.tar.gz".format(working_dir, file_name)
+ tar = tarfile.open(upload_file_path, mode="w:gz")
+ try:
+ tar.add(tmp_file_path, arcname=data_file_name)
+ finally:
+ tar.close()
+ elif compression == "tar.bz2":
+ upload_file_path = "{0}/{1}.json.tar.bz2".format(working_dir, file_name)
+ tar = tarfile.open(upload_file_path, mode="w:bz2")
+ try:
+ tar.add(tmp_file_path, arcname=data_file_name)
+ finally:
+ tar.close()
+ elif compression == "zip":
+ upload_file_path = "{0}/{1}.json.zip".format(working_dir, file_name)
+ zip = ZipFile(upload_file_path, 'w')
+ zip.write(tmp_file_path, data_file_name, ZIP_DEFLATED)
+ elif compression == "gz":
+ upload_file_path = "{0}/{1}.json.gz".format(working_dir, file_name)
+ gz = gzip.open(upload_file_path, mode="wb")
+ f = open(tmp_file_path)
+ try:
+ shutil.copyfileobj(f, gz)
+ finally:
+ gz.close()
+ f.close()
+ else:
+ logger.warn("Unknown compression type")
+ sys.exit()
+
+ logger.info("Created data file %s", data_file_name)
+
+
+ return upload_file_path
+
+def create_command_file(upload, working_dir, upload_file_path, solr_url, collection, filter_field, id_field, prev_lot_end_value,
+ prev_lot_end_id, hdfs_user, hdfs_path, key_file_path, bucket, key_prefix, local_path):
+ commands = {}
+
+ if upload:
+ logger.debug("Creating command file with upload and delete instructions in case of an interruption")
+ else:
+ logger.debug("Creating command file with delete instructions in case of an interruption")
+
+ if upload:
+ if hdfs_path:
+ upload_command = "sudo -u {0} hadoop fs -put {1} {2}".format(hdfs_user, upload_file_path, hdfs_path)
+ upload_command_data = {}
+ upload_command_data["type"] = "hdfs"
+ upload_command_data["command"] = upload_command
+ upload_command_data["upload_file_path"] = upload_file_path
+ upload_command_data["hdfs_path"] = hdfs_path
+ upload_command_data["hdfs_user"] = hdfs_user
+ commands["upload"] = upload_command_data
+ elif key_file_path:
+ upload_command = "java -cp {0}/libs/* org.apache.ambari.infra.solr.S3Uploader {1} {2} {3} {4}".format( \
+ os.path.dirname(os.path.realpath(__file__)), key_file_path, bucket, key_prefix, upload_file_path)
+ upload_command_data = {}
+ upload_command_data["type"] = "s3"
+ upload_command_data["command"] = upload_command
+ upload_command_data["upload_file_path"] = upload_file_path
+ upload_command_data["bucket"] = bucket
+ upload_command_data["key_prefix"] = key_prefix
+ commands["upload"] = upload_command_data
+ elif local_path:
+ upload_command = "mv {0} {1}".format(upload_file_path, local_path)
+ upload_command_data = {}
+ upload_command_data["type"] = "local"
+ upload_command_data["command"] = upload_command
+ upload_command_data["upload_file_path"] = upload_file_path
+ upload_command_data["local_path"] = local_path
+ commands["upload"] = upload_command_data
+ else:
+ logger.warn("Unknown upload destination")
+ sys.exit()
+
+
+ delete_prev = "{0}:[*+TO+\"{1}\"]".format(filter_field, prev_lot_end_value)
+ delete_last = "({0}:\"{1}\"+AND+{2}:[*+TO+\"{3}\"])".format(filter_field, prev_lot_end_value, id_field, prev_lot_end_id)
+ delete_query = quote("{0}+OR+{1}".format(delete_prev, delete_last), safe="/+\"*")
+ delete_command = "{0}/{1}/update?stream.body=<delete><query>{2}</query></delete>&commit=true&wt=json" \
+ .format(solr_url, collection, delete_query)
+ delete_command_data = {}
+ delete_command_data["command"] = delete_command
+ delete_command_data["collection"] = collection
+ delete_command_data["filter_field"] = filter_field
+ delete_command_data["id_field"] = id_field
+ delete_command_data["prev_lot_end_value"] = prev_lot_end_value
+ delete_command_data["prev_lot_end_id"] = prev_lot_end_id
+ commands["delete"] = delete_command_data
+
+ command_file_path = "{0}/command.json".format(working_dir)
+ command_file_path_tmp = "{0}.tmp".format(command_file_path)
+ cft = open(command_file_path_tmp, 'w')
+ cft.write(json.dumps(commands, indent=4))
+ os.rename(command_file_path_tmp, command_file_path)
+
+ logger.debug("Command file %s was created", command_file_path)
+
+ if upload:
+ return upload_command
+ else:
+ return delete_command
+
+def upload_file_hdfs(hdfs_kinit_command, upload_command, upload_file_path, hdfs_path, hdfs_user):
+ if hdfs_kinit_command:
+ run_kinit(hdfs_kinit_command, "HDFS")
+
+ try:
+ hdfs_file_exists_command = "sudo -u {0} hadoop fs -test -e {1}".format(hdfs_user, hdfs_path + os.path.basename(upload_file_path))
+ logger.debug("Checking if file already exists on hdfs:\n%s", hdfs_file_exists_command)
+ hdfs_file_exists = (0 == call(hdfs_file_exists_command.split()))
+ except Exception as e:
+ print
+ logger.warn("Could not execute command to check if file already exists on HDFS:\n%s", hdfs_file_exists_command)
+ logger.warn(str(e))
+ sys.exit()
+
+ if os.path.isfile(upload_file_path) and not hdfs_file_exists:
+ try:
+ logger.debug("Uploading file to hdfs:\n%s", upload_command)
+ result = call(upload_command.split())
+ except Exception as e:
+ print
+ logger.warn("Could not execute command to upload file to HDFS:\n%s", upload_command)
+ logger.warn(str(e))
+ sys.exit()
+
+ if result != 0:
+ logger.warn("Could not upload file to HDFS with command:\n%s", upload_command)
+ sys.exit()
+
+ logger.info("File %s was uploaded to hdfs %s", os.path.basename(upload_file_path), hdfs_path)
+ os.remove(upload_file_path)
+
+def upload_file_s3(upload_command, upload_file_path, bucket, key_prefix):
+ if os.path.isfile(upload_file_path):
+ try:
+ logger.debug("Uploading file to s3:\n%s", upload_command)
+ result = call(upload_command.split())
+ except Exception as e:
+ print
+ logger.warn("Could not execute command to upload file to S3:\n%s", upload_command)
+ logger.warn(str(e))
+ sys.exit()
+
+ if result != 0:
+ logger.warn("Could not upload file to S3 with command:\n%s", upload_command)
+ sys.exit()
+
+ logger.info("File %s was uploaded to s3 bucket '%s', key '%s'", os.path.basename(upload_file_path), bucket,
+ key_prefix + os.path.basename(upload_file_path))
+ os.remove(upload_file_path)
+
+def upload_file_local(upload_command, upload_file_path, local_path):
+ if os.path.exists(local_path) and not os.path.isdir(local_path):
+ logger.warn("Local path %s exists, but not a directory, can not save there", local_path)
+ if not os.path.isdir(local_path):
+ os.mkdir(local_path)
+ logger.debug("Directory %s was created", local_path)
+
+ try:
+ logger.debug("Moving file to local directory %s with command\n%s", local_path, upload_command)
+ call(upload_command.split())
+ logger.info("File %s was moved to local directory %s", os.path.basename(upload_file_path), local_path)
+ except Exception as e:
+ print
+ logger.warn("Could not execute move command command:\n%s", upload_command)
+ logger.warn(str(e))
+ sys.exit()
+
+def delete_data(solr_kinit_command, curl_prefix, delete_command, collection, filter_field, id_field, prev_lot_end_value,
+ prev_lot_end_id):
+ query_solr(solr_kinit_command, delete_command, "{0} {1}".format(curl_prefix, delete_command), "Deleting")
+ logger.info("Deleted data from collection %s where %s,%s < %s,%s", collection, filter_field, id_field, prev_lot_end_value,
+ prev_lot_end_id)
+
+def query_solr(solr_kinit_command, url, curl_command, action):
+ if solr_kinit_command:
+ run_kinit(solr_kinit_command, "Solr")
+
+ try:
+ logger.debug("%s data from solr:\n%s", action, curl_command)
+ process = Popen(curl_command.split(), stdin=PIPE, stdout=PIPE, stderr=PIPE)
+ except Exception as e:
+ print
+ logger.warn("Could not execute curl command:\n%s", curl_command)
+ logger.warn(str(e))
+ sys.exit()
+
+ out, err = process.communicate()
+ if process.returncode != 0:
+ print
+ logger.warn("Could not execute curl command:\n%s", curl_command)
+ logger.warn(str(err))
+ sys.exit()
+
+ true = True # needed to be able to eval 'true' in the returned json
+ rsp = eval(str(out))
+ if rsp["responseHeader"]["status"] != 0:
+ print
+ logger.warn("Could not execute solr query:\n%s", unquote(url))
+ logger.warn(rsp["error"]["msg"])
+ sys.exit()
+
+ return rsp
+
+def run_kinit(kinit_command, program):
+ try:
+ logger.debug("Running kinit for %s:\n%s", program, kinit_command)
+ result = call(kinit_command.split())
+ except Exception as e:
+ print
+ logger.warn("Could not execute %s kinit command:\n%s", program, kinit_command)
+ logger.warn(str(e))
+ sys.exit()
+
+ if result != 0:
+ print
+ logger.warn("%s kinit command was not successful:\n%s", program, kinit_command)
+ sys.exit()
+
+if __name__ == '__main__':
+ try:
+ start_time = time.time()
+
+ options = parse_arguments()
+ verbose = options.verbose
+ set_log_level()
+
+ end = get_end(options)
+
+ if options.mode == "delete":
+ delete(options.solr_url, options.collection, options.filter_field, end, options.solr_keytab, options.solr_principal)
+ elif options.mode == "save":
+ save(options.solr_url, options.collection, options.filter_field, options.id_field, end, options.read_block_size,
+ options.write_block_size, options.ignore_unfinished_uploading, options.additional_filter, options.name,
+ options.solr_keytab, options.solr_principal, options.json_file, options.compression,
+ options.hdfs_keytab, options.hdfs_principal, options.hdfs_user, options.hdfs_path, options.key_file_path,
+ options.bucket, options.key_prefix, options.local_path)
+ else:
+ logger.warn("Unknown mode: %s", options.mode)
+
+ print("--- %s seconds ---" % (time.time() - start_time))
+ except KeyboardInterrupt:
+ print
+ sys.exit(128 + signal.SIGINT)
diff --git a/ambari-infra-solr-client/src/main/resources/log4j.properties b/ambari-infra-solr-client/src/main/resources/log4j.properties
index e8dca125..47795967 100644
--- a/ambari-infra-solr-client/src/main/resources/log4j.properties
+++ b/ambari-infra-solr-client/src/main/resources/log4j.properties
@@ -23,9 +23,13 @@ log4j.appender.stdout.Threshold=INFO
log4j.appender.stdout.Target=System.out
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%m%n
+log4j.appender.stdout.filter.lvlRangeFilter=org.apache.log4j.varia.LevelRangeFilter
+log4j.appender.stdout.filter.lvlRangeFilter.LevelMax=WARN
log4j.appender.stderr=org.apache.log4j.ConsoleAppender
log4j.appender.stderr.Threshold=ERROR
log4j.appender.stderr.Target=System.err
log4j.appender.stderr.layout=org.apache.log4j.PatternLayout
-log4j.appender.stderr.layout.ConversionPattern=%m%n
\ No newline at end of file
+log4j.appender.stderr.layout.ConversionPattern=%m%n
+log4j.appender.stderr.filter.lvlRangeFilter=org.apache.log4j.varia.LevelRangeFilter
+log4j.appender.stderr.filter.lvlRangeFilter.LevelMin=ERROR
\ No newline at end of file
diff --git a/ambari-infra-solr-client/src/main/resources/solrCloudCli.sh b/ambari-infra-solr-client/src/main/resources/solrCloudCli.sh
index cd47f065..ac7d59fe 100644
--- a/ambari-infra-solr-client/src/main/resources/solrCloudCli.sh
+++ b/ambari-infra-solr-client/src/main/resources/solrCloudCli.sh
@@ -16,5 +16,11 @@
JVM="java"
sdir="`dirname \"$0\"`"
+ldir="`dirname "$(readlink -f "$0")"`"
-PATH=$JAVA_HOME/bin:$PATH $JVM -classpath "$sdir:$sdir/libs/*" org.apache.ambari.logsearch.solr.AmbariSolrCloudCLI ${1+"$@"}
\ No newline at end of file
+DIR="$sdir"
+if [ "$sdir" != "$ldir" ]; then
+ DIR="$ldir"
+fi
+
+PATH=$JAVA_HOME/bin:$PATH $JVM -classpath "$DIR:$DIR/libs/*" org.apache.ambari.infra.solr.AmbariSolrCloudCLI ${1+"$@"}
\ No newline at end of file
diff --git a/ambari-infra-solr-client/src/main/resources/solrIndexHelper.sh b/ambari-infra-solr-client/src/main/resources/solrIndexHelper.sh
new file mode 100755
index 00000000..1d219d6c
--- /dev/null
+++ b/ambari-infra-solr-client/src/main/resources/solrIndexHelper.sh
@@ -0,0 +1,162 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+JVM="java"
+sdir="`dirname \"$0\"`"
+ldir="`dirname "$(readlink -f "$0")"`"
+
+DIR="$sdir"
+if [ "$sdir" != "$ldir" ]; then
+ DIR="$ldir"
+fi
+: ${JAVA_HOME:?"Please set the JAVA_HOME for lucene index migration!"}
+
+function print_help() {
+ cat << EOF
+
+ Usage: solrIndexHelper.sh <command> [<arguments>]
+
+ commands:
+ upgrade-index Check and upgrade solr index data in core directories.
+ run-check-index-tool call 'java -cp ... org.apache.lucene.index.CheckIndex' directly
+ run-upgrade-index-tool call 'java -cp ... org.apache.lucene.index.IndexUpgrader' directly
+ help print usage
+
+
+ upgrade-index command arguments:
+ -d, --index-data-dir Location of the solr cores (e.g.: /opt/ambari_infra_solr/data)
+ -c, --core-filters Comma separated name filters of core directories (default: hadoop_logs,audit_logs,history)
+ -f, --force Force to start index upgrade, even if the version is at least 6.
+
+EOF
+}
+
+function upgrade_core() {
+ local INDEX_DIR=${1:?"usage: e.g.: /opt/ambari_infra_solr/data"}
+ local FORCE_UPDATE=${2:?"usage e.g.: true"}
+ local SOLR_CORE_FILTERS=${3:?"usage: e.g.: hadoop_logs,audit_logs,history"}
+
+ SOLR_CORE_FILTER_ARR=$(echo $SOLR_CORE_FILTERS | sed "s/,/ /g")
+
+ for coll in $SOLR_CORE_FILTER_ARR; do
+ if [[ "$1" == *"$coll"* ]]; then
+ echo "Core '$1' dir name contains $coll (core filter)'";
+ version=$(PATH=$JAVA_HOME/bin:$PATH $JVM -classpath "$DIR/libs/lucene-core-6.6.0.jar:$DIR/libs/lucene-backward-codecs-6.6.0.jar" org.apache.lucene.index.CheckIndex -fast $1|grep " version="|sed -e 's/.*=//g'|head -1)
+ if [ -z $version ] ; then
+ echo "Core '$1' - Empty index?"
+ return
+ fi
+ majorVersion=$(echo $version|cut -c 1)
+ if [ $majorVersion -ge 6 ] && [ $FORCE_UPDATE == "false" ] ; then
+ echo "Core '$1' - Already on version $version, not upgrading. Use -f or --force option to run upgrade anyway."
+ else
+ echo "Core '$1' - Index version is $version, upgrading ..."
+ PATH=$JAVA_HOME/bin:$PATH $JVM -classpath "$DIR/libs/lucene-core-6.6.0.jar:$DIR/libs/lucene-backward-codecs-6.6.0.jar" org.apache.lucene.index.IndexUpgrader -delete-prior-commits $1
+ echo "Upgrading core '$1' has finished"
+ fi
+ fi
+ done
+}
+
+function upgrade_index() {
+ while [[ $# -gt 0 ]]
+ do
+ key="$1"
+ case $key in
+ -c|--core-filters)
+ local SOLR_CORE_FILTERS="$2"
+ shift 2
+ ;;
+ -f|--force)
+ local FORCE_UPDATE="true"
+ shift
+ ;;
+ -d|--index-data-dir)
+ local INDEX_DIR="$2"
+ shift 2
+ ;;
+ *)
+ echo "Unknown option: $1"
+ exit 1
+ ;;
+ esac
+ done
+ if [[ -z "$INDEX_DIR" ]] ; then
+ echo "Index data directory option is required (-d or --index-data-dir). Exiting..."
+ exit 1
+ fi
+
+ if [[ -z "$SOLR_CORE_FILTERS" ]] ; then
+ SOLR_CORE_FILTERS="hadoop_logs,audit_logs,history"
+ fi
+
+ if [[ -z "$FORCE_UPDATE" ]] ; then
+ FORCE_UPDATE="false"
+ else
+ echo "NOTE: Forcing index upgrade is set."
+ fi
+
+ CORES=$(for replica_dir in `find $INDEX_DIR -name data`; do dirname $replica_dir; done);
+ if [[ -z "$CORES" ]] ; then
+ echo "No indices found on path $INDEX_DIR"
+ else
+ for c in $CORES ; do
+ if find $c/data -maxdepth 1 -type d -name 'index*' 1> /dev/null 2>&1; then
+ name=$(echo $c | sed -e 's/.*\///g')
+ abspath=$(cd "$(dirname "$c")"; pwd)/$(basename "$c")
+ find $c/data -maxdepth 1 -type d -name 'index*' | while read indexDir; do
+ echo "Checking core $name - $abspath"
+ upgrade_core "$indexDir" "$FORCE_UPDATE" "$SOLR_CORE_FILTERS"
+ done
+ else
+ echo "No index folder found for $c"
+ fi
+ done
+ echo "DONE"
+ fi
+}
+
+function upgrade_index_tool() {
+ # see: https://cwiki.apache.org/confluence/display/solr/IndexUpgrader+Tool
+ PATH=$JAVA_HOME/bin:$PATH $JVM -classpath "$DIR/libs/lucene-core-6.6.0.jar:$DIR/libs/lucene-backward-codecs-6.6.0.jar" org.apache.lucene.index.IndexUpgrader ${@}
+}
+
+function check_index_tool() {
+ PATH=$JAVA_HOME/bin:$PATH $JVM -classpath "$DIR/libs/lucene-core-6.6.0.jar:$DIR/libs/lucene-backward-codecs-6.6.0.jar" org.apache.lucene.index.CheckIndex ${@}
+}
+
+function main() {
+ command="$1"
+ case $command in
+ "upgrade-index")
+ upgrade_index "${@:2}"
+ ;;
+ "run-check-index-tool")
+ check_index_tool "${@:2}"
+ ;;
+ "run-upgrade-index-tool")
+ upgrade_index_tool "${@:2}"
+ ;;
+ "help")
+ print_help
+ ;;
+ *)
+ echo "Available commands: (upgrade-index | run-check-index-tool | run-upgrade-index-tool | help)"
+ ;;
+ esac
+}
+
+main ${1+"$@"}
diff --git a/ambari-infra-solr-plugin/pom.xml b/ambari-infra-solr-plugin/pom.xml
index c890cec4..3337d99b 100644
--- a/ambari-infra-solr-plugin/pom.xml
+++ b/ambari-infra-solr-plugin/pom.xml
@@ -47,8 +47,8 @@
maven-compiler-plugin
3.3
- 1.7
- 1.7
+ ${jdk.version}
+ ${jdk.version}
diff --git a/ambari-infra-solr-plugin/src/main/java/org.apache.ambari.infra.security/InfraKerberosHostValidator.java b/ambari-infra-solr-plugin/src/main/java/org/apache/ambari/infra/security/InfraKerberosHostValidator.java
similarity index 100%
rename from ambari-infra-solr-plugin/src/main/java/org.apache.ambari.infra.security/InfraKerberosHostValidator.java
rename to ambari-infra-solr-plugin/src/main/java/org/apache/ambari/infra/security/InfraKerberosHostValidator.java
diff --git a/ambari-infra-solr-plugin/src/main/java/org.apache.ambari.infra.security/InfraRuleBasedAuthorizationPlugin.java b/ambari-infra-solr-plugin/src/main/java/org/apache/ambari/infra/security/InfraRuleBasedAuthorizationPlugin.java
similarity index 100%
rename from ambari-infra-solr-plugin/src/main/java/org.apache.ambari.infra.security/InfraRuleBasedAuthorizationPlugin.java
rename to ambari-infra-solr-plugin/src/main/java/org/apache/ambari/infra/security/InfraRuleBasedAuthorizationPlugin.java
diff --git a/ambari-infra-solr-plugin/src/main/java/org.apache.ambari.infra.security/InfraUserRolesLookupStrategy.java b/ambari-infra-solr-plugin/src/main/java/org/apache/ambari/infra/security/InfraUserRolesLookupStrategy.java
similarity index 100%
rename from ambari-infra-solr-plugin/src/main/java/org.apache.ambari.infra.security/InfraUserRolesLookupStrategy.java
rename to ambari-infra-solr-plugin/src/main/java/org/apache/ambari/infra/security/InfraUserRolesLookupStrategy.java
diff --git a/ambari-infra-solr-plugin/src/test/java/org/apache/ambari/infra/security/InfraRuleBasedAuthorizationPluginTest.java b/ambari-infra-solr-plugin/src/test/java/org/apache/ambari/infra/security/InfraRuleBasedAuthorizationPluginTest.java
index ee84969c..f1f842d9 100644
--- a/ambari-infra-solr-plugin/src/test/java/org/apache/ambari/infra/security/InfraRuleBasedAuthorizationPluginTest.java
+++ b/ambari-infra-solr-plugin/src/test/java/org/apache/ambari/infra/security/InfraRuleBasedAuthorizationPluginTest.java
@@ -242,6 +242,11 @@ public String getHttpMethod() {
public String getResource() {
return (String) values.get("resource");
}
+
+ @Override
+ public Object getHandler() {
+ return null;
+ }
}
}
diff --git a/pom.xml b/pom.xml
index a6a6961b..908abb45 100644
--- a/pom.xml
+++ b/pom.xml
@@ -30,8 +30,8 @@
pom
- 1.7
- 5.5.2
+ 1.8
+ 6.6.0
UTF-8
python >= 2.6
python (>= 2.6)
@@ -43,6 +43,7 @@
ambari-infra-assembly
ambari-infra-solr-client
ambari-infra-solr-plugin
+ ambari-infra-manager
@@ -177,8 +178,8 @@
- utility
- utility
+ org.apache.ambari
+ ambari-utility
1.0.0.0-SNAPSHOT
test