diff --git a/NOTICE b/NOTICE
index e5fc91f34b..d6cba4b41e 100644
--- a/NOTICE
+++ b/NOTICE
@@ -5,305 +5,301 @@ Copyright 2018 and onwards SnappyData Inc.
 This is a comprehensive list of software libraries used by SnappyData in version 1.0.
 More details on license types, license versions, and contributors can be found further down in this file
-HikariCP-2.6.0.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0
-JavaEWAH-0.3.2.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0
-RoaringBitmap-0.5.11.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0
-ST4-4.0.4.jar BSD License http://antlr.org/license.html
-Vis.js Apache V2 http://www.apache.org/licenses/LICENSE-2.0
-activation-1.1.1.jar CDDL 1.0 https://glassfish.dev.java.net/public/CDDLv1.0.html
-akka-actor_2.11-2.3.16.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0
-akka-cluster_2.11-2.3.16.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0
-akka-remote_2.11-2.3.16.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0
-akka-slf4j_2.11-2.3.16.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0
-ant-1.9.7.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0
-antlr-2.7.7.jar BSD License http://antlr.org/license.html
-antlr-runtime-3.4.jar BSD License http://antlr.org/license.html
-antlr4-runtime-4.5.3.jar BSD License http://antlr.org/license.html
+HikariCP-2.7.9.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+JavaEWAH-0.3.2.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+RoaringBitmap-0.6.66.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+ST4-4.0.4.jar BSD License: http://antlr.org/license.html
+Vis.js Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+activation-1.1.1.jar CDDL 1.0: https://glassfish.dev.java.net/public/CDDLv1.0.html
+akka-actor_2.11-2.3.16.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+akka-cluster_2.11-2.3.16.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+akka-remote_2.11-2.3.16.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+akka-slf4j_2.11-2.3.16.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+ant-1.9.13.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+antlr-2.7.7.jar BSD License: http://antlr.org/license.html
+antlr-runtime-3.4.jar BSD License: http://antlr.org/license.html
+antlr4-runtime-4.5.3.jar BSD License: http://antlr.org/license.html
 aopalliance-1.0.jar Public Domain
-aopalliance-repackaged-2.4.0-b34.jar CDDL/GPLv2+CE https://glassfish.java.net/nonav/public/CDDL+GPL_1_1.html
-apache-log4j-extras-1.2.17.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0
-apacheds-i18n-2.0.0-M15.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0
-apacheds-kerberos-codec-2.0.0-M15.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0
-api-asn1-api-1.0.0-M20.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0
-api-util-1.0.0-M20.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0
-arpack_combined_all-0.1.jar BSD License http://www.opensource.org/licenses/bsd-license.php
-avro-1.7.7.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0
-avro-ipc-1.7.7.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0
-avro-mapred-1.7.7-hadoop2.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0
-aws-java-sdk-1.7.4.jar Apache V2 https://aws.amazon.com/apache2.0
+aopalliance-repackaged-2.5.0-b42.jar CDDL/GPLv2+CE: https://glassfish.java.net/nonav/public/CDDL+GPL_1_1.html
+apache-log4j-extras-1.2.17.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+arpack_combined_all-0.1.jar BSD License: http://www.opensource.org/licenses/bsd-license.php +audience-annotations-0.5.0.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +avro-1.7.7.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +avro-ipc-1.7.7.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +avro-mapred-1.7.7-hadoop2.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +aws-java-sdk-1.7.4.jar Apache V2: https://aws.amazon.com/apache2.0 base64-2.3.8.jar Public Domain -bcprov-jdk15on-1.51.jar Bouncy Castle License http://www.bouncycastle.org/licence.html -bonecp-0.8.0.RELEASE.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -breeze-macros_2.11-0.12.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -breeze_2.11-0.12.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -calcite-avatica-1.2.0-incubating.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -calcite-core-1.2.0-incubating.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -calcite-linq4j-1.2.0-incubating.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -cglib-2.2.1-v20090111.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -chill-java-0.8.1.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -chill_2.11-0.8.1.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -commons-beanutils-1.9.2.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -commons-cli-1.3.1.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -commons-codec-1.10.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -commons-collections-3.2.2.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -commons-compiler-3.0.7.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -commons-compress-1.4.1.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -commons-configuration-1.10.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -commons-crypto-1.0.0.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -commons-dbcp-1.4.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -commons-httpclient-3.1.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -commons-io-2.5.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -commons-lang-2.6.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -commons-lang3-3.5.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -commons-logging-1.2.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -commons-math3-3.4.1.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -commons-modeler-2.0.1.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -commons-net-3.1.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -commons-pool-1.6.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -commons-pool2-2.4.2.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -compress-lzf-1.0.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -concurrentlinkedhashmap-lru-1.4.2.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -config-1.3.0.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -core-1.1.2.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -curator-client-2.7.1.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -curator-framework-2.7.1.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -curator-recipes-2.7.1.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -d3.js BSD-3 https://github.com/d3/d3/blob/master/LICENSE -datanucleus-api-jdo-3.2.6.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -datanucleus-core-3.2.10.jar Apache V2 
http://www.apache.org/licenses/LICENSE-2.0 -datanucleus-rdbms-3.2.9.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -derby-10.12.1.1.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -eclipse-collections-9.2.0.jar EPL-1.0 https://www.eclipse.org/legal/epl-v10.html -eigenbase-properties-1.1.5.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -flyway-core-3.2.1.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -gemfire-core-1.6.2.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -gemfire-jgroups-1.6.2.jar LGPL2.1 http://www.opensource.org/licenses/lgpl-2.1.php -gemfire-joptsimple-1.6.2.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -gemfire-json-1.6.2.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -gemfire-shared-1.6.2.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -gemfire-trove-1.6.2.jar LGPL2.1 http://www.opensource.org/licenses/lgpl-2.1.php -gson-2.2.4.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -guava-14.0.1.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -guice-3.0.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -guice-servlet-3.0.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -h2-1.3.176.jar H2 License V1 http://h2database.com/html/license.html -hadoop-annotations-2.7.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -hadoop-auth-2.7.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -hadoop-aws-2.7.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -hadoop-client-2.7.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -hadoop-common-2.7.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -hadoop-hdfs-2.7.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -hadoop-mapreduce-client-app-2.7.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -hadoop-mapreduce-client-common-2.7.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -hadoop-mapreduce-client-core-2.7.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -hadoop-mapreduce-client-jobclient-2.7.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -hadoop-mapreduce-client-shuffle-2.7.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -hadoop-yarn-api-2.7.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -hadoop-yarn-client-2.7.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -hadoop-yarn-common-2.7.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -hadoop-yarn-server-common-2.7.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -hadoop-yarn-server-nodemanager-2.7.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -hadoop-yarn-server-web-proxy-2.7.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -hammer.js MIT http://www.opensource.org/licenses/mit-license.php -hbase-client-0.98.17-hadoop2.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -hbase-common-0.98.17-hadoop2.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -hbase-protocol-0.98.17-hadoop2.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -hbase-server-0.98.17-hadoop2.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -hive-beeline-1.2.1.spark2.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -hive-cli-1.2.1.spark2.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -hive-exec-1.2.1.spark2.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -hive-jdbc-1.2.1.spark2.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -hive-metastore-1.2.1.spark2.jar Apache V2 
http://www.apache.org/licenses/LICENSE-2.0 -hk2-api-2.4.0-b34.jar CDDL/GPLv2+CE https://glassfish.java.net/nonav/public/CDDL+GPL_1_1.html -hk2-locator-2.4.0-b34.jar CDDL/GPLv2+CE https://glassfish.java.net/nonav/public/CDDL+GPL_1_1.html -hk2-utils-2.4.0-b34.jar CDDL/GPLv2+CE https://glassfish.java.net/nonav/public/CDDL+GPL_1_1.html -htrace-core-2.05.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -htrace-core-3.2.0-incubating.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -httpclient-4.5.2.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -httpcore-4.4.4.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -ivy-2.4.0.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -jackson-annotations-2.6.5.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -jackson-core-2.6.5.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -jackson-core-asl-1.9.13.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -jackson-databind-2.6.5.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -jackson-jaxrs-1.9.13.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -jackson-mapper-asl-1.9.13.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -jackson-module-paranamer-2.6.5.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -jackson-module-scala_2.11-2.6.5.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -jackson-xc-1.9.13.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -janino-3.0.7.jar BSD https://raw.githubusercontent.com/janino-compiler/janino/master/LICENSE -java-xmlbuilder-1.0.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -javassist-3.18.1-GA.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -javax.annotation-api-1.2.jar CDDL/GPLv2+CE https://glassfish.java.net/public/CDDL+GPL_1_1.html -javax.inject-1.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -javax.inject-2.4.0-b34.jar CDDL/GPLv2+CE https://glassfish.java.net/public/CDDL+GPL_1_1.html -javax.mail-api-1.5.5.jar CDDL/GPLv2+CE https://glassfish.java.net/public/CDDL+GPL_1_1.html -javax.resource-api-1.7.jar CDDL/GPLv2+CE https://glassfish.java.net/public/CDDL+GPL_1_1.html -javax.servlet-api-3.1.0.jar CDDL/GPLv2+CE https://glassfish.java.net/public/CDDL+GPL_1_1.html -javax.transaction-api-1.2.jar CDDL/GPLv2+CE https://glassfish.java.net/public/CDDL+GPL_1_1.html -javax.ws.rs-api-2.0.1.jar CDDL/GPLv2+CE https://glassfish.java.net/public/CDDL+GPL_1_1.html -javolution-5.5.1.jar BSD http://javolution.org/LICENSE.txt -jaxb-api-2.2.2.jar CDDL/GPLv2+CE https://glassfish.java.net/public/CDDL+GPL_1_1.html -jcl-over-slf4j-1.7.21.jar MIT http://www.opensource.org/licenses/mit-license.php -jdo-api-3.0.1.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -jersey-client-2.22.2.jar CDDL/GPLv2+CE https://glassfish.java.net/public/CDDL+GPL_1_1.html -jersey-common-2.22.2.jar CDDL/GPLv2+CE https://glassfish.java.net/public/CDDL+GPL_1_1.html -jersey-container-servlet-2.22.2.jar CDDL/GPLv2+CE https://glassfish.java.net/public/CDDL+GPL_1_1.html -jersey-container-servlet-core-2.22.2.jar CDDL/GPLv2+CE https://glassfish.java.net/public/CDDL+GPL_1_1.html -jersey-guava-2.22.2.jar CDDL/GPLv2+CE https://glassfish.java.net/public/CDDL+GPL_1_1.html -jersey-media-jaxb-2.22.2.jar CDDL/GPLv2+CE https://glassfish.java.net/public/CDDL+GPL_1_1.html -jersey-server-2.22.2.jar CDDL/GPLv2+CE https://glassfish.java.net/public/CDDL+GPL_1_1.html -jets3t-0.9.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -jettison-1.1.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 
-jetty-6.1.26.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -jetty-client-9.2.22.v20170606.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -jetty-continuation-9.2.22.v20170606.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -jetty-http-9.2.22.v20170606.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -jetty-io-9.2.22.v20170606.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -jetty-jndi-9.2.22.v20170606.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -jetty-plus-9.2.22.v20170606.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -jetty-proxy-9.2.22.v20170606.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -jetty-security-9.2.22.v20170606.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -jetty-server-9.2.22.v20170606.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -jetty-servlet-9.2.22.v20170606.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -jetty-servlets-9.2.22.v20170606.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -jetty-util-6.1.26.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -jetty-util-9.2.22.v20170606.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -jetty-webapp-9.2.22.v20170606.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -jetty-xml-9.2.22.v20170606.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -jline-2.14.2.jar BSD http://www.opensource.org/licenses/bsd-license.php -jna-4.2.2.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -joda-convert-1.8.1.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -joda-time-2.9.4.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -jodd-core-3.5.2.jar BSD http://jodd.org/license.html -jpam-1.1.jar Apache V2 http://jpam.sourceforge.net/LICENSE.txt -json4s-ast_2.11-3.2.11.jar Apache V2 http://jpam.sourceforge.net/LICENSE.txt -json4s-core_2.11-3.2.11.jar Apache V2 http://jpam.sourceforge.net/LICENSE.txt -json4s-jackson_2.11-3.2.11.jar Apache V2 http://jpam.sourceforge.net/LICENSE.txt -jsp-api-2.1.jar CDDL/GPLv2+CE https://glassfish.java.net/public/CDDL+GPL_1_1.html -jsr305-3.0.1.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -jta-1.1.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -jtransforms-2.4.0.jar MPL http://www.mozilla.org/MPL/2.0/index.txt -jul-to-slf4j-1.7.21.jar MIT http://www.opensource.org/licenses/mit-license.php -kafka-clients-0.8.2.1.jar Apache V2 http://jpam.sourceforge.net/LICENSE.txt -kafka_2.11-0.8.2.1.jar Apache V2 http://jpam.sourceforge.net/LICENSE.txt -kryo-shaded-4.0.0.jar BSD http://www.opensource.org/licenses/bsd-license.php -leveldbjni-all-1.8.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -libfb303-0.9.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -libgemfirexd.so Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -libgemfirexd64.so Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -libgemfirexd64_g.so Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -libgemfirexd_g.so Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -libthrift-0.9.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -liquidFillGauge.js BSD http://choosealicense.com/licenses/bsd-2-clause -log4j-1.2.17.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -lz4-1.3.0.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -macro-compat_2.11-1.1.1.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -mail-1.4.7.jar CDDL/GPLv2+CE https://glassfish.java.net/public/CDDL+GPL_1_1.html -metrics-core-2.2.0.jar Apache V2 
http://www.apache.org/licenses/LICENSE-2.0 -metrics-core-3.1.2.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -metrics-graphite-3.1.2.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -metrics-json-3.1.2.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -metrics-jvm-3.1.2.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -mimepull-1.9.5.jar CDDL/GPLv2+CE https://glassfish.java.net/public/CDDL+GPL_1_1.html -minlog-1.3.0.jar BSD http://www.opensource.org/licenses/bsd-license.php -moment.js MIT http://www.opensource.org/licenses/mit-license.php -mx4j-3.0.2.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -mx4j-remote-3.0.2.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -mx4j-tools-3.0.1.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -netty-3.9.9.Final.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -netty-all-4.0.43.Final.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -objenesis-2.4.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -opencsv-2.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -org.osgi.core-6.0.0.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -oro-2.0.8.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -osgi-resource-locator-1.0.1.jar CDDL/GPLv2+CE https://glassfish.java.net/public/CDDL+GPL_1_1.html -paranamer-2.6.jar BSD http://www.opensource.org/licenses/bsd-license.php -parboiled-core-1.1.7.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -parboiled-scala_2.11-1.1.7.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -parboiled_2.11-2.1.4.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -parquet-column-1.8.1.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -parquet-common-1.8.1.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -parquet-encoding-1.8.1.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -parquet-format-2.3.0-incubating.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -parquet-hadoop-1.8.1.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -parquet-hadoop-bundle-1.6.0.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -parquet-jackson-1.8.1.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -pmml-model-1.2.15.jar BSD-3 http://opensource.org/licenses/BSD-3-Clause -pmml-schema-1.2.15.jar BSD-3 http://opensource.org/licenses/BSD-3-Clause -protobuf-java-2.6.1.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -py4j-0.10.4.jar BSD http://www.opensource.org/licenses/bsd-license.php -pyrolite-4.13.jar MIT https://raw.githubusercontent.com/irmen/Pyrolite/master/LICENSE -scala-compiler-2.11.8.jar BSD-3 http://www.scala-lang.org/license.html -scala-library-2.11.8.jar BSD-3 http://www.scala-lang.org/license.html -scala-parser-combinators_2.11-1.0.4.jar BSD-3 http://www.scala-lang.org/license.html -scala-reflect-2.11.8.jar BSD-3 http://www.scala-lang.org/license.html -scala-xml_2.11-1.0.4.jar BSD-3 http://www.scala-lang.org/license.html -scalap-2.11.8.jar BSD-3 http://www.scala-lang.org/license.html -shapeless_2.11-2.3.2.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -shiro-core-1.2.6.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -slf4j-api-1.7.21.jar MIT http://www.opensource.org/licenses/mit-license.php -slf4j-log4j12-1.7.21.jar MIT http://www.opensource.org/licenses/mit-license.php -slick_2.11-2.1.0.jar BSD http://github.com/slick/slick/blob/master/LICENSE.txt -snappy-0.2.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -snappy-java-1.1.2.6.jar Apache V2 
http://www.apache.org/licenses/LICENSE-2.0 -snappy-spark-catalyst_2.11-2.1.1.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -snappy-spark-core_2.11-2.1.1.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -snappy-spark-graphx_2.11-2.1.1.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -snappy-spark-hive-thriftserver_2.11-2.1.1.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -snappy-spark-hive_2.11-2.1.1.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -snappy-spark-launcher_2.11-2.1.1.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -snappy-spark-mllib-local_2.11-2.1.1.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -snappy-spark-mllib_2.11-2.1.1.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -snappy-spark-network-common_2.11-2.1.1.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -snappy-spark-network-shuffle_2.11-2.1.1.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -snappy-spark-repl_2.11-2.1.1.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -snappy-spark-sketch_2.11-2.1.1.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -snappy-spark-sql_2.11-2.1.1.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -snappy-spark-streaming-kafka-0.10_2.11-2.1.1.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -snappy-spark-streaming_2.11-2.1.1.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -snappy-spark-tags_2.11-2.1.1.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -snappy-spark-unsafe_2.11-2.1.1.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -snappy-spark-yarn_2.11-2.1.1.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -snappydata-cluster_2.11-1.0.0.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -snappydata-core_2.11-1.0.0.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -snappydata-store-client-1.6.2.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -snappydata-store-core-1.6.2.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -snappydata-store-hibernate-1.6.2.jar LGPL2.1 http://www.gnu.org/licenses/lgpl-2.1.html -snappydata-store-shared-1.6.2.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -snappydata-store-tools-1.6.2.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -spark-jobserver_2.11-0.6.2.6.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -spire-macros_2.11-0.7.4.jar MIT http://opensource.org/licenses/MIT -spire_2.11-0.7.4.jar MIT http://opensource.org/licenses/MIT -spray-caching_2.11-1.3.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -spray-can_2.11-1.3.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -spray-client_2.11-1.3.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -spray-http_2.11-1.3.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -spray-httpx_2.11-1.3.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -spray-io_2.11-1.3.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -spray-json_2.11-1.3.2.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -spray-routing-shapeless2_2.11-1.3.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -spray-util_2.11-1.3.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -stax-api-1.0-2.jar CDDL 1.0 https://opensource.org/licenses/CDDL-1.0 -stax-api-1.0.1.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -stream-2.7.0.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -stringtemplate-3.2.1.jar BSD 
http://antlr.org/license.html -super-csv-2.2.0.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -tomcat-jdbc-8.5.9.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -tomcat-juli-8.5.9.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -twitter4j-core-4.0.4.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -twitter4j-stream-4.0.4.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -uncommons-maths-1.2.2a.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -univocity-parsers-2.2.1.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -validation-api-1.1.0.Final.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -xbean-asm5-shaded-4.4.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -xercesImpl-2.9.1.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -xml-apis-2.0.2.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0 -xmlenc-0.52.jar BSD http://www.opensource.org/licenses/bsd-license.php -xom-1.2.10.jar LGPL2.1 http://www.gnu.org/licenses/lgpl-2.1.html +bcprov-jdk15on-1.52.jar Bouncy Castle License: http://www.bouncycastle.org/licence.html +bonecp-0.8.0.RELEASE.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +breeze-macros_2.11-0.13.1.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +breeze_2.11-0.13.1.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +calcite-avatica-1.4.0-incubating.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +calcite-core-1.4.0-incubating.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +calcite-linq4j-1.4.0-incubating.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +cglib-2.2.1-v20090111.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +chill-java-0.8.5.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +chill_2.11-0.8.5.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +commons-beanutils-1.9.3.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +commons-cli-1.4.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +commons-codec-1.11.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +commons-collections-3.2.2.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +commons-compiler-3.0.11.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +commons-compress-1.4.1.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +commons-configuration-1.10.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +commons-crypto-1.0.0.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +commons-dbcp-1.4.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +commons-httpclient-3.1.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +commons-io-2.6.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +commons-lang-2.6.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +commons-lang3-3.8.1.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +commons-logging-1.2.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +commons-math3-3.6.1.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +commons-modeler-2.0.1.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +commons-net-3.6.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +commons-pool-1.6.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +commons-pool2-2.6.0.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +compress-lzf-1.0.4.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +concurrentlinkedhashmap-lru-1.4.2.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 
+config-1.3.3.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +core-1.1.2.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +curator-client-2.7.1.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +curator-framework-2.7.1.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +curator-recipes-2.7.1.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +d3.js BSD-3: https://github.com/d3/d3/blob/master/LICENSE +datanucleus-api-jdo-3.2.8.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +datanucleus-core-3.2.15.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +datanucleus-rdbms-3.2.13.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +derby-10.14.2.0.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +eclipse-collections-api-9.2.0.jar EPL-1.0: https://www.eclipse.org/legal/epl-v10.html +eclipse-collections-9.2.0.jar EPL-1.0: https://www.eclipse.org/legal/epl-v10.html +eigenbase-properties-1.1.5.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +flyway-core-3.2.1.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +gemfire-core-1.6.2.1.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +gemfire-jgroups-1.6.2.1.jar LGPL2.1: http://www.opensource.org/licenses/lgpl-2.1.php +gemfire-shared-1.6.2.1.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +gemfire-trove-1.6.2.1.jar LGPL2.1: http://www.opensource.org/licenses/lgpl-2.1.php +gemfire-util-1.6.2.1.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +gson-2.2.4.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +guava-14.0.1.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +guice-3.0.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +guice-servlet-3.0.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +h2-1.3.176.jar H2 License V1: http://h2database.com/html/license.html +hadoop-annotations-2.7.7.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +hadoop-auth-2.7.7.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +hadoop-aws-2.7.7.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +hadoop-client-2.7.7.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +hadoop-common-2.7.7.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +hadoop-hdfs-2.7.7.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +hadoop-mapreduce-client-app-2.7.7.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +hadoop-mapreduce-client-common-2.7.7.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +hadoop-mapreduce-client-core-2.7.7.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +hadoop-mapreduce-client-jobclient-2.7.7.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +hadoop-mapreduce-client-shuffle-2.7.7.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +hadoop-yarn-api-2.7.7.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +hadoop-yarn-client-2.7.7.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +hadoop-yarn-common-2.7.7.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +hadoop-yarn-server-common-2.7.7.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +hadoop-yarn-server-nodemanager-2.7.7.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +hadoop-yarn-server-web-proxy-2.7.7.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +hammer.js MIT: http://www.opensource.org/licenses/mit-license.php +hive-beeline-1.2.1.spark2.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +hive-cli-1.2.1.spark2.jar Apache V2: 
http://www.apache.org/licenses/LICENSE-2.0 +hive-exec-1.2.1.spark2.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +hive-jdbc-1.2.1.spark2.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +hive-metastore-1.2.1.spark2.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +hk2-api-2.5.0-b42.jar CDDL/GPLv2+CE: https://glassfish.java.net/nonav/public/CDDL+GPL_1_1.html +hk2-locator-2.5.0-b42.jar CDDL/GPLv2+CE: https://glassfish.java.net/nonav/public/CDDL+GPL_1_1.html +hk2-utils-2.5.0-b42.jar CDDL/GPLv2+CE: https://glassfish.java.net/nonav/public/CDDL+GPL_1_1.html +htrace-core-3.2.0-incubating.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +httpclient-4.5.6.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +httpcore-4.4.10.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +ivy-2.4.0.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +jackson-annotations-2.6.7.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +jackson-core-2.6.7.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +jackson-core-asl-1.9.13.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +jackson-databind-2.6.7.2.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +jackson-jaxrs-1.9.13.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +jackson-mapper-asl-1.9.13.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +jackson-module-paranamer-2.6.7.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +jackson-module-scala_2.11-2.6.7.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +jackson-xc-1.9.13.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +janino-3.0.11.jar BSD: https://raw.githubusercontent.com/janino-compiler/janino/master/LICENSE +java-xmlbuilder-1.1.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +javassist-3.22.0-CR2.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +javax.annotation-api-1.2.jar CDDL/GPLv2+CE: https://glassfish.java.net/public/CDDL+GPL_1_1.html +javax.inject-1.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +javax.inject-2.5.0-b42.jar CDDL/GPLv2+CE: https://glassfish.java.net/public/CDDL+GPL_1_1.html +javax.resource-api-1.7.1.jar CDDL/GPLv2+CE: https://glassfish.java.net/public/CDDL+GPL_1_1.html +javax.servlet-api-4.0.1.jar CDDL/GPLv2+CE: https://glassfish.java.net/public/CDDL+GPL_1_1.html +javax.transaction-api-1.3.jar CDDL/GPLv2+CE: https://glassfish.java.net/public/CDDL+GPL_1_1.html +javax.ws.rs-api-2.1.1.jar CDDL/GPLv2+CE: https://glassfish.java.net/public/CDDL+GPL_1_1.html +javolution-5.5.1.jar BSD: http://javolution.org/LICENSE.txt +jaxb-api-2.2.2.jar CDDL/GPLv2+CE: https://glassfish.java.net/public/CDDL+GPL_1_1.html +jcl-over-slf4j-1.7.25.jar MIT: http://www.opensource.org/licenses/mit-license.php +jdo-api-3.0.1.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +jersey-client-2.27.jar CDDL/GPLv2+CE: https://glassfish.java.net/public/CDDL+GPL_1_1.html +jersey-common-2.27.jar CDDL/GPLv2+CE: https://glassfish.java.net/public/CDDL+GPL_1_1.html +jersey-container-servlet-2.27.jar CDDL/GPLv2+CE: https://glassfish.java.net/public/CDDL+GPL_1_1.html +jersey-container-servlet-core-2.27.jar CDDL/GPLv2+CE: https://glassfish.java.net/public/CDDL+GPL_1_1.html +jersey-hk2-2.27.jar CDDL/GPLv2+CE: https://glassfish.java.net/public/CDDL+GPL_1_1.html +jersey-media-jaxb-2.27.jar CDDL/GPLv2+CE: https://glassfish.java.net/public/CDDL+GPL_1_1.html +jersey-server-2.27.jar CDDL/GPLv2+CE: https://glassfish.java.net/public/CDDL+GPL_1_1.html +jets3t-0.9.4.jar Apache V2: 
http://www.apache.org/licenses/LICENSE-2.0
+jettison-1.1.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+jetty-6.1.26.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+jetty-client-9.2.26.v20180806.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+jetty-continuation-9.2.26.v20180806.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+jetty-http-9.2.26.v20180806.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+jetty-io-9.2.26.v20180806.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+jetty-jndi-9.2.26.v20180806.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+jetty-plus-9.2.26.v20180806.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+jetty-proxy-9.2.26.v20180806.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+jetty-security-9.2.26.v20180806.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+jetty-server-9.2.26.v20180806.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+jetty-servlet-9.2.26.v20180806.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+jetty-servlets-9.2.26.v20180806.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+jetty-sslengine-6.1.26.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+jetty-util-6.1.26.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+jetty-util-9.2.26.v20180806.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+jetty-webapp-9.2.26.v20180806.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+jetty-xml-9.2.26.v20180806.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+jline-2.14.6.jar BSD: http://www.opensource.org/licenses/bsd-license.php
+jna-4.5.2.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+joda-convert-2.1.2.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+joda-time-2.10.1.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+jodd-core-5.0.6.jar BSD: http://jodd.org/license.html
+jpam-1.1.jar Apache V2: http://jpam.sourceforge.net/LICENSE.txt
+json4s-ast_2.11-3.2.11.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+json4s-core_2.11-3.2.11.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+json4s-jackson_2.11-3.2.11.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+jsp-api-2.1.jar CDDL/GPLv2+CE: https://glassfish.java.net/public/CDDL+GPL_1_1.html
+jsr305-3.0.2.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+jta-1.1.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+jtransforms-2.4.0.jar MPL: http://www.mozilla.org/MPL/2.0/index.txt
+jul-to-slf4j-1.7.25.jar MIT: http://www.opensource.org/licenses/mit-license.php
+kafka-clients-0.10.0.1.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+kafka_2.11-0.10.0.1.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+kryo-shaded-4.0.2.jar BSD: http://www.opensource.org/licenses/bsd-license.php
+leveldbjni-all-1.8.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+libfb303-0.9.3.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+libgemfirexd.dylib Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+libgemfirexd.so Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+libgemfirexd64.dylib Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+libgemfirexd64.so Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+libthrift-0.9.3.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+liquidFillGauge.js BSD: http://choosealicense.com/licenses/bsd-2-clause
+log4j-1.2.17.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+lz4-java-1.5.0.jar Apache V2:
http://www.apache.org/licenses/LICENSE-2.0 +machinist_2.11-0.6.1.jar MIT: http://opensource.org/licenses/MIT +macro-compat_2.11-1.1.1.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +metrics-core-2.2.0.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +metrics-core-3.2.6.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +metrics-graphite-3.2.6.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +metrics-json-3.2.6.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +metrics-jvm-3.2.6.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +mimepull-1.9.5.jar CDDL/GPLv2+CE: https://glassfish.java.net/public/CDDL+GPL_1_1.html +minlog-1.3.0.jar BSD: http://www.opensource.org/licenses/bsd-license.php +moment.js MIT: http://www.opensource.org/licenses/mit-license.php +mx4j-3.0.2.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +mx4j-remote-3.0.2.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +mx4j-tools-3.0.1.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +netty-3.10.6.Final.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +netty-all-4.0.56.Final.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +objenesis-3.0.1.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +opencsv-2.3.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +org.osgi.core-6.0.0.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +oro-2.0.8.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +osgi-resource-locator-1.0.1.jar CDDL/GPLv2+CE: https://glassfish.java.net/public/CDDL+GPL_1_1.html +paranamer-2.6.jar BSD: http://www.opensource.org/licenses/bsd-license.php +parboiled-core-1.1.7.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +parboiled-scala_2.11-1.1.7.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +parboiled_2.11-2.1.5.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +parquet-column-1.8.3.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +parquet-common-1.8.3.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +parquet-encoding-1.8.3.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +parquet-format-2.3.1.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +parquet-hadoop-1.8.3.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +parquet-hadoop-bundle-1.6.0.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +parquet-jackson-1.8.3.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +pmml-model-1.2.17.jar BSD-3: http://opensource.org/licenses/BSD-3-Clause +pmml-schema-1.2.17.jar BSD-3: http://opensource.org/licenses/BSD-3-Clause +protobuf-java-3.6.1.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +py4j-0.10.8.1.jar BSD: http://www.opensource.org/licenses/bsd-license.php +pyrolite-4.22.jar MIT: https://raw.githubusercontent.com/irmen/Pyrolite/master/LICENSE +scala-compiler-2.11.8.jar BSD-3: http://www.scala-lang.org/license.html +scala-library-2.11.8.jar BSD-3: http://www.scala-lang.org/license.html +scala-parser-combinators_2.11-1.0.4.jar BSD-3: http://www.scala-lang.org/license.html +scala-reflect-2.11.8.jar BSD-3: http://www.scala-lang.org/license.html +scala-xml_2.11-1.0.4.jar BSD-3: http://www.scala-lang.org/license.html +scalap-2.11.8.jar BSD-3: http://www.scala-lang.org/license.html +servlet-api-2.5-20081211.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +shapeless_2.11-2.3.3.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +shiro-core-1.2.6.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 
+slf4j-api-1.7.25.jar MIT: http://www.opensource.org/licenses/mit-license.php +slf4j-log4j12-1.7.25.jar MIT: http://www.opensource.org/licenses/mit-license.php +slick_2.11-2.1.0.jar BSD: http://github.com/slick/slick/blob/master/LICENSE.txt +snappy-0.2.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +snappy-java-1.1.7.2.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +snappy-spark-catalyst_2.11-2.1.1.4.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +snappy-spark-core_2.11-2.1.1.4.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +snappy-spark-graphx_2.11-2.1.1.4.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +snappy-spark-hive-thriftserver_2.11-2.1.1.4.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +snappy-spark-hive_2.11-2.1.1.4.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +snappy-spark-launcher_2.11-2.1.1.4.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +snappy-spark-mllib-local_2.11-2.1.1.4.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +snappy-spark-mllib_2.11-2.1.1.4.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +snappy-spark-network-common_2.11-2.1.1.4.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +snappy-spark-network-shuffle_2.11-2.1.1.4.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +snappy-spark-repl_2.11-2.1.1.4.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +snappy-spark-sketch_2.11-2.1.1.4.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +snappy-spark-sql_2.11-2.1.1.4.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +snappy-spark-sql-kafka-0.10_2.11-2.1.1.4.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +snappy-spark-streaming-kafka-0.10_2.11-2.1.1.4.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +snappy-spark-streaming_2.11-2.1.1.4.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +snappy-spark-tags_2.11-2.1.1.4.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +snappy-spark-unsafe_2.11-2.1.1.4.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +snappy-spark-yarn_2.11-2.1.1.4.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +snappydata-cluster_2.11-1.0.2.1.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +snappydata-core_2.11-1.0.2.1.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +snappydata-jdbc_2.11-1.0.2.1-only.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +snappydata-launcher-1.0.2.1.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +snappydata-store-client-1.6.2.1.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +snappydata-store-core-1.6.2.1.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +snappydata-store-hibernate-1.6.2.1.jar LGPL2.1: http://www.gnu.org/licenses/lgpl-2.1.html +snappydata-store-shared-1.6.2.1.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +snappydata-store-tools-1.6.2.1.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +spark-jobserver_2.11-0.6.2.8.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +spire-macros_2.11-0.13.0.jar MIT: http://opensource.org/licenses/MIT +spire_2.11-0.13.0.jar MIT: http://opensource.org/licenses/MIT +spray-caching_2.11-1.3.4.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +spray-can_2.11-1.3.4.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +spray-client_2.11-1.3.4.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 +spray-http_2.11-1.3.4.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0 
+spray-httpx_2.11-1.3.4.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+spray-io_2.11-1.3.4.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+spray-json_2.11-1.3.5.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+spray-routing-shapeless2_2.11-1.3.3.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+spray-util_2.11-1.3.4.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+stax-api-1.0-2.jar CDDL 1.0: https://opensource.org/licenses/CDDL-1.0
+stax-api-1.0.1.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+stream-2.9.6.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+stringtemplate-3.2.1.jar BSD: http://antlr.org/license.html
+super-csv-2.2.0.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+tomcat-jdbc-8.5.37.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+tomcat-juli-8.5.37.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+twitter4j-core-4.0.7.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+twitter4j-stream-4.0.7.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+uncommons-maths-1.2.2a.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+univocity-parsers-2.7.6.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+validation-api-1.1.0.Final.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+xbean-asm5-shaded-4.5.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+xercesImpl-2.9.1.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+xml-apis-1.4.01.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+xmlenc-0.52.jar BSD: http://www.opensource.org/licenses/bsd-license.php
+xom-1.2.10.jar LGPL2.1: http://www.gnu.org/licenses/lgpl-2.1.html
 xz-1.0.jar Public Domain
-zkclient-0.3.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0
-zookeeper-3.4.8.jar Apache V2 http://www.apache.org/licenses/LICENSE-2.0
+zkclient-0.8.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
+zookeeper-3.4.13.jar Apache V2: http://www.apache.org/licenses/LICENSE-2.0
diff --git a/build.gradle b/build.gradle
index 9326b6280b..a5ca9e17fc 100644
--- a/build.gradle
+++ b/build.gradle
@@ -15,43 +15,43 @@
  * LICENSE file.
*/ -import groovy.json.JsonSlurper import org.gradle.api.tasks.testing.logging.* import org.gradle.internal.logging.* -apply plugin: 'wrapper' -apply plugin: 'distribution' - -if (JavaVersion.current().isJava8Compatible()) { - allprojects { - tasks.withType(Javadoc) { - options.addStringOption('Xdoclint:none', '-quiet') - /* - if (javax.tools.ToolProvider.getSystemDocumentationTool().isSupportedOption("--allow-script-in-comments") == 0) { - options.addBooleanOption("-allow-script-in-comments", true) - } - */ - } - } -} buildscript { repositories { maven { url 'https://plugins.gradle.org/m2' } mavenCentral() } dependencies { - classpath 'io.snappydata:gradle-scalatest:0.16' + classpath 'io.snappydata:gradle-scalatest:0.23' classpath 'org.github.ngbinh.scalastyle:gradle-scalastyle-plugin_2.11:0.9.0' - classpath "net.rdrei.android.buildtimetracker:gradle-plugin:0.8.+" - classpath 'com.netflix.nebula:gradle-ospackage-plugin:4.4.+' + classpath 'com.github.jengelman.gradle.plugins:shadow:4.0.3' + classpath 'de.undercouch:gradle-download-task:3.4.3' + classpath 'net.rdrei.android.buildtimetracker:gradle-plugin:0.11.+' + classpath 'com.netflix.nebula:gradle-ospackage-plugin:5.2.+' } } +apply plugin: 'wrapper' +apply plugin: 'distribution' +apply plugin: 'nebula.ospackage-base' +apply plugin: "nebula.ospackage" + allprojects { // We want to see all test results. This is equivalent to setting --continue // on the command line. gradle.startParameter.continueOnFailure = true + tasks.withType(Javadoc) { + options.addStringOption('Xdoclint:none', '-quiet') + /* + if (javax.tools.ToolProvider.getSystemDocumentationTool().isSupportedOption("--allow-script-in-comments") == 0) { + options.addBooleanOption("-allow-script-in-comments", true) + } + */ + } + repositories { mavenCentral() maven { url 'https://dl.bintray.com/big-data/maven' } @@ -65,10 +65,9 @@ allprojects { apply plugin: 'java' apply plugin: 'maven' apply plugin: 'scalaStyle' + apply plugin: 'com.github.johnrengelman.shadow' apply plugin: 'idea' apply plugin: "build-time-tracker" - apply plugin: "nebula.ospackage" - apply plugin: 'nebula.ospackage-base' group = 'io.snappydata' version = '1.0.2.1' @@ -84,9 +83,10 @@ allprojects { options.forkOptions.jvmArgs = [ '-J-Xmx2g', '-J-Xms2g', '-J-XX:ReservedCodeCacheSize=512m', '-J-Djava.net.preferIPv4Stack=true' ] } tasks.withType(ScalaCompile) { + options.fork = true + options.forkOptions.jvmArgs = [ '-Xmx2g', '-Xms2g', '-XX:ReservedCodeCacheSize=512m', '-Djava.net.preferIPv4Stack=true' ] // scalaCompileOptions.optimize = true // scalaCompileOptions.useAnt = false - // scalaCompileOptions.fork = false scalaCompileOptions.deprecation = false scalaCompileOptions.additionalParameters = [ '-feature' ] options.encoding = 'UTF-8' @@ -96,13 +96,6 @@ allprojects { javadoc.options.charSet = 'UTF-8' - gradle.taskGraph.whenReady( { graph -> - tasks.withType(Tar).each { tar -> - tar.compression = Compression.GZIP - tar.extension = 'tar.gz' - } - }) - ext { productName = 'SnappyData' vendorName = 'SnappyData, Inc.' 
@@ -116,20 +109,47 @@ allprojects { log4jVersion = '1.2.17' slf4jVersion = '1.7.25' junitVersion = '4.12' - hadoopVersion = '2.7.3' + mockitoVersion = '1.10.19' + hadoopVersion = '2.7.7' scalatestVersion = '2.2.6' - jettyVersion = '9.2.22.v20170606' + jettyVersion = '9.2.26.v20180806' guavaVersion = '14.0.1' kryoVersion = '4.0.1' thriftVersion = '0.9.3' - metricsVersion = '3.2.5' - janinoVersion = '3.0.8' - derbyVersion = '10.12.1.1' + metricsVersion = '4.0.3' + metrics2Version = '2.2.0' + janinoVersion = '3.0.11' + derbyVersion = '10.14.2.0' + parboiledVersion = '2.1.5' + tomcatJdbcVersion = '8.5.37' + hikariCPVersion = '2.7.9' + twitter4jVersion = '4.0.7' + objenesisVersion = '3.0.1' + rabbitMqVersion = '4.9.1' + akkaVersion = '2.3.16' + sprayVersion = '1.3.4' + sprayJsonVersion = '1.3.5' + sprayShapelessVersion = '1.3.3' + sprayTestkitVersion = '1.3.4' + jodaVersion = '2.1.2' + jodaTimeVersion = '2.10.1' + slickVersion = '2.1.0' + h2Version = '1.3.176' + commonsIoVersion = '2.6' + commonsPoolVersion = '1.6' + dbcpVersion = '1.4' + shiroVersion = '1.2.6' + flywayVersion = '3.2.1' + typesafeConfigVersion = '1.3.3' + mssqlVersion = '7.0.0.jre8' + antlr2Version = '2.7.7' + pegdownVersion = '1.6.0' snappyStoreVersion = '1.6.2.1' snappydataVersion = version pulseVersion = '1.5.1' zeppelinInterpreterVersion = '0.7.3.4' + buildFlags = '' createdBy = System.getProperty('user.name') osArch = System.getProperty('os.arch') @@ -140,6 +160,7 @@ allprojects { jdkVersion = System.getProperty('java.version') sparkJobServerVersion = '0.6.2.8' eclipseCollectionsVersion = '9.2.0' + fastutilVersion = '8.2.2' gitCmd = "git --git-dir=${rootDir}/.git --work-tree=${rootDir}" gitBranch = "${gitCmd} rev-parse --abbrev-ref HEAD".execute().text.trim() @@ -175,8 +196,14 @@ allprojects { // force same output directory for IDEA and gradle idea { module { - outputDir file("${project.buildDir}/classes/main") - testOutputDir file("${project.buildDir}/classes/test") + def projOutDir = file("${projectDir}/src/main/scala").exists() + ? "${project.sourceSets.main.java.outputDir}/../../scala/main" + : project.sourceSets.main.java.outputDir + def projTestOutDir = file("${projectDir}/src/test/scala").exists() + ? 
"${project.sourceSets.test.java.outputDir}/../../scala/test" + : project.sourceSets.test.java.outputDir + outputDir file(projOutDir) + testOutputDir file(projTestOutDir) } } } @@ -294,7 +321,8 @@ subprojects { } task scalaTest(type: Test) { - actions = [ new com.github.maiflai.ScalaTestAction() ] + def factory = new com.github.maiflai.BackwardsCompatibleJavaExecActionFactory(gradle.gradleVersion) + actions = [ new com.github.maiflai.ScalaTestAction(factory) ] // top-level default is single process run since scalatest does not // spawn separate JVMs maxParallelForks = 1 @@ -311,6 +339,7 @@ subprojects { testLogging.exceptionFormat = TestExceptionFormat.FULL testLogging.events = TestLogEvent.values() as Set + extensions.add(com.github.maiflai.ScalaTestAction.TAGS, new org.gradle.api.tasks.util.PatternSet()) List suites = [] extensions.add(com.github.maiflai.ScalaTestAction.SUITES, suites) extensions.add('suite', { String name -> suites.add(name) } ) @@ -322,11 +351,11 @@ subprojects { def output = new StringBuilder() extensions.add(com.github.maiflai.ScalaTestAction.TESTOUTPUT, output) - extensions.add('testOutput', { String name -> output.setLength(0); output.append(name) } ) + extensions.add('testOutput', { String name -> output.setLength(0); output.append(name) }) def errorOutput = new StringBuilder() extensions.add(com.github.maiflai.ScalaTestAction.TESTERROR, errorOutput) - extensions.add('testError', { String name -> errorOutput.setLength(0); errorOutput.append(name) } ) + extensions.add('testError', { String name -> errorOutput.setLength(0); errorOutput.append(name) }) // running a single scala suite if (rootProject.hasProperty('singleSuite')) { @@ -334,7 +363,7 @@ subprojects { } workingDir = "${testResultsBase}/scalatest" - testResult '/dev/tty' + // testResult '/dev/tty' testOutput "${workingDir}/output.txt" testError "${workingDir}/error.txt" binResultsDir = file("${workingDir}/binary/${project.name}") @@ -397,35 +426,6 @@ subprojects { "-Dio.netty.allocator.numHeapArenas=${numArenas}", "-Dio.netty.allocator.numDirectArenas=${numArenas}"] - String single = System.getProperty('dunit.single') - if (single == null || single.length() == 0) { - single = rootProject.hasProperty('dunit.single') ? - rootProject.property('dunit.single') : null - } - if (single == null || single.length() == 0) { - def dunitTests = fileTree(dir: testClassesDir, - includes: ['**/*DUnitTest.class', '**/*DUnit.class'], - excludes: ['**/*DUnitSecurityTest.class', '**/NCJ*DUnit.class', '**/pivotal/gemfirexd/wan/**/*DUnit.class']) - FileTree includeTestFiles = dunitTests - int dunitFrom = rootProject.hasProperty('dunit.from') ? - getLast(includeTestFiles, rootProject.property('dunit.from')) : 0 - int dunitTo = rootProject.hasProperty('dunit.to') ? - getLast(includeTestFiles, rootProject.property('dunit.to')) : includeTestFiles.size() - - int begin = dunitFrom != -1 ? dunitFrom : 0 - int end = dunitTo != -1 ? 
dunitTo : includeTestFiles.size() - def filteredSet = includeTestFiles.drop(begin).take(end-begin+1).collect {f -> "**/" + f.name} - if (begin != 0 || end != includeTestFiles.size()) { - println("Picking tests :") - filteredSet.each { a -> println(a) } - } - include filteredSet - } else { - include single - } - exclude '**/*Suite.class' - exclude '**/*DUnitSecurityTest.class' - workingDir = "${testResultsBase}/dunit" binResultsDir = file("${workingDir}/binary/${project.name}") @@ -481,36 +481,6 @@ subprojects { "-Dio.netty.allocator.numHeapArenas=${numArenas}", "-Dio.netty.allocator.numDirectArenas=${numArenas}"] - def single = rootProject.hasProperty('dunitSecurity.single') ? - rootProject.property('dunitSecurity.single') : null - if (single == null || single.length() == 0) { - def dunitSecurityTests = fileTree(dir: testClassesDir, - includes: ['**/*DUnitSecurityTest.class'], - excludes: ['**/*DUnitTest.class', '**/NCJ*DUnit.class', '**/pivotal/gemfirexd/wan/**/*DUnit.class']) - FileTree includeTestFiles = dunitSecurityTests - int dunitFrom = rootProject.hasProperty('dunitSecurity.from') ? - getLast(includeTestFiles, rootProject.property('dunitSecurity.from')) : 0 - int dunitTo = rootProject.hasProperty('dunitSecurity.to') ? - getLast(includeTestFiles, rootProject.property('dunitSecurity.to')) : includeTestFiles.size() - - int begin = dunitFrom != -1 ? dunitFrom : 0 - int end = dunitTo != -1 ? dunitTo : includeTestFiles.size() - def filteredSet = includeTestFiles.drop(begin).take(end-begin+1).collect {f -> "**/" + f.name} - if (begin != 0 || end != includeTestFiles.size()) { - println("Picking tests :") - filteredSet.each { a -> println(a) } - } - include filteredSet - } else { - include single - } - exclude '**/*Suite.class' - exclude '**/*DUnitTest.class' - exclude '**/*DUnit.class' - if (!rootProject.hasProperty('snappydata.enterprise')) { - exclude '**/*DUnitSecurityTest.class' - } - workingDir = "${testResultsBase}/dunit-security" binResultsDir = file("${workingDir}/binary/${project.name}") @@ -528,87 +498,6 @@ subprojects { } } - gradle.taskGraph.whenReady({ graph -> - tasks.withType(Jar).each { pack -> - if (pack.name == 'packageTests') { - pack.from(pack.project.sourceSets.test.output.classesDir, sourceSets.test.resources.srcDirs) - } - } - tasks.withType(Test).each { test -> - test.configure { - - String logLevel = System.getProperty('logLevel') - if (logLevel != null && logLevel.length() > 0) { - systemProperties 'gemfire.log-level' : logLevel, - 'logLevel' : logLevel - } - logLevel = System.getProperty('securityLogLevel') - if (logLevel != null && logLevel.length() > 0) { - systemProperties 'gemfire.security-log-level' : logLevel, - 'securityLogLevel' : logLevel - } - - environment 'SNAPPY_HOME': snappyProductDir, - 'APACHE_SPARK_HOME': sparkProductDir, - 'APACHE_SPARK_CURRENT_HOME': sparkCurrentProductDir, - 'SNAPPY_DIST_CLASSPATH': "${sourceSets.test.runtimeClasspath.asPath}" - - def failureCount = new java.util.concurrent.atomic.AtomicInteger(0) - def progress = new File(workingDir, 'progress.txt') - def output = new File(workingDir, 'output.txt') - - String eol = System.getProperty('line.separator') - beforeTest { desc -> - String now = now() - progress << "${now} Starting test ${desc.className} ${desc.name}${eol}" - output << "${now} STARTING TEST ${desc.className} ${desc.name}${eol}${eol}" - } - onOutput { desc, event -> - String msg = event.message - if (event.destination.toString() == 'StdErr') { - msg = msg.replace(eol, "${eol}[error] ") - } - output << msg - } - 
afterTest { desc, result -> - String now = now() - progress << "${now} Completed test ${desc.className} ${desc.name} with result: ${result.resultType}${eol}" - output << "${eol}${now} COMPLETED TEST ${desc.className} ${desc.name} with result: ${result.resultType}${eol}${eol}" - def exceptions = result.exceptions - if (exceptions.size() > 0) { - exceptions.each { t -> - progress << " EXCEPTION: ${getStackTrace(t)}${eol}" - output << "${getStackTrace(t)}${eol}" - } - failureCount.incrementAndGet() - } - } - doLast { - def report = "${test.reports.html.destination}/index.html" - boolean hasProgress = progress.exists() - if (failureCount.get() > 0) { - println() - def failureMsg = "FAILED: There were ${failureCount.get()} failures.${eol}" - if (hasProgress) { - failureMsg += " See the progress report in: file://$progress${eol}" - } - failureMsg += " HTML report in: file://$report" - throw new GradleException(failureMsg) - } else if (hasProgress) { - println() - println("SUCCESS: See the progress report in: file://$progress") - println(" HTML report in: file://$report") - println() - } else { - println() - println("SUCCESS: See the HTML report in: file://$report") - println() - } - } - } - } - }) - // apply default manifest if (rootProject.hasProperty('enablePublish')) { createdBy = 'SnappyData Build Team' @@ -662,6 +551,10 @@ subprojects { "org.apache.hadoop:hadoop-yarn-server-nodemanager:${hadoopVersion}", "org.apache.hadoop:hadoop-yarn-server-web-proxy:${hadoopVersion}" } + configurations.testRuntime { + // below is included indirectly by hadoop deps and conflicts with embedded 1.5.7 apacheds + exclude(group: 'org.apache.directory.server', module: 'apacheds-kerberos-codec') + } task packageTests(type: Jar, dependsOn: testClasses) { description 'Assembles a jar archive of test classes.' @@ -744,6 +637,165 @@ subprojects { } } +// apply common test and misc configuration +gradle.taskGraph.whenReady { graph -> + + String dunitSingle = System.getProperty('dunit.single') + if (dunitSingle == null || dunitSingle.length() == 0) { + dunitSingle = rootProject.hasProperty('dunit.single') ? + rootProject.property('dunit.single') : null + } + String dunitSecSingle = System.getProperty('dunitSecurity.single') + if (dunitSecSingle == null || dunitSecSingle.length() == 0) { + dunitSecSingle = rootProject.hasProperty('dunitSecurity.single') ? 
+ rootProject.property('dunitSecurity.single') : null + } + + def allTasks = subprojects.collect { it.tasks }.flatten() + allTasks.each { task -> + if (task instanceof Tar) { + def tar = (Tar)task + tar.compression = Compression.GZIP + tar.extension = 'tar.gz' + } else if (task instanceof Jar) { + def pack = (Jar)task + if (pack.name == 'packageTests') { + pack.from(pack.project.sourceSets.test.output.classesDirs, pack.project.sourceSets.test.resources.srcDirs) + } + } else if (task instanceof Test) { + def test = (Test)task + test.configure { + + if (test.name == 'dunitTest') { + includes.clear() + excludes.clear() + if (dunitSingle == null || dunitSingle.length() == 0) { + def dunitTests = testClassesDirs.asFileTree.matching { + includes = [ '**/*DUnitTest.class', '**/*DUnit.class' ] + excludes = [ '**/*Suite.class', '**/*DUnitSecurityTest.class', '**/NCJ*DUnit.class', + '**/BackwardCompatabilityPart*DUnit.class', '**/*Perf*DUnit.class', '**/ListAggDUnit.class', + '**/SingleHop*TransactionDUnit.class', '**/*Parallel*AsyncEvent*DUnit.class', '**/pivotal/gemfirexd/wan/**/*DUnit.class' ] + } + FileTree includeTestFiles = dunitTests + int dunitFrom = rootProject.hasProperty('dunit.from') ? + getLast(includeTestFiles, rootProject.property('dunit.from')) : 0 + int dunitTo = rootProject.hasProperty('dunit.to') ? + getLast(includeTestFiles, rootProject.property('dunit.to')) : includeTestFiles.size() + + int begin = dunitFrom != -1 ? dunitFrom : 0 + int end = dunitTo != -1 ? dunitTo : includeTestFiles.size() + def filteredSet = includeTestFiles.drop(begin).take(end-begin+1).collect {f -> "**/" + f.name} + if (begin != 0 || end != includeTestFiles.size()) { + println("Picking tests :") + filteredSet.each { a -> println(a) } + } + include filteredSet + } else { + include dunitSingle + } + } else if (test.name == 'dunitSecurityTest') { + includes.clear() + excludes.clear() + if (!rootProject.hasProperty('snappydata.enterprise')) { + excludes = [ '**/*Suite.class', '**/*DUnitSecurityTest.class', '**/*DUnitTest.class', '**/*DUnit.class' ] + } else if (dunitSecSingle == null || dunitSecSingle.length() == 0) { + def dunitSecurityTests = testClassesDirs.asFileTree.matching { + includes = [ '**/*DUnitSecurityTest.class' ] + excludes = [ '**/*Suite.class', '**/*DUnitTest.class', '**/*DUnit.class' ] + } + FileTree includeTestFiles = dunitSecurityTests + int dunitFrom = rootProject.hasProperty('dunitSecurity.from') ? + getLast(includeTestFiles, rootProject.property('dunitSecurity.from')) : 0 + int dunitTo = rootProject.hasProperty('dunitSecurity.to') ? + getLast(includeTestFiles, rootProject.property('dunitSecurity.to')) : includeTestFiles.size() + + int begin = dunitFrom != -1 ? dunitFrom : 0 + int end = dunitTo != -1 ? 
dunitTo : includeTestFiles.size() + def filteredSet = includeTestFiles.drop(begin).take(end-begin+1).collect {f -> "**/" + f.name} + if (begin != 0 || end != includeTestFiles.size()) { + println("Picking tests :") + filteredSet.each { a -> println(a) } + } + include filteredSet + } else { + include dunitSecSingle + } + } + + String logLevel = System.getProperty('logLevel') + if (logLevel != null && logLevel.length() > 0) { + systemProperties 'gemfire.log-level' : logLevel, + 'logLevel' : logLevel + } + logLevel = System.getProperty('securityLogLevel') + if (logLevel != null && logLevel.length() > 0) { + systemProperties 'gemfire.security-log-level' : logLevel, + 'securityLogLevel' : logLevel + } + + environment 'SNAPPY_HOME': snappyProductDir, + 'APACHE_SPARK_HOME': sparkProductDir, + 'APACHE_SPARK_CURRENT_HOME': sparkCurrentProductDir, + 'SNAPPY_DIST_CLASSPATH': test.classpath.asPath + + def failureCount = new java.util.concurrent.atomic.AtomicInteger(0) + def progress = new File(workingDir, 'progress.txt') + def output = new File(workingDir, 'output.txt') + + String eol = System.getProperty('line.separator') + beforeTest { desc -> + String now = now() + progress << "${now} Starting test ${desc.className} ${desc.name}${eol}" + output << "${now} STARTING TEST ${desc.className} ${desc.name}${eol}${eol}" + } + onOutput { desc, event -> + String msg = event.message + if (event.destination.toString() == 'StdErr') { + msg = msg.replace(eol, "${eol}[error] ") + } + output << msg + } + afterTest { desc, result -> + String now = now() + progress << "${now} Completed test ${desc.className} ${desc.name} with result: ${result.resultType}${eol}" + output << "${eol}${now} COMPLETED TEST ${desc.className} ${desc.name} with result: ${result.resultType}${eol}${eol}" + def exceptions = result.exceptions + if (exceptions.size() > 0) { + exceptions.each { t -> + progress << " EXCEPTION: ${getStackTrace(t)}${eol}" + output << "${getStackTrace(t)}${eol}" + } + failureCount.incrementAndGet() + } + } + doLast { + def report = "${test.reports.html.destination}/index.html" + boolean hasProgress = progress.exists() + if (failureCount.get() > 0) { + println() + def failureMsg = "FAILED: There were ${failureCount.get()} failures.${eol}" + if (hasProgress) { + failureMsg += " See the progress report in: file://$progress${eol}" + } + failureMsg += " HTML report in: file://$report" + throw new GradleException(failureMsg) + } else if (hasProgress) { + println() + println("SUCCESS: See the progress report in: file://$progress") + println(" HTML report in: file://$report") + println() + } else { + println() + println("SUCCESS: See the HTML report in: file://$report") + println() + } + } + } + } + } +} + + task publishLocal { dependsOn subprojects.findAll { p -> p.name != 'snappydata-native' && p.name != 'snappydata-store-prebuild' && p.name != 'snappydata-store' }.collect { @@ -762,7 +814,7 @@ task publishMaven { task product(type: Zip) { dependsOn ":snappy-cluster_${scalaBinaryVersion}:jar" dependsOn ":snappy-examples_${scalaBinaryVersion}:jar" - dependsOn ":snappy-spark:snappy-spark-assembly_${scalaBinaryVersion}:product" + dependsOn ":snappy-spark:snappy-spark-assembly_${scalaBinaryVersion}:sparkProduct" dependsOn ':snappy-launcher:jar' dependsOn ':jdbcJar' @@ -1029,12 +1081,12 @@ ospackage { } buildRpm { + dependsOn ':packageVSD' + dependsOn ':packageZeppelinInterpreter' requires('glibc') requires('bash') requires('perl') requires('curl') - dependsOn ':packageVSD' - dependsOn ':packageZeppelinInterpreter' if 
(rootProject.hasProperty('hadoop-provided')) { classifier 'without_hadoop' } @@ -1043,13 +1095,13 @@ buildRpm { } buildDeb { + dependsOn ':packageVSD' + dependsOn ':packageZeppelinInterpreter' requires('libc6') requires('bash') requires('perl') requires('curl') recommends('java8-sdk') - dependsOn ':packageVSD' - dependsOn ':packageZeppelinInterpreter' if (rootProject.hasProperty('hadoop-provided')) { classifier 'without-hadoop' } @@ -1141,19 +1193,27 @@ task generateSources { dependsOn ':snappy-spark:generateSources', ':snappy-store:generateSources' // copy all resource files into build classes path because new versions of IDEA // do not include separate resources path in CLASSPATH if output path has been customized - getSubprojects().collect { proj -> - String resourcesDir = "${proj.projectDir}/src/main/resources" - if (file(resourcesDir).exists()) { - copy { - from resourcesDir - into "${proj.buildDir}/classes/main" + doLast { + subprojects.collect { proj -> + String resourcesDir = proj.sourceSets.main.output.resourcesDir + if (file(resourcesDir).exists()) { + def projOutDir = file("${proj.projectDir}/src/main/scala").exists() + ? "${proj.sourceSets.main.java.outputDir}/../../scala/main" + : proj.sourceSets.main.java.outputDir + copy { + from resourcesDir + into projOutDir + } } - } - resourcesDir = "${proj.projectDir}/src/test/resources" - if (file(resourcesDir).exists()) { - copy { - from resourcesDir - into "${proj.buildDir}/classes/test" + resourcesDir = proj.sourceSets.test.output.resourcesDir + if (file(resourcesDir).exists()) { + def projOutDir = file("${proj.projectDir}/src/test/scala").exists() + ? "${proj.sourceSets.test.java.outputDir}/../../scala/test" + : proj.sourceSets.test.java.outputDir + copy { + from resourcesDir + into projOutDir + } } } } @@ -1202,9 +1262,9 @@ task allReports(type: TestReport) { destinationDir = file("${testResultsBase}/combined-reports") mustRunAfter checkAll } -gradle.taskGraph.whenReady({ graph -> +gradle.taskGraph.whenReady { graph -> tasks.getByName('allReports').reportOn rootProject.subprojects.collect{ it.tasks.withType(Test) }.flatten() -}) +} def writeProperties(def parent, def name, def comment, def propsMap) { parent.exists() || parent.mkdirs() @@ -1407,26 +1467,21 @@ if (rootProject.hasProperty('trackBuildTime') ) { } } -// log build output to buildOutput.log +// log build output to buildOutput.log in addition to console output def buildOutput = new File("${rootDir}/buildOutput.log") - // delete build output file if it has become large if (buildOutput.length() > 1000000) { delete buildOutput } - -gradle.services.get(LoggingOutputInternal).addStandardOutputListener (new StandardOutputListener () { +def gradleLogger = new org.gradle.api.logging.StandardOutputListener() { void onOutput(CharSequence output) { buildOutput << output } -}) - -gradle.services.get(LoggingOutputInternal).addStandardErrorListener (new StandardOutputListener () { - void onOutput(CharSequence output) { - buildOutput << output - } -}) +} +def loggerService = gradle.services.get(LoggingOutputInternal) +loggerService.addStandardOutputListener(gradleLogger) +loggerService.addStandardErrorListener(gradleLogger) println() println('-------------------------------------------------') diff --git a/cluster/build.gradle b/cluster/build.gradle index 081ac93272..8775faa4a8 100644 --- a/cluster/build.gradle +++ b/cluster/build.gradle @@ -128,10 +128,11 @@ dependencies { exclude(group: 'com.sun.jersey.contribs') exclude(group: 'com.google.protobuf', module: 'protobuf-java') 
exclude(group: 'com.jcraft', module: 'jsch') + exclude(group: 'org.apache.directory.server', module: 'apacheds-kerberos-codec') } testCompile project(':dunit') - testCompile 'it.unimi.dsi:fastutil:8.2.2' + testCompile "it.unimi.dsi:fastutil:${fastutilVersion}" testCompile "org.scalatest:scalatest_${scalaBinaryVersion}:${scalatestVersion}" if (new File(rootDir, 'aqp/build.gradle').exists() && rootProject.hasProperty('snappydata.enterprise')) { @@ -143,7 +144,7 @@ dependencies { // Creates the version properties file and writes it to the resources dir task createVersionPropertiesFile(dependsOn: 'processResources') { - def propertiesDir = file("${buildDir}/classes/main/io/snappydata") + def propertiesDir = file("${sourceSets.main.scala.outputDir}/io/snappydata") outputs.file "${propertiesDir}/SnappyDataVersion.properties" inputs.file "${rootProject.projectDir}/build.gradle" diff --git a/cluster/src/dunit/scala/io/snappydata/cluster/DDLRoutingDUnitTest.scala b/cluster/src/dunit/scala/io/snappydata/cluster/DDLRoutingDUnitTest.scala index 2d1bef88a2..8d789bfec7 100644 --- a/cluster/src/dunit/scala/io/snappydata/cluster/DDLRoutingDUnitTest.scala +++ b/cluster/src/dunit/scala/io/snappydata/cluster/DDLRoutingDUnitTest.scala @@ -19,10 +19,12 @@ package io.snappydata.cluster import java.sql.{Connection, DriverManager, SQLException} import com.pivotal.gemfirexd.internal.engine.{GfxdConstants, Misc} +import io.snappydata.SnappyFunSuite.resultSetToDataset import io.snappydata.test.dunit.{AvailablePortHelper, SerializableRunnable} -import org.apache.spark.sql.SnappyContext import org.apache.spark.sql.collection.Utils +import org.apache.spark.sql.store.ViewTest +import org.apache.spark.sql.{Dataset, Row, SnappyContext, SnappySession} class DDLRoutingDUnitTest(val s: String) extends ClusterManagerTestBase(s) { @@ -72,12 +74,12 @@ class DDLRoutingDUnitTest(val s: String) extends ClusterManagerTestBase(s) { val conn = getANetConnection(netPort1) // first fail a statement - failCreateTableXD(conn, tableName, true, " row ") + failCreateTableXD(conn, tableName, doFail = true, " row ") createTableXD(conn, tableName, " row ") tableMetadataAssertRowTable("APP", tableName) // Test create table - error for recreate - failCreateTableXD(conn, tableName, false, " row ") + failCreateTableXD(conn, tableName, doFail = false, " row ") // Drop Table and Recreate dropTableXD(conn, tableName) @@ -167,7 +169,7 @@ class DDLRoutingDUnitTest(val s: String) extends ClusterManagerTestBase(s) { vm2.invoke(classOf[ClusterManagerTestBase], "stopAny") val props = bootProps.clone().asInstanceOf[java.util.Properties] - props.put("distributed-system-id" , "1") + props.put("distributed-system-id", "1") props.put("server-groups", "sg1") val restartServer = new SerializableRunnable() { @@ -185,7 +187,7 @@ class DDLRoutingDUnitTest(val s: String) extends ClusterManagerTestBase(s) { var s = conn.createStatement() s.execute(s"CREATE TABLE $tableName (Col1 INT, Col2 INT, Col3 STRING)") insertDataXD(conn, tableName) - var snc = org.apache.spark.sql.SnappyContext(sc) + val snc = org.apache.spark.sql.SnappyContext(sc) verifyResultAndSchema(snc, tableName, 3) s.execute(s"ALTER TABLE $tableName ADD Col4 INT") @@ -207,21 +209,21 @@ class DDLRoutingDUnitTest(val s: String) extends ClusterManagerTestBase(s) { s.execute(s"insert into $tableName values (1,1)") s.execute(s"ALTER TABLE $tableName add constraint emp_uk unique (Col1)") try { - s.execute(s"insert into $tableName values (1,1)") + s.execute(s"insert into $tableName values (1,1)") } catch { case 
sqle: SQLException => if (sqle.getSQLState != "23505" || - !sqle.getMessage.contains("duplicate key value in a unique or" + - " primary key constraint or unique index")) { + !sqle.getMessage.contains("duplicate key value in a unique or" + + " primary key constraint or unique index")) { throw sqle } } // asynceventlistener s.execute("CREATE ASYNCEVENTLISTENER myListener (" + - " listenerclass 'com.pivotal.gemfirexd.callbacks.DBSynchronizer'" + - " initparams 'org.apache.derby.jdbc.EmbeddedDriver,jdbc:derby:newDB;create=true')" + - " server groups(sg1)") + " listenerclass 'com.pivotal.gemfirexd.callbacks.DBSynchronizer'" + + " initparams 'org.apache.derby.jdbc.EmbeddedDriver,jdbc:derby:newDB;create=true')" + + " server groups(sg1)") s.execute(s"ALTER TABLE $tableName SET ASYNCEVENTLISTENER (myListener) ") var rs = s.executeQuery(s"select * from SYS.SYSTABLES where tablename='$tableName'") @@ -287,7 +289,8 @@ class DDLRoutingDUnitTest(val s: String) extends ClusterManagerTestBase(s) { var cnt = 0 while (rs.next()) { cnt += 1 - rs.getInt(1); rs.getInt(2); + rs.getInt(1) + rs.getInt(2) } assert(cnt == 5, cnt) @@ -296,7 +299,9 @@ class DDLRoutingDUnitTest(val s: String) extends ClusterManagerTestBase(s) { cnt = 0 while (rs2.next()) { cnt += 1 - rs2.getInt(1); rs2.getInt(2); rs2.getInt(3); + rs2.getInt(1) + rs2.getInt(2) + rs2.getInt(3) } assert(cnt == 5, cnt) @@ -324,6 +329,36 @@ class DDLRoutingDUnitTest(val s: String) extends ClusterManagerTestBase(s) { dropTableXD(conn, tableName) } + def testViews(): Unit = { + val netPort1 = AvailablePortHelper.getRandomAvailableTCPPort + vm2.invoke(classOf[ClusterManagerTestBase], "startNetServer", netPort1) + + val session = new SnappySession(sc) + ViewTest.createTables(session) + + def newExecution(): String => Dataset[Row] = { + val session = new SnappySession(sc) + val conn = getANetConnection(netPort1) + val stmt = conn.createStatement() + resultSetToDataset(session, stmt) + } + + val conn = getANetConnection(netPort1) + val stmt = conn.createStatement() + ViewTest.testTemporaryView(resultSetToDataset(session, stmt), newExecution) + ViewTest.testGlobalTemporaryView(resultSetToDataset(session, stmt), newExecution) + ViewTest.testTemporaryViewUsing(resultSetToDataset(session, stmt), newExecution) + ViewTest.testGlobalTemporaryViewUsing(resultSetToDataset(session, stmt), newExecution) + ViewTest.testPersistentView(resultSetToDataset(session, stmt), checkPlans = false, + newExecution, restartSpark) + ViewTest.dropTables(new SnappySession(sc)) + } + + private def restartSpark(): Unit = { + ClusterManagerTestBase.stopAny() + ClusterManagerTestBase.startSnappyLead(ClusterManagerTestBase.locatorPort, bootProps) + } + def createTableXD(conn: Connection, tableName: String, usingStr: String): Unit = { val s = conn.createStatement() @@ -421,7 +456,7 @@ class DDLRoutingDUnitTest(val s: String) extends ClusterManagerTestBase(s) { s.execute("CREATE EXTERNAL TABLE airlineRef_temp(Code VARCHAR(25), " + "Description VARCHAR(25)) USING parquet OPTIONS()") } catch { - case e: java.sql.SQLException => + case _: java.sql.SQLException => // println("Exception stack. create. 
ex=" + e.getMessage + // " ,stack=" + ExceptionUtils.getFullStackTrace(e)) } diff --git a/cluster/src/dunit/scala/io/snappydata/cluster/DistributedIndexDUnitTest.scala b/cluster/src/dunit/scala/io/snappydata/cluster/DistributedIndexDUnitTest.scala index 7a9ce1bd43..088165761f 100644 --- a/cluster/src/dunit/scala/io/snappydata/cluster/DistributedIndexDUnitTest.scala +++ b/cluster/src/dunit/scala/io/snappydata/cluster/DistributedIndexDUnitTest.scala @@ -114,7 +114,7 @@ class DistributedIndexDUnitTest(s: String) extends ClusterManagerTestBase(s) { // } // executeQ(s"select * from $tableName where col2 = 'aaa' ") { -// CreateIndexTest.validateIndex(Seq.empty, tableName)(_) +// CreateIndexTest.validateIndex(Nil, tableName)(_) // } executeQ(s"select * from $tableName where col2 = 'bbb' and col3 = 'halo' ") { @@ -164,7 +164,7 @@ class DistributedIndexDUnitTest(s: String) extends ClusterManagerTestBase(s) { // } // executeQ(s"select * from $tableName where col2 = 'aaa' ") { - // CreateIndexTest.validateIndex(Seq.empty, tableName)(_) + // CreateIndexTest.validateIndex(Nil, tableName)(_) // } System.setProperty("LOG-NOW", "xxx") @@ -228,7 +228,7 @@ class DistributedIndexDUnitTest(s: String) extends ClusterManagerTestBase(s) { // } // executeQ(s"select * from $tableName where col2 = 'aaa' ") { - // CreateIndexTest.validateIndex(Seq.empty, tableName)(_) + // CreateIndexTest.validateIndex(Nil, tableName)(_) // } System.setProperty("LOG-NOW", "xxx") diff --git a/cluster/src/dunit/scala/io/snappydata/cluster/QueryRoutingDUnitTest.scala b/cluster/src/dunit/scala/io/snappydata/cluster/QueryRoutingDUnitTest.scala index fa4ec20a2d..4e507e39be 100644 --- a/cluster/src/dunit/scala/io/snappydata/cluster/QueryRoutingDUnitTest.scala +++ b/cluster/src/dunit/scala/io/snappydata/cluster/QueryRoutingDUnitTest.scala @@ -761,6 +761,7 @@ class QueryRoutingDUnitTest(val s: String) TPCHUtils.createAndLoadTables(snc, true) + snc.setConf(Property.EnableExperimentalFeatures.name, "true") snc.sql( s"""CREATE INDEX idx_orders_cust ON orders(o_custkey) options (COLOCATE_WITH 'customer') diff --git a/cluster/src/dunit/scala/io/snappydata/cluster/SplitSnappyClusterDUnitTest.scala b/cluster/src/dunit/scala/io/snappydata/cluster/SplitSnappyClusterDUnitTest.scala index 184f369567..cb0f67ca5b 100644 --- a/cluster/src/dunit/scala/io/snappydata/cluster/SplitSnappyClusterDUnitTest.scala +++ b/cluster/src/dunit/scala/io/snappydata/cluster/SplitSnappyClusterDUnitTest.scala @@ -435,7 +435,7 @@ object SplitSnappyClusterDUnitTest snc.dropTable("splitModeTable1", ifExists = true) // recreate the dropped table - var expected = Seq.empty[ComplexData] + var expected: Seq[ComplexData] = Nil if (isComplex) { expected = createComplexTableUsingDataSourceAPI(snc, "splitModeTable1", tableType, props) diff --git a/cluster/src/dunit/scala/org/apache/spark/DynamicJarInstallationDUnitTest.scala b/cluster/src/dunit/scala/org/apache/spark/DynamicJarInstallationDUnitTest.scala index 62053cb29b..debcad9efa 100644 --- a/cluster/src/dunit/scala/org/apache/spark/DynamicJarInstallationDUnitTest.scala +++ b/cluster/src/dunit/scala/org/apache/spark/DynamicJarInstallationDUnitTest.scala @@ -67,7 +67,7 @@ class DynamicJarInstallationDUnitTest(val s: String) var testJar = DynamicJarInstallationDUnitTest.createJarWithClasses( classNames = Seq("FakeJobClass", "FakeJobClass1"), toStringValue = "1", - Seq.empty, Seq.empty, + Nil, Nil, "testJar_SNAPPY_JOB_SERVER_JAR_%s.jar".format(System.currentTimeMillis())) var jobCompleted = false @@ -106,7 +106,7 @@ class 
DynamicJarInstallationDUnitTest(val s: String) testJar = DynamicJarInstallationDUnitTest.createJarWithClasses( classNames = Seq("FakeJobClass", "FakeJobClass1"), toStringValue = "2", - Seq.empty, Seq.empty, + Nil, Nil, "testJar_SNAPPY_JOB_SERVER_JAR_%s.jar".format(System.currentTimeMillis())) localProperty = (Seq("app1", DateTime.now) ++ Array[URL](testJar)).mkString(",") @@ -169,4 +169,4 @@ object DynamicJarInstallationDUnitTest { else false } } -} \ No newline at end of file +} diff --git a/cluster/src/dunit/scala/org/apache/spark/sql/udf/UserDefinedFunctionsDUnitTest.scala b/cluster/src/dunit/scala/org/apache/spark/sql/udf/UserDefinedFunctionsDUnitTest.scala index 973a425329..d4421745b4 100644 --- a/cluster/src/dunit/scala/org/apache/spark/sql/udf/UserDefinedFunctionsDUnitTest.scala +++ b/cluster/src/dunit/scala/org/apache/spark/sql/udf/UserDefinedFunctionsDUnitTest.scala @@ -279,7 +279,7 @@ object UserDefinedFunctionsDUnitTest { def createUDFClass(name: String, code: String): File = { TestUtils.createCompiledClass(name, destDir, - getJavaSourceFromString(name, code), Seq.empty[URL]) + getJavaSourceFromString(name, code), Nil) } def createJarFile(files: Seq[File]): String = { diff --git a/cluster/src/main/java/io/snappydata/gemxd/SnappySystemAdmin.java b/cluster/src/main/java/io/snappydata/gemxd/SnappySystemAdmin.java index c20d1f6a51..be595f1541 100644 --- a/cluster/src/main/java/io/snappydata/gemxd/SnappySystemAdmin.java +++ b/cluster/src/main/java/io/snappydata/gemxd/SnappySystemAdmin.java @@ -32,7 +32,7 @@ public class SnappySystemAdmin extends GfxdSystemAdmin { - SnappySystemAdmin() { + private SnappySystemAdmin() { super(); UTIL_Tools_DSProps = "UTIL_Snappy_Tools_DSProps"; UTIL_DSProps_HelpPost = "UTIL_Snappy_Tools_DSProps_HelpPost"; @@ -91,31 +91,28 @@ public void invoke(String[] args) { super.invoke(args); } finally { - // remove zero-sized log-file - if (this.defaultLogFileName != null) { - try { - File logFile = new File(this.defaultLogFileName); - if (logFile.exists() && logFile.isFile() && logFile.length() == 0) { - logFile.delete(); - } - } catch (Throwable t) { - // ignore at this point + // remove zero-sized generatedcode.log file + try { + File codeLogFile = new File("generatedcode.log"); + if (codeLogFile.exists() && codeLogFile.isFile() && codeLogFile.length() == 0) { + codeLogFile.delete(); } + } catch (Throwable t) { + // ignore at this point } } } public boolean handleVersion(String[] args) { - String cmd = null; - final ArrayList cmdLine = new ArrayList(Arrays.asList(args)); + String cmd; + final ArrayList cmdLine = new ArrayList<>(Arrays.asList(args)); try { Iterator it = cmdLine.iterator(); while (it.hasNext()) { String arg = it.next(); if (arg.startsWith("-")) { checkDashArg(null, arg, it); - } - else { + } else { break; } } @@ -159,9 +156,8 @@ public boolean handleVersion(String[] args) { } if (cmd.equalsIgnoreCase("version")) { - Boolean optionOK = (cmdLine.size() == 0); + boolean optionOK = (cmdLine.size() == 0); if (cmdLine.size() == 1) { - optionOK = false; String option = cmdLine.get(0); if ("CREATE".equals(option) || "FULL".equalsIgnoreCase(option)) { optionOK = true; diff --git a/cluster/src/test/resources/log4j.properties b/cluster/src/test/resources/log4j.properties index 222d083979..85513aca15 100644 --- a/cluster/src/test/resources/log4j.properties +++ b/cluster/src/test/resources/log4j.properties @@ -38,11 +38,20 @@ log4j.rootCategory=INFO, file log4j.appender.file=org.apache.log4j.RollingFileAppender log4j.appender.file.append=true 
log4j.appender.file.file=snappydata.log -log4j.appender.file.MaxFileSize=100MB +log4j.appender.file.MaxFileSize=1GB log4j.appender.file.MaxBackupIndex=10000 log4j.appender.file.layout=io.snappydata.log4j.PatternLayout log4j.appender.file.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss.SSS zzz} %t %p %c{1}: %m%n +# Appender for code dumps of WholeStageCodegenExec, CodeGenerator etc +log4j.appender.code=org.apache.log4j.RollingFileAppender +log4j.appender.code.append=true +log4j.appender.code.file=generatedcode.log +log4j.appender.code.MaxFileSize=1GB +log4j.appender.code.MaxBackupIndex=10000 +log4j.appender.code.layout=io.snappydata.log4j.PatternLayout +log4j.appender.code.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss.SSS zzz} %t %p %c{1}: %m%n + # Console appender log4j.appender.console=org.apache.log4j.ConsoleAppender log4j.appender.console.target=System.out @@ -92,8 +101,6 @@ log4j.logger.org.apache.spark.scheduler.FairSchedulableBuilder=WARN log4j.logger.org.apache.spark.scheduler.cluster.CoarseGrainedSchedulerBackend$DriverEndpoint=WARN log4j.logger.org.apache.spark.storage.BlockManagerInfo=WARN log4j.logger.org.apache.hadoop.hive=WARN -# for all Spark generated code (including ad-hoc UnsafeProjection calls etc) -log4j.logger.org.apache.spark.sql.catalyst.expressions.codegen.CodeGenerator=WARN log4j.logger.org.apache.spark.sql.execution.datasources=WARN log4j.logger.org.apache.spark.scheduler.SnappyTaskSchedulerImpl=WARN log4j.logger.org.apache.spark.MapOutputTrackerMasterEndpoint=WARN @@ -110,7 +117,20 @@ log4j.logger.org.datanucleus=ERROR log4j.logger.org.apache.spark.Task=WARN log4j.logger.org.apache.spark.sql.catalyst.parser.CatalystSqlParser=WARN +# Keep log-level of some classes as INFO even if root level is higher +log4j.logger.io.snappydata.impl.LeadImpl=INFO +log4j.logger.io.snappydata.impl.ServerImpl=INFO +log4j.logger.io.snappydata.impl.LocatorImpl=INFO +log4j.logger.spray.can.server.HttpListener=INFO + # for generated code of plans -# log4j.logger.org.apache.spark.sql.execution.WholeStageCodegenExec=DEBUG +log4j.logger.org.apache.spark.sql.execution.WholeStageCodegenExec=DEBUG, code +log4j.additivity.org.apache.spark.sql.execution.WholeStageCodegenExec=false +log4j.logger.org.apache.spark.sql.execution.WholeStageCodegenRDD=INFO, code +log4j.additivity.org.apache.spark.sql.execution.WholeStageCodegenRDD=false +# for all Spark generated code (including ad-hoc UnsafeProjection calls etc) +log4j.logger.org.apache.spark.sql.catalyst.expressions.codegen.CodeGenerator=WARN, code +log4j.additivity.org.apache.spark.sql.catalyst.expressions.codegen.CodeGenerator=false # for SnappyData generated code used on store (ComplexTypeSerializer, JDBC inserts ...) 
-# log4j.logger.org.apache.spark.sql.store.CodeGeneration=DEBUG +log4j.logger.org.apache.spark.sql.store.CodeGeneration=INFO, code +log4j.additivity.org.apache.spark.sql.store.CodeGeneration=false diff --git a/cluster/src/test/scala/io/snappydata/QueryTest.scala b/cluster/src/test/scala/io/snappydata/QueryTest.scala index 500f36cfdd..c76d29b7b8 100644 --- a/cluster/src/test/scala/io/snappydata/QueryTest.scala +++ b/cluster/src/test/scala/io/snappydata/QueryTest.scala @@ -67,8 +67,7 @@ class QueryTest extends SnappyFunSuite { val df = snContext.sql("SELECT title, price FROM titles WHERE EXISTS (" + "SELECT * FROM sales WHERE sales.title_id = titles.title_id AND qty >30)") - - df.show() + df.collect() } test("SNAP-1159_1482") { @@ -139,49 +138,49 @@ class QueryTest extends SnappyFunSuite { "PARTITION_BY 'col2'," + "BUCKETS '1')") snc.sql("insert into ColumnTable(\"a/b\",col2,col3) values(1,2,3)") - snc.sql("select col2,col3 from columnTable").show() - snc.sql("select col2, col3, `a/b` from columnTable").show() - snc.sql("select col2, col3, \"a/b\" from columnTable").show() - snc.sql("select col2, col3, \"A/B\" from columnTable").show() - snc.sql("select col2, col3, `A/B` from columnTable").show() - - snc.sql("select col2,col3 from columnTable").show() - snc.table("columnTable").select("col3", "col2", "a/b").show() - snc.table("columnTable").select("col3", "Col2", "A/b").show() - snc.table("columnTable").select("COL3", "Col2", "A/B").show() - snc.table("columnTable").select("COL3", "Col2", "`A/B`").show() - snc.table("columnTable").select("COL3", "Col2", "`a/b`").show() + snc.sql("select col2,col3 from columnTable").collect() + snc.sql("select col2, col3, `a/b` from columnTable").collect() + snc.sql("select col2, col3, \"a/b\" from columnTable").collect() + snc.sql("select col2, col3, \"A/B\" from columnTable").collect() + snc.sql("select col2, col3, `A/B` from columnTable").collect() + + snc.sql("select col2,col3 from columnTable").collect() + snc.table("columnTable").select("col3", "col2", "a/b").collect() + snc.table("columnTable").select("col3", "Col2", "A/b").collect() + snc.table("columnTable").select("COL3", "Col2", "A/B").collect() + snc.table("columnTable").select("COL3", "Col2", "`A/B`").collect() + snc.table("columnTable").select("COL3", "Col2", "`a/b`").collect() snc.conf.set("spark.sql.caseSensitive", "true") try { - snc.table("columnTable").select("col3", "col2", "a/b").show() + snc.table("columnTable").select("col3", "col2", "a/b").collect() fail("expected to fail for case-sensitive=true") } catch { case _: AnalysisException => // expected } try { - snc.table("columnTable").select("COL3", "COL2", "A/B").show() + snc.table("columnTable").select("COL3", "COL2", "A/B").collect() fail("expected to fail for case-sensitive=true") } catch { case _: AnalysisException => // expected } try { - snc.sql("select col2, col3, \"A/B\" from columnTable").show() + snc.sql("select col2, col3, \"A/B\" from columnTable").collect() fail("expected to fail for case-sensitive=true") } catch { case _: AnalysisException => // expected } try { - snc.sql("select COL2, COL3, `A/B` from columnTable").show() + snc.sql("select COL2, COL3, `A/B` from columnTable").collect() fail("expected to fail for case-sensitive=true") } catch { case _: AnalysisException => // expected } // hive meta-store is case-insensitive so column table names are not - snc.sql("select COL2, COL3, \"a/b\" from columnTable").show() - snc.sql("select COL2, COL3, `a/b` from ColumnTable").show() - snc.table("columnTable").select("COL3", 
"COL2", "a/b").show() - snc.table("COLUMNTABLE").select("COL3", "COL2", "a/b").show() + snc.sql("select COL2, COL3, \"a/b\" from columnTable").collect() + snc.sql("select COL2, COL3, `a/b` from ColumnTable").collect() + snc.table("columnTable").select("COL3", "COL2", "a/b").collect() + snc.table("COLUMNTABLE").select("COL3", "COL2", "a/b").collect() } private def setupTestData(session: SnappySession): Unit = { diff --git a/cluster/src/test/scala/io/snappydata/Snap_213.scala b/cluster/src/test/scala/io/snappydata/Snap_213.scala index 93d4d7d835..6a7e18fd7f 100644 --- a/cluster/src/test/scala/io/snappydata/Snap_213.scala +++ b/cluster/src/test/scala/io/snappydata/Snap_213.scala @@ -30,7 +30,7 @@ class Snap_213 override def beforeAll(): Unit = { super.beforeAll() // force boot GemFireXD if not booted; just getting SnappyContext should do - println(s"Using SnappyContext $snc") + logInfo(s"Using SnappyContext $snc") } override def afterAll(): Unit = { @@ -42,7 +42,7 @@ class Snap_213 DriverRegistry.register(Constant.JDBC_CLIENT_DRIVER) val hostPort = TestUtil.startNetServer() - println("server started ") + logInfo("server started") val conn: Connection = DriverManager.getConnection( "jdbc:snappydata://" + hostPort) diff --git a/cluster/src/test/scala/io/snappydata/benchmark/TPCH_Queries.scala b/cluster/src/test/scala/io/snappydata/benchmark/TPCH_Queries.scala index aaecd957b5..4c34944d2b 100644 --- a/cluster/src/test/scala/io/snappydata/benchmark/TPCH_Queries.scala +++ b/cluster/src/test/scala/io/snappydata/benchmark/TPCH_Queries.scala @@ -17,13 +17,13 @@ package io.snappydata.benchmark -import scala.util.Random +import org.apache.spark.Logging -object TPCH_Queries { +object TPCH_Queries extends Logging { private var random = new scala.util.Random(42) - def setRandomSeed(randomSeed : Integer = 42): Unit ={ + def setRandomSeed(randomSeed: Integer = 42): Unit = { this.random = new scala.util.Random(randomSeed) } @@ -112,13 +112,12 @@ object TPCH_Queries { } def createQuery(query: String, paramters: Array[String]): String = { - // scalastyle:off println var generatedQuery = query for (s <- paramters) { - println(s"KBKBKB : createQuery : $s") + logInfo(s"KBKBKB : createQuery : $s") generatedQuery = generatedQuery.replaceFirst("\\?", s) } - println(s"KBKBKB : My query : $generatedQuery") + logInfo(s"KBKBKB : My query : $generatedQuery") generatedQuery } @@ -298,7 +297,7 @@ object TPCH_Queries { val fromDate = java.time.LocalDate.of(1995, 3, 1) val toDate = java.time.LocalDate.of(1995, 3, 31) val diff = java.time.temporal.ChronoUnit.DAYS.between(fromDate, toDate) - //val random = new random(System.nanoTime) + // val random = new random(System.nanoTime) // You may want a different seed val selectedDate = fromDate.plusDays(random.nextInt(diff.toInt)) Array(segment, selectedDate.toString, selectedDate.toString) diff --git a/cluster/src/test/scala/io/snappydata/benchmark/snappy/TPCH.scala b/cluster/src/test/scala/io/snappydata/benchmark/snappy/TPCH.scala index 02b474fbe4..b651bb0ebf 100644 --- a/cluster/src/test/scala/io/snappydata/benchmark/snappy/TPCH.scala +++ b/cluster/src/test/scala/io/snappydata/benchmark/snappy/TPCH.scala @@ -901,8 +901,7 @@ trait DynamicQueryGetter extends TPCHBase { Seq(args(i), args(i)) case (_, i) :: _ if i < args.length => Seq(args(i)) - case _ => - Seq.empty + case _ => Nil }).toList } diff --git a/cluster/src/test/scala/io/snappydata/cluster/PreparedQueryRoutingSingleNodeSuite.scala b/cluster/src/test/scala/io/snappydata/cluster/PreparedQueryRoutingSingleNodeSuite.scala 
index 85e89e1132..799d72bae1 100644 --- a/cluster/src/test/scala/io/snappydata/cluster/PreparedQueryRoutingSingleNodeSuite.scala +++ b/cluster/src/test/scala/io/snappydata/cluster/PreparedQueryRoutingSingleNodeSuite.scala @@ -23,15 +23,14 @@ import com.pivotal.gemfirexd.internal.engine.distributed.utils.GemFireXDUtils import io.snappydata.{SnappyFunSuite, SnappyTableStatsProviderService} import org.scalatest.BeforeAndAfterAll -import org.apache.spark.SparkConf -import org.apache.spark.sql.{ParseException, SnappyContext, SnappySession} +import org.apache.spark.sql.{SnappyContext, SnappySession} +import org.apache.spark.{Logging, SparkConf} class PreparedQueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndAfterAll { - // Logger.getLogger("org").setLevel(Level.DEBUG) + private val default_chunk_size = GemFireXDUtils.DML_MAX_CHUNK_SIZE - val default_chunk_size = GemFireXDUtils.DML_MAX_CHUNK_SIZE - protected override def newSparkConf(addOn: (SparkConf) => SparkConf): SparkConf = { + protected override def newSparkConf(addOn: SparkConf => SparkConf): SparkConf = { /** * Setting local[n] here actually supposed to affect number of reservoir created * while sampling. @@ -237,7 +236,7 @@ class PreparedQueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndA val serverHostPort = TestUtil.startNetServer() - // println("network server started") + // logInfo("network server started") PreparedQueryRoutingSingleNodeSuite.insertRows(tableName, 1000, serverHostPort) query0(tableName, serverHostPort) } finally { @@ -257,7 +256,7 @@ class PreparedQueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndA "options( partition_by 'ol_int_id, ol_int2_id', buckets '2')") val serverHostPort = TestUtil.startNetServer() - // println("network server started") + // logInfo("network server started") PreparedQueryRoutingSingleNodeSuite.insertRows(tableName, 100, serverHostPort) query6(tableName, serverHostPort) query7(tableName, serverHostPort) @@ -482,16 +481,12 @@ class PreparedQueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndA val i = rs.getInt(1) // val j = rs.getInt(2) // val s = rs.getString(3) - // scalastyle:off println - // println(s"row($index) $i $j $s ") - // scalastyle:on println + // logInfo(s"row($index) $i $j $s ") index += 1 } assert(index == 20) - // scalastyle:off println - // println(s"$qryName Number of rows read " + index) - // scalastyle:on println + // logInfo(s"$qryName Number of rows read " + index) rs.close() // Thread.sleep(1000000) } finally { @@ -525,16 +520,12 @@ class PreparedQueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndA val i = rs.getInt(1) // val j = rs.getInt(2) // val s = rs.getString(3) - // scalastyle:off println - // println(s"row($index) $i $j $s ") - // scalastyle:on println + // logInfo(s"row($index) $i $j $s ") index += 1 } assert(index == 100) - // scalastyle:off println - // println(s"$qryName Number of rows read " + index) - // scalastyle:on println + // logInfo(s"$qryName Number of rows read " + index) rs.close() // Thread.sleep(1000000) } finally { @@ -565,16 +556,12 @@ class PreparedQueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndA val i = rs.getInt(1) // val j = rs.getInt(2) // val s = rs.getString(3) - // scalastyle:off println - // println(s"row($index) $i $j $s ") - // scalastyle:on println + // logInfo(s"row($index) $i $j $s ") index += 1 } assert(index == 100) - // scalastyle:off println - // println(s"$qryName Number of rows read " + index) - // scalastyle:on println + // 
logInfo(s"$qryName Number of rows read " + index) rs.close() // Thread.sleep(1000000) } finally { @@ -606,16 +593,12 @@ class PreparedQueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndA val i = rs.getInt(1) // val j = rs.getInt(2) // val s = rs.getString(3) - // scalastyle:off println - // println(s"row($index) $i $j $s ") - // scalastyle:on println + // logInfo(s"row($index) $i $j $s ") index += 1 } assert(index == 20) - // scalastyle:off println - // println(s"$qryName Number of rows read " + index) - // scalastyle:on println + // logInfo(s"$qryName Number of rows read " + index) rs.close() // Thread.sleep(1000000) } finally { @@ -649,16 +632,12 @@ class PreparedQueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndA val i = rs.getInt(1) // val j = rs.getInt(2) // val s = rs.getString(3) - // scalastyle:off println - // println(s"row($index) $i $j $s ") - // scalastyle:on println + // logInfo(s"row($index) $i $j $s ") index += 1 } assert(index == 20) - // scalastyle:off println - // println(s"$qryName Number of rows read " + index) - // scalastyle:on println + // logInfo(s"$qryName Number of rows read " + index) rs.close() // Thread.sleep(1000000) } finally { @@ -690,16 +669,12 @@ class PreparedQueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndA val i = rs.getInt(1) // val j = rs.getInt(2) // val s = rs.getString(3) - // scalastyle:off println - // println(s"row($index) $i $j $s ") - // scalastyle:on println + // logInfo(s"row($index) $i $j $s ") index += 1 } assert(index == 100) - // scalastyle:off println - // println(s"$qryName Number of rows read " + index) - // scalastyle:on println + // logInfo(s"$qryName Number of rows read " + index) rs.close() // Thread.sleep(1000000) } finally { @@ -731,16 +706,12 @@ class PreparedQueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndA val i = rs.getInt(1) // val j = rs.getInt(2) // val s = rs.getString(3) - // scalastyle:off println - // println(s"row($index) $i $j $s ") - // scalastyle:on println + // logInfo(s"row($index) $i $j $s ") index += 1 } assert(index == 20) - // scalastyle:off println - // println(s"$qryName Number of rows read " + index) - // scalastyle:on println + // logInfo(s"$qryName Number of rows read " + index) rs.close() // Thread.sleep(1000000) } finally { @@ -766,7 +737,7 @@ class PreparedQueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndA val serverHostPort = TestUtil.startNetServer() - // println("network server started") + // logInfo("network server started") PreparedQueryRoutingSingleNodeSuite.insertRows(tableName1, 1000, serverHostPort) PreparedQueryRoutingSingleNodeSuite.insertRows(tableName2, 1000, serverHostPort) query1(tableName1, tableName2, serverHostPort) @@ -782,14 +753,14 @@ class PreparedQueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndA test("update delete on column table") { val snc = this.snc val serverHostPort = TestUtil.startNetServer() - // println("network server started") + // logInfo("network server started") PreparedQueryRoutingSingleNodeSuite.updateDeleteOnColumnTable(snc, serverHostPort) } test("SNAP-1981: Equality on string columns") { val snc = this.snc val serverHostPort = TestUtil.startNetServer() - // println("network server started") + // logInfo("network server started") PreparedQueryRoutingSingleNodeSuite.equalityOnStringColumn(snc, serverHostPort) } @@ -830,15 +801,11 @@ class PreparedQueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndA while (update.next()) { val i = update.getInt(1) val j 
= update.getBigDecimal(2) - // scalastyle:off println - println(s"1-row($index) $i $j") - // scalastyle:on println + logInfo(s"1-row($index) $i $j") index += 1 assert(i == 1 || i == 2) } - // scalastyle:off println - println(s"1-Number of rows read " + index) - // scalastyle:on println + logInfo(s"1-Number of rows read " + index) assert(index == 2) assert(cacheMap.size() == 1) @@ -848,15 +815,11 @@ class PreparedQueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndA while (update.next()) { val i = update.getInt(1) val j = update.getBigDecimal(2) - // scalastyle:off println - println(s"2-row($index) $i $j") - // scalastyle:on println + logInfo(s"2-row($index) $i $j") index += 1 assert(i == 1 || i == 2 || i == 3) } - // scalastyle:off println - println(s"2-Number of rows read " + index) - // scalastyle:on println + logInfo(s"2-Number of rows read " + index) assert(index == 3) assert(cacheMap.size() == 1) close(prepStatement0) @@ -869,15 +832,11 @@ class PreparedQueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndA while (update.next()) { val i = update.getInt(1) val j = update.getBigDecimal(2) - // scalastyle:off println - println(s"3-row($index) $i $j") - // scalastyle:on println + logInfo(s"3-row($index) $i $j") index += 1 assert(i > 2 && i < 6) } - // scalastyle:off println - println(s"3-Number of rows read " + index) - // scalastyle:on println + logInfo(s"3-Number of rows read " + index) assert(index == 3) assert(cacheMap.size() == 2) @@ -887,15 +846,11 @@ class PreparedQueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndA while (update.next()) { val i = update.getInt(1) val j = update.getBigDecimal(2) - // scalastyle:off println - println(s"4-row($index) $i $j") - // scalastyle:on println + logInfo(s"4-row($index) $i $j") index += 1 assert(i > 3 && i < 7) } - // scalastyle:off println - println(s"4-Number of rows read " + index) - // scalastyle:on println + logInfo(s"4-Number of rows read " + index) assert(index == 3) assert(cacheMap.size() == 2) close(prepStatement1) @@ -909,15 +864,11 @@ class PreparedQueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndA while (update.next()) { val i = update.getInt(1) val j = update.getString(2) - // scalastyle:off println - println(s"5-row($index) $i $j") - // scalastyle:on println + logInfo(s"5-row($index) $i $j") index += 1 assert(i == 1) } - // scalastyle:off println - println(s"5-Number of rows read " + index) - // scalastyle:on println + logInfo(s"5-Number of rows read " + index) assert(index == 1) assert(cacheMap.size() == 3) @@ -927,15 +878,11 @@ class PreparedQueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndA while (update.next()) { val i = update.getInt(1) val j = update.getString(2) - // scalastyle:off println - println(s"6-row($index) $i $j") - // scalastyle:on println + logInfo(s"6-row($index) $i $j") index += 1 assert(i == 2) } - // scalastyle:off println - println(s"6-Number of rows read " + index) - // scalastyle:on println + logInfo(s"6-Number of rows read " + index) assert(index == 1) assert(cacheMap.size() == 3) close(prepStatement2) @@ -953,15 +900,11 @@ class PreparedQueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndA index = 0 while (update.next()) { val i = update.getInt(1) - // scalastyle:off println - println(s"7-row($index) $i") - // scalastyle:on println + logInfo(s"7-row($index) $i") index += 1 assert(i == 1 || i == 2) } - // scalastyle:off println - println(s"7-Number of rows read " + index) - // scalastyle:on println + logInfo(s"7-Number of 
rows read " + index) assert(index == 2) assert(cacheMap.size() == 0) @@ -971,15 +914,11 @@ class PreparedQueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndA index = 0 while (update.next()) { val i = update.getInt(1) - // scalastyle:off println - println(s"8-row($index) $i") - // scalastyle:on println + logInfo(s"8-row($index) $i") index += 1 assert(i == 2 || i == 3) } - // scalastyle:off println - println(s"8-Number of rows read " + index) - // scalastyle:on println + logInfo(s"8-Number of rows read " + index) assert(index == 2) assert(cacheMap.size() == 0) close(prepStatement3) @@ -993,15 +932,11 @@ class PreparedQueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndA while (update.next()) { val i = update.getInt(1) val j = update.getBigDecimal(2) - // scalastyle:off println - println(s"9-row($index) $i $j") - // scalastyle:on println + logInfo(s"9-row($index) $i $j") index += 1 assert(i == 1 || i == 2) } - // scalastyle:off println - println(s"9-Number of rows read " + index) - // scalastyle:on println + logInfo(s"9-Number of rows read " + index) assert(index == 2) assert(cacheMap.size() == 1) close(prepStatement4) @@ -1016,15 +951,11 @@ class PreparedQueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndA while (update.next()) { val i = update.getInt(1) val j = update.getBigDecimal(2) - // scalastyle:off println - println(s"10-row($index) $i $j") - // scalastyle:on println + logInfo(s"10-row($index) $i $j") index += 1 assert(i == 1) } - // scalastyle:off println - println(s"10-Number of rows read " + index) - // scalastyle:on println + logInfo(s"10-Number of rows read " + index) assert(index == 1) assert(cacheMap.size() == 2) close(prepStatement5) @@ -1139,15 +1070,11 @@ class PreparedQueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndA "MCI", "STL", "MSY", "SAT", "SNA", "DAL", "PDX", "SMF", "HOU", "SAN", "OAK", "SJC") while (update.next()) { val s = update.getString(3) - // scalastyle:off println - // println(s"1-row($index) $s ") - // scalastyle:on println - result1.contains(s) + // logInfo(s"1-row($index) $s ") + assert(result1.contains(s)) index += 1 } - // scalastyle:off println - println(s"1-Number of rows read " + index) - // scalastyle:on println + logInfo(s"1-Number of rows read " + index) assert(index == 46) assert(cacheMap.size() == 0) @@ -1164,15 +1091,11 @@ class PreparedQueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndA "SMF", "ONT", "SJC", "OAK", "HOU", "DAL", "BUR") while (update.next()) { val s = update.getString(3) - // scalastyle:off println - // println(s"2-row($index) $s ") - // scalastyle:on println - result2.contains(s) + // logInfo(s"2-row($index) $s ") + assert(result2.contains(s)) index += 1 } - // scalastyle:off println - println(s"2-Number of rows read " + index) - // scalastyle:on println + logInfo(s"2-Number of rows read " + index) assert(index == 65) assert(cacheMap.size() == 0) close(prepStatement1) @@ -1186,7 +1109,7 @@ class PreparedQueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndA } } -object PreparedQueryRoutingSingleNodeSuite{ +object PreparedQueryRoutingSingleNodeSuite extends Logging { def insertRows(tableName: String, numRows: Int, serverHostPort: String): Unit = { @@ -1211,9 +1134,7 @@ object PreparedQueryRoutingSingleNodeSuite{ }) val ret = prepareStatement.executeBatch() ret.foreach(r => assert(r == 1)) - // scalastyle:off println - println(s"committed $numRows rows") - // scalastyle:on println + logInfo(s"committed $numRows rows") } finally { 
prepareStatement.close() conn.close() @@ -1229,22 +1150,16 @@ object PreparedQueryRoutingSingleNodeSuite{ val i = rs.getInt(1) val j = rs.getInt(2) val s = rs.getString(3) - // scalastyle:off println - println(s"$qry row($index) $i $j $s ") - // scalastyle:on println + logInfo(s"$qry row($index) $i $j $s ") index += 1 assert(results.contains(i)) } - // scalastyle:off println - println(s"$qry Number of rows read " + index) - // scalastyle:on println + logInfo(s"$qry Number of rows read " + index) assert(index == results.length) rs.close() - // scalastyle:off println - println(s"cachemapsize = ${cacheMapSize} and .size = ${cacheMap.size()}") - // scalastyle:on println + logInfo(s"cachemapsize = $cacheMapSize and .size = ${cacheMap.size()}") assert( cacheMap.size() == cacheMapSize || -1 == cacheMapSize) } diff --git a/cluster/src/test/scala/io/snappydata/cluster/QueryRoutingSingleNodeSuite.scala b/cluster/src/test/scala/io/snappydata/cluster/QueryRoutingSingleNodeSuite.scala index 64f196b3a8..4600a926fa 100644 --- a/cluster/src/test/scala/io/snappydata/cluster/QueryRoutingSingleNodeSuite.scala +++ b/cluster/src/test/scala/io/snappydata/cluster/QueryRoutingSingleNodeSuite.scala @@ -25,7 +25,7 @@ import io.snappydata.{SnappyFunSuite, SnappyTableStatsProviderService} import org.scalatest.BeforeAndAfterAll import org.apache.spark.sql.SnappySession -import org.apache.spark.sql.store.{ColumnTableBatchInsertTest, MetadataTest} +import org.apache.spark.sql.store.ColumnTableBatchInsertTest class QueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndAfterAll { @@ -39,17 +39,13 @@ class QueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndAfterAll // results in multiple batches setDMLMaxChunkSize(50L) serverHostPort = TestUtil.startNetServer() - // scalastyle:off println - println("network server started") - // scalastyle:on println + logInfo("network server started") } override def afterAll(): Unit = { setDMLMaxChunkSize(default_chunk_size) TestUtil.stopNetServer() - // scalastyle:off println - println("network server stopped") - // scalastyle:on println + logInfo("network server stopped") super.afterAll() } @@ -75,9 +71,7 @@ class QueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndAfterAll } }) stmt.executeBatch() - // scalastyle:off println - println(s"committed $numRows rows") - // scalastyle:on println + logInfo(s"committed $numRows rows") } finally { stmt.close() conn.close() @@ -97,9 +91,7 @@ class QueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndAfterAll rs.getInt(1) index += 1 } - // scalastyle:off println - println("Number of rows read " + index) - // scalastyle:on println + logInfo("Number of rows read " + index) rs.close() } finally { stmt.close() @@ -135,9 +127,7 @@ class QueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndAfterAll } }) stmt.executeBatch() - // scalastyle:off println - println(s"committed $numRows rows") - // scalastyle:on println + logInfo(s"committed $numRows rows") } finally { stmt.close() conn.close() @@ -154,17 +144,13 @@ class QueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndAfterAll val i = rs.getInt(1) val j = rs.getInt(2) val s = rs.getString(3) - // scalastyle:off println - println(s"$qry row($index) $i $j $s ") - // scalastyle:on println + logInfo(s"$qry row($index) $i $j $s") index += 1 assert(results.contains(i)) } - // scalastyle:off println - println(s"$qry Number of rows read " + index) - // scalastyle:on println + logInfo(s"$qry Number of rows read " + index) assert(index == 
results.length) rs.close() } @@ -241,16 +227,12 @@ class QueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndAfterAll s") " + s" limit 20" + s"" - // scalastyle:off println - println(s"Iter ${iter} QUERY = ${qry}") - // scalastyle:on println + logInfo(s"Iter $iter QUERY = $qry") val df1 = snc.sql(qry) val res1 = df1.collect() - // scalastyle:off println - println(s"Iter ${iter} with query = ${qry}") - res1.foreach(println) - println(s"Iter ${iter} query end and res1 size = ${res1.length}") - // scalastyle:on println + logInfo(s"Iter $iter with query = $qry") + logInfo(res1.mkString("\n")) + logInfo(s"Iter $iter query end and res1 size = ${res1.length}") assert(res1.length == 3) val qry2 = s"select ol_1_int_id, ol_1_int2_id, ol_1_str_id " + @@ -265,12 +247,10 @@ class QueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndAfterAll s"" val df2 = snc.sql(qry2) val res2 = df2.collect() - // scalastyle:off println - println(s"Iter ${iter} with query2 = ${qry2}") - res2.foreach(println) - println(s"Iter ${iter} query2 end with res size = ${res2.length}") - // scalastyle:on println - assert(!(res1.sameElements(res2))) + logInfo(s"Iter $iter with query2 = $qry2") + logInfo(res2.mkString("\n")) + logInfo(s"Iter $iter query2 end with res size = ${res2.length}") + assert(!res1.sameElements(res2)) assert(res2.length == 3) } @@ -314,9 +294,7 @@ class QueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndAfterAll stmt.addBatch(s"insert into $tName values(5,null,'ddd')") stmt.addBatch(s"insert into $tName values(6,10.6,'ddd')") stmt.executeBatch() - // scalastyle:off println - println(s"inserted rows") - // scalastyle:on println + logInfo(s"inserted rows") } finally { stmt.close() conn.close() @@ -337,9 +315,7 @@ class QueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndAfterAll assert(rs.getString(2) != null) index += 1 } - // scalastyle:off println - println(s"Number of rows read $index sum=$sum") - // scalastyle:on println + logInfo(s"Number of rows read $index sum=$sum") assert(index == 5, index) assert(sum - 18138.2 == 0, sum) rs.close() @@ -367,9 +343,7 @@ class QueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndAfterAll } }) stmt.executeBatch() - // scalastyle:off println - println(s"committed $numRows rows") - // scalastyle:on println + logInfo(s"committed $numRows rows") } finally { stmt.close() conn.close() @@ -383,13 +357,9 @@ class QueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndAfterAll val stmt = conn.createStatement() try { val query = s"select distinct ol_w_id from order_line_row_bool" - - snc.sql(query).show() - val count = snc.sql(query).count() + val count = snc.sql(query).collect().length assert(count == 2) - // scalastyle:off println - println("snc: Number of rows read " + count) - // scalastyle:on println + logInfo("snc: Number of rows read " + count) val rs = stmt.executeQuery(query) var index = 0 @@ -397,9 +367,7 @@ class QueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndAfterAll rs.getInt(1) index += 1 } - // scalastyle:off println - println("jdbc: Number of rows read " + index) - // scalastyle:on println + logInfo("jdbc: Number of rows read " + index) assert(index == 2) rs.close() } finally { @@ -459,25 +427,18 @@ class QueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndAfterAll s" (p.ds_class_id = 'C' AND p.property = 'DOUBLE_PROP' AND p.double_value > 0.2) OR " + s" (p.ds_class_id = 'C' AND p.property = 'DOUBLE_PROP' AND p.double_value < 0.2)" - snc.sql(query).show() - 
val count = snc.sql(query).count() + val count = snc.sql(query).collect().length assert(count == 2) - // scalastyle:off println - println("snc: Number of rows read " + count) - // scalastyle:on println + logInfo("snc: Number of rows read " + count) val rs = stmt.executeQuery(query) var index = 0 while (rs.next()) { index += 1 - // scalastyle:off println - println(s"$index: ${rs.getString(1)} ${rs.getString(2)} ${rs.getString(3)} " + + logInfo(s"$index: ${rs.getString(1)} ${rs.getString(2)} ${rs.getString(3)} " + s"${rs.getString(4)} ${rs.getString(5)} ${rs.getLong(6)} ${rs.getBigDecimal(7)}") - // scalastyle:on println } - // scalastyle:off println - println("jdbc: Number of rows read " + index) - // scalastyle:on println + logInfo("jdbc: Number of rows read " + index) assert(index == 2) rs.close() } finally { @@ -504,9 +465,7 @@ class QueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndAfterAll } }) stmt.executeBatch() - // scalastyle:off println - println(s"committed $numRows rows") - // scalastyle:on println + logInfo(s"committed $numRows rows") } finally { stmt.close() conn.close() @@ -651,9 +610,7 @@ class QueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndAfterAll } }) stmt.executeBatch() - // scalastyle:off println - println(s"insertRows2: committed $numRows rows") - // scalastyle:on println + logInfo(s"insertRows2: committed $numRows rows") } finally { stmt.close() conn.close() @@ -665,9 +622,7 @@ class QueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndAfterAll val stmt = conn.createStatement() try { val numRows = stmt.executeUpdate(s"insert into $tableName1 select * from $tableName2") - // scalastyle:off println - println(s"insertInto $numRows rows") - // scalastyle:on println + logInfo(s"insertInto $numRows rows") assert(numRows == rowsExpected) } finally { stmt.close() @@ -680,9 +635,7 @@ class QueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndAfterAll val stmt = conn.createStatement() try { val numRows = stmt.executeUpdate(s"put into $tableName1 select * from $tableName2") - // scalastyle:off println - println(s"putInto $numRows rows") - // scalastyle:on println + logInfo(s"putInto $numRows rows") assert(numRows == rowsExpected) } finally { stmt.close() @@ -723,11 +676,6 @@ class QueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndAfterAll df.foreach(r => { val col1 = r.getInt(0) val col2 = r.getInt(1) - - // scalastyle:off println - println(s"select row $r") - // scalastyle:on println - if (col1 < 6) { assertionNotFailed = assertionNotFailed && (col1 + 1 == col2) } else { @@ -759,7 +707,7 @@ class QueryRoutingSingleNodeSuite extends SnappyFunSuite with BeforeAndAfterAll val connSession = allSessions.head // skip the "isCached" checks with JDBC since session is different for JDBC connection ColumnTableBatchInsertTest.testSparkCachingUsingSQL(sc, - MetadataTest.resultSetToDataset(connSession, stmt), connSession.catalog.isCached, + SnappyFunSuite.resultSetToDataset(connSession, stmt), connSession.catalog.isCached, df => connSession.sharedState.cacheManager.lookupCachedData(df).isDefined) stmt.close() } finally { diff --git a/cluster/src/test/scala/io/snappydata/cluster/StringAsClobTestSuite.scala b/cluster/src/test/scala/io/snappydata/cluster/StringAsClobTestSuite.scala index 68d32f65c0..93e30f8311 100644 --- a/cluster/src/test/scala/io/snappydata/cluster/StringAsClobTestSuite.scala +++ b/cluster/src/test/scala/io/snappydata/cluster/StringAsClobTestSuite.scala @@ -25,7 +25,7 @@ import 
org.scalatest.BeforeAndAfterAll class StringAsClobTestSuite extends SnappyFunSuite with BeforeAndAfterAll { - val default_chunk_size = GemFireXDUtils.DML_MAX_CHUNK_SIZE + private val default_chunk_size = GemFireXDUtils.DML_MAX_CHUNK_SIZE var serverHostPort = "" val tableName = "order_line_col" @@ -48,7 +48,7 @@ class StringAsClobTestSuite extends SnappyFunSuite with BeforeAndAfterAll { test("Test char") { snc val serverHostPort2 = TestUtil.startNetServer() - println("network server started") + logInfo("network server started") val conn = DriverManager.getConnection(s"jdbc:snappydata://$serverHostPort2") val s = conn.createStatement() s.executeUpdate(s"create table $tableName (id int not null primary key, name String, address " + @@ -60,7 +60,7 @@ class StringAsClobTestSuite extends SnappyFunSuite with BeforeAndAfterAll { s.executeUpdate(s"update $tableName set name='abc1' where id=111") val rs = s.executeQuery(s"select id, name, address from $tableName") while (rs.next()) { - println(s"${rs.getInt(1)} ${rs.getString(2)} ${rs.getString(3)}") + logInfo(s"${rs.getInt(1)} ${rs.getString(2)} ${rs.getString(3)}") } val rs2 = s.executeQuery(s"select id from $tableName where name='abc1'") if (rs2.next()) { diff --git a/cluster/src/test/scala/org/apache/spark/memory/SnappyMemoryAccountingSuite.scala b/cluster/src/test/scala/org/apache/spark/memory/SnappyMemoryAccountingSuite.scala index 7bcc559f9f..df997aa904 100644 --- a/cluster/src/test/scala/org/apache/spark/memory/SnappyMemoryAccountingSuite.scala +++ b/cluster/src/test/scala/org/apache/spark/memory/SnappyMemoryAccountingSuite.scala @@ -235,21 +235,19 @@ class SnappyMemoryAccountingSuite extends MemoryFunSuite { // 208 *10. 208 is the row size + memory overhead var rows = 0 - // scalastyle:off try { for (i <- 1 to 100) { val row = Row(100000000, 10000000, 10000000) - println(s"RowCount1 = $rows") + logInfo(s"RowCount1 = $rows") snSession.insert("t1", row) rows += 1 - println(s"RowCount2 = $rows") + logInfo(s"RowCount2 = $rows") } } catch { case sqle: SQLException if sqle.getSQLState == "XCL54" => - println(s"RowCount3 in exception = $rows") + logInfo(s"RowCount3 in exception = $rows") assert(totalEvictedBytes > 0) } - // scalastyle:on SparkEnv.get.memoryManager. 
asInstanceOf[SnappyUnifiedMemoryManager].dropAllObjects(memoryMode) val count = snSession.sql("select * from t1").count() diff --git a/cluster/src/test/scala/org/apache/spark/sql/IndexTest.scala b/cluster/src/test/scala/org/apache/spark/sql/IndexTest.scala index 58ce43b926..4501ede1c9 100644 --- a/cluster/src/test/scala/org/apache/spark/sql/IndexTest.scala +++ b/cluster/src/test/scala/org/apache/spark/sql/IndexTest.scala @@ -53,7 +53,6 @@ class IndexTest extends SnappyFunSuite with PlanTest with BeforeAndAfterEach { snc.sql("create table checko (col1 Integer primary key, col2 Integer) using row options " + "(partition_by 'col1') ") - // scalastyle:off println val data = sc.parallelize(Seq(Row(1, 1), Row(2, 2), Row(3, 3), Row(4, 4), Row(5, 5), Row(6, 6))) @@ -74,7 +73,6 @@ class IndexTest extends SnappyFunSuite with PlanTest with BeforeAndAfterEach { df.filter("b < 2").selectExpr("i as col1").write.deleteFrom("APP.CHECKO") assert(snc.sql("select * from checko").count() == 3) - // scalastyle:on println } test("check varchar index") { @@ -115,12 +113,11 @@ class IndexTest extends SnappyFunSuite with PlanTest with BeforeAndAfterEach { snc.sql("update ods.organizations set descr = 'EL " + " " + " ' where client_id = 8006") - snc.sql("select * from ods.organizations").show() - snc.sql("select client_id, descr from ods.organizations where client_id = 8006").show() + snc.sql("select * from ods.organizations").collect() + snc.sql("select client_id, descr from ods.organizations where client_id = 8006").collect() } test("tpch queries") { - // scalastyle:off println val qryProvider = new TPCH with SnappyAdapter val queries = Array("1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", @@ -151,7 +148,7 @@ class IndexTest extends SnappyFunSuite with PlanTest with BeforeAndAfterEach { |$results """.stripMargin } - println(s"Done $qNum") + logInfo(s"Done $qNum") } snc.setConf(io.snappydata.Property.EnableExperimentalFeatures.name, existing) @@ -185,7 +182,7 @@ class IndexTest extends SnappyFunSuite with PlanTest with BeforeAndAfterEach { (tableName, snc.table(tableName).count()) }.toMap - tableSizes.foreach(println) + logInfo(tableSizes.mkString("\n")) runBenchmark("select o_orderkey from orders where o_orderkey = 1", tableSizes, 2) runBenchmark("select o_orderkey from orders where o_orderkey = 32", tableSizes) runBenchmark("select o_orderkey from orders where o_orderkey = 801", tableSizes) @@ -247,7 +244,7 @@ class IndexTest extends SnappyFunSuite with PlanTest with BeforeAndAfterEach { def executor(str: String) = snc.sql(str) val size = qryProvider.estimateSizes(query, tableSizes, executor) - println(s"$qNum size $size") + logInfo(s"$qNum size $size") val b = new Benchmark(s"JoinOrder optimization", size, minNumIters = 10) def case1(): Unit = snc.setConf(io.snappydata.Property.EnableExperimentalFeatures.name, @@ -287,12 +284,11 @@ class IndexTest extends SnappyFunSuite with PlanTest with BeforeAndAfterEach { } test("northwind queries") { - println("") // val sctx = sc(c => c.set("spark.sql.inMemoryColumnarStorage.batchSize", "40000")) // val snc = getOrCreate(sctx) // NorthWindDUnitTest.createAndLoadColumnTables(snc) // val s = "select distinct shipcountry from orders" - // snc.sql(s).show() + // snc.sql(s).collect() // NWQueries.assertJoin(snc, NWQueries.Q42, "Q42", 22, 1, classOf[LocalJoin]) /* Thread.sleep(1000 * 60 * 60) diff --git a/cluster/src/test/scala/org/apache/spark/sql/MiscTest.scala b/cluster/src/test/scala/org/apache/spark/sql/MiscTest.scala index 2654af2230..aaeaf0c3ac 100644 --- 
a/cluster/src/test/scala/org/apache/spark/sql/MiscTest.scala +++ b/cluster/src/test/scala/org/apache/spark/sql/MiscTest.scala @@ -35,7 +35,7 @@ class MiscTest extends SnappyFunSuite with Logging { snc.sql("WITH temp_table AS ( SELECT ol_1_int2_id as col1," + " sum(ol_1_int_id) AS col2 FROM table1 GROUP BY ol_1_int2_id)" + " SELECT ol_1_int2_id FROM temp_table ," + - " table1 WHERE ol_1_int2_id = col1 LIMIT 100 ").show + " table1 WHERE ol_1_int2_id = col1 LIMIT 100 ").collect() } test("Pool test") { diff --git a/cluster/src/test/scala/org/apache/spark/sql/SingleNodeTest.scala b/cluster/src/test/scala/org/apache/spark/sql/SingleNodeTest.scala index 037bab1b61..b0d74ffd50 100644 --- a/cluster/src/test/scala/org/apache/spark/sql/SingleNodeTest.scala +++ b/cluster/src/test/scala/org/apache/spark/sql/SingleNodeTest.scala @@ -103,7 +103,7 @@ object SingleNodeTest { case zp: ZippedPartitionsPartition => zp.partitionValues.map { case mb: MultiBucketExecutorPartition => mb.bucketsString } - case _ => Seq.empty + case _ => Nil } // each BucketExecutor must have only one bucket. diff --git a/cluster/src/test/scala/org/apache/spark/sql/execution/SnappyTableMutableAPISuite.scala b/cluster/src/test/scala/org/apache/spark/sql/execution/SnappyTableMutableAPISuite.scala index 42d6b4a308..31242ad34a 100644 --- a/cluster/src/test/scala/org/apache/spark/sql/execution/SnappyTableMutableAPISuite.scala +++ b/cluster/src/test/scala/org/apache/spark/sql/execution/SnappyTableMutableAPISuite.scala @@ -308,7 +308,7 @@ class SnappyTableMutableAPISuite extends SnappyFunSuite with Logging with Before snc.insert("row_table", Row(4, "4", "3", 3)) val df = snc.sql("update row_table set col3 = '5' where col2 in (select col2 from col_table)") - df.show + df.collect() val resultdf = snc.table("row_table").collect() assert(resultdf.length == 4) @@ -1151,7 +1151,7 @@ class SnappyTableMutableAPISuite extends SnappyFunSuite with Logging with Before SnappyContext.globalSparkContext.stop() snc = new SnappySession(sc) - snc.sql("select count(1) from t1").show + snc.sql("select count(1) from t1").collect() } test("Bug-2348 : Invalid stats bitmap") { diff --git a/cluster/src/test/scala/org/apache/spark/sql/execution/benchmark/ColumnCacheBenchmark.scala b/cluster/src/test/scala/org/apache/spark/sql/execution/benchmark/ColumnCacheBenchmark.scala index ace93d2a51..8a01a308ba 100644 --- a/cluster/src/test/scala/org/apache/spark/sql/execution/benchmark/ColumnCacheBenchmark.scala +++ b/cluster/src/test/scala/org/apache/spark/sql/execution/benchmark/ColumnCacheBenchmark.scala @@ -438,12 +438,12 @@ class ColumnCacheBenchmark extends SnappyFunSuite { testDF.write.insertInto("wide_table1") val uniqDf = snappySession.table("wide_table").dropDuplicates(Array("C1")) - uniqDf.count() + logInfo("Number of unique rows in wide_table = " + uniqDf.count()) // check fallback plans being invoked via API - uniqDf.show() + logInfo(uniqDf.collect().mkString("\n")) // and also via SQL val s = (2 to num_col).map(i => s"last(C$i)").mkString(",") - snappySession.sql(s"select C1, $s from wide_table group by C1").show() + snappySession.sql(s"select C1, $s from wide_table group by C1").collect() val df = snappySession.sql("select *" + " from wide_table a , wide_table1 b where a.c1 = b.c1 and a.c1 = '1'") @@ -451,7 +451,6 @@ class ColumnCacheBenchmark extends SnappyFunSuite { val df0 = snappySession.sql(s"select * from wide_table") df0.collect() - df0.show() val avgProjections = (1 to num_col).map(i => s"AVG(C$i)").mkString(",") val df1 = snappySession.sql(s"select 
$avgProjections from wide_table") diff --git a/cluster/src/test/scala/org/apache/spark/sql/execution/benchmark/TAQTest.scala b/cluster/src/test/scala/org/apache/spark/sql/execution/benchmark/TAQTest.scala index dfe97273ba..e0f4d4c550 100644 --- a/cluster/src/test/scala/org/apache/spark/sql/execution/benchmark/TAQTest.scala +++ b/cluster/src/test/scala/org/apache/spark/sql/execution/benchmark/TAQTest.scala @@ -258,9 +258,6 @@ object TAQTest extends Logging with Assertions { private def collect(df: Dataset[Row], expectedNumResults: Int): Unit = { val result = df.collect() assert(result.length === expectedNumResults) - // scalastyle:off - println(s"Count = ${result.length}") - // scalastyle:on } private def doGC(): Unit = { diff --git a/cluster/src/test/scala/org/apache/spark/sql/kafka010/SnappyStructuredKafkaSuite.scala b/cluster/src/test/scala/org/apache/spark/sql/kafka010/SnappyStructuredKafkaSuite.scala index c54fc80da7..27754afb50 100644 --- a/cluster/src/test/scala/org/apache/spark/sql/kafka010/SnappyStructuredKafkaSuite.scala +++ b/cluster/src/test/scala/org/apache/spark/sql/kafka010/SnappyStructuredKafkaSuite.scala @@ -21,14 +21,13 @@ import java.util.concurrent.atomic.AtomicInteger import io.snappydata.SnappyFunSuite import org.apache.kafka.common.TopicPartition - -import org.apache.spark.sql.functions.{count, window} -import org.apache.spark.sql.streaming.ProcessingTime import org.scalatest.concurrent.Eventually import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll} import org.apache.spark.sql.Row import org.apache.spark.sql.catalyst.encoders.RowEncoder +import org.apache.spark.sql.functions.{count, window} +import org.apache.spark.sql.streaming.ProcessingTime case class Account(accountName: String) @@ -236,7 +235,7 @@ class SnappyStructuredKafkaSuite extends SnappyFunSuite with Eventually .start() streamingQuery.processAllAvailable() - session.sql("select * from snappyWindowAggrTable").show(200) + logInfo(session.sql("select * from snappyWindowAggrTable").limit(200).collect().mkString("\n")) streamingQuery.stop() } diff --git a/cluster/src/test/scala/org/apache/spark/sql/store/SQLMetadataTest.scala b/cluster/src/test/scala/org/apache/spark/sql/store/SQLMetadataTest.scala index 37c4a2e4af..68d9812f98 100644 --- a/cluster/src/test/scala/org/apache/spark/sql/store/SQLMetadataTest.scala +++ b/cluster/src/test/scala/org/apache/spark/sql/store/SQLMetadataTest.scala @@ -45,7 +45,7 @@ class SQLMetadataTest extends SnappyFunSuite { val conn = DriverManager.getConnection(s"jdbc:snappydata://localhost:$netPort") try { val stmt = conn.createStatement() - MetadataTest.testSYSTablesAndVTIs(MetadataTest.resultSetToDataset(session, stmt), + MetadataTest.testSYSTablesAndVTIs(SnappyFunSuite.resultSetToDataset(session, stmt), netServers = Seq(s"localhost/127.0.0.1[$netPort]")) stmt.close() } finally { @@ -58,7 +58,7 @@ class SQLMetadataTest extends SnappyFunSuite { val conn = DriverManager.getConnection(s"jdbc:snappydata://localhost:$netPort") try { val stmt = conn.createStatement() - MetadataTest.testDescribeShowAndExplain(MetadataTest.resultSetToDataset(session, stmt), + MetadataTest.testDescribeShowAndExplain(SnappyFunSuite.resultSetToDataset(session, stmt), usingJDBC = true) stmt.close() } finally { @@ -71,7 +71,7 @@ class SQLMetadataTest extends SnappyFunSuite { val conn = DriverManager.getConnection(s"jdbc:snappydata://localhost:$netPort") try { val stmt = conn.createStatement() - MetadataTest.testDSIDWithSYSTables(MetadataTest.resultSetToDataset(session, stmt), + 
MetadataTest.testDSIDWithSYSTables(SnappyFunSuite.resultSetToDataset(session, stmt), Seq(s"localhost/127.0.0.1[$netPort]")) stmt.close() } finally { diff --git a/core/build.gradle b/core/build.gradle index e7647f35d9..518b3a2b4d 100644 --- a/core/build.gradle +++ b/core/build.gradle @@ -15,12 +15,8 @@ * LICENSE file. */ -plugins { - id 'com.github.johnrengelman.shadow' version '2.0.4' - id 'de.undercouch.download' version '3.4.3' -} - apply plugin: 'scala' +apply plugin: 'de.undercouch.download' compileScala.options.encoding = 'UTF-8' // fix scala+java mix to all use compileScala which uses correct dependency order @@ -94,21 +90,21 @@ dependencies { } compile project(":snappy-jdbc_${scalaBinaryVersion}") - compile("org.parboiled:parboiled_${scalaBinaryVersion}:2.1.4") { + compile("org.parboiled:parboiled_${scalaBinaryVersion}:${parboiledVersion}") { exclude(group: 'org.scala-lang', module: 'scala-library') exclude(group: 'org.scala-lang', module: 'scala-reflect') exclude(group: 'org.scala-lang', module: 'scala-compiler') } - compile 'org.apache.tomcat:tomcat-jdbc:8.5.23' - compile 'com.zaxxer:HikariCP:2.7.1' - // compile 'org.spark-project:dstream-twitter_2.11:0.1.0' - compile 'org.twitter4j:twitter4j-stream:4.0.6' - compile 'org.objenesis:objenesis:2.6' + compile "org.apache.tomcat:tomcat-juli:${tomcatJdbcVersion}" + compile "org.apache.tomcat:tomcat-jdbc:${tomcatJdbcVersion}" + compile "com.zaxxer:HikariCP:${hikariCPVersion}" + compile "org.twitter4j:twitter4j-stream:${twitter4jVersion}" + compile "org.objenesis:objenesis:${objenesisVersion}" compile "com.esotericsoftware:kryo-shaded:${kryoVersion}" compile "org.eclipse.collections:eclipse-collections-api:${eclipseCollectionsVersion}" compile "org.eclipse.collections:eclipse-collections:${eclipseCollectionsVersion}" - compileOnly 'com.rabbitmq:amqp-client:3.5.7' + compileOnly "com.rabbitmq:amqp-client:${rabbitMqVersion}" testCompile project(':dunit') testCompile 'org.scala-lang:scala-actors:' + scalaVersion diff --git a/core/src/dunit/scala/io/snappydata/cluster/SplitClusterDUnitSecurityTest.scala b/core/src/dunit/scala/io/snappydata/cluster/SplitClusterDUnitSecurityTest.scala index 67098d0da5..e7ec9299a5 100644 --- a/core/src/dunit/scala/io/snappydata/cluster/SplitClusterDUnitSecurityTest.scala +++ b/core/src/dunit/scala/io/snappydata/cluster/SplitClusterDUnitSecurityTest.scala @@ -681,8 +681,7 @@ class SplitClusterDUnitSecurityTest(s: String) s"CREATE TEMPORARY TABLE ${t1}temp AS SELECT id, name FROM $schema.$t1", s"CREATE GLOBAL TEMPORARY TABLE ${t1}tempg AS SELECT id, name FROM $schema.$t1", s"CREATE EXTERNAL TABLE $schema.${t1}ext USING csv OPTIONS(path " + - s"'../../quickstart/src/main/resources/customer.csv')", - s"CREATE INDEX $schema.idx ON $schema.$t1 (id, name)") + s"'../../quickstart/src/main/resources/customer.csv')") .foreach(executeSQL(user1Stmt, _)) // user gemfire2 of same group gemGroup1 @@ -698,7 +697,6 @@ class SplitClusterDUnitSecurityTest(s: String) s"select * from $schema.$t2", s"delete from $schema.$t1 where name like 'two'", s"drop table $schema.$t1r", - s"drop index $schema.idx", s"select * from $schema.$t2").foreach(executeSQL(user2Stmt, _)) // user gemfire1 @@ -724,7 +722,7 @@ class SplitClusterDUnitSecurityTest(s: String) s"CREATE INDEX $schema.idx4 ON $schema.$t1 (id, name)") .foreach(sql => assertFailures(() => { executeSQL(user4Stmt, sql) - }, sql, Seq("42500", "42502", "42506", "42507"))) + }, sql, Seq("42500", "42502", "42506", "42507", "38000"))) // Grant DML permissions to gemfire4 and ensure it 
works. executeSQL(user1Stmt, s"grant select on $schema.$t1 to ldapgroup:$group2") @@ -804,7 +802,7 @@ class SplitClusterDUnitSecurityTest(s: String) def getJobJar(className: String, packageStr: String = ""): String = { val dir = new File(s"$snappyProductDir/../../../cluster/build-artifacts/scala-2.11/classes/" - + s"test/$packageStr") + + s"scala/test/$packageStr") assert(dir.exists() && dir.isDirectory, s"snappy-cluster scala tests not compiled. Directory " + s"not found: $dir") val jar = TestPackageUtils.createJarFile(dir.listFiles(new FileFilter { diff --git a/core/src/dunit/scala/io/snappydata/cluster/SplitClusterDUnitTestBase.scala b/core/src/dunit/scala/io/snappydata/cluster/SplitClusterDUnitTestBase.scala index 608de8dba2..a8a0572897 100644 --- a/core/src/dunit/scala/io/snappydata/cluster/SplitClusterDUnitTestBase.scala +++ b/core/src/dunit/scala/io/snappydata/cluster/SplitClusterDUnitTestBase.scala @@ -32,7 +32,7 @@ import com.pivotal.gemfirexd.internal.engine.Misc import io.snappydata.test.dunit.{SerializableRunnable, VM} import io.snappydata.test.util.TestException import io.snappydata.util.TestUtils -import io.snappydata.{ColumnUpdateDeleteTests, Constant} +import io.snappydata.{ColumnUpdateDeleteTests, Constant, SnappyFunSuite} import org.junit.Assert import org.apache.spark.sql.catalyst.InternalRow @@ -296,11 +296,11 @@ trait SplitClusterDUnitTestObject extends Logging { netServers, locatorId, locatorNetServer, servers, leadId) // next test metadata using JDBC connection stmt = jdbcConn.createStatement() - MetadataTest.testSYSTablesAndVTIs(MetadataTest.resultSetToDataset(session, stmt), + MetadataTest.testSYSTablesAndVTIs(SnappyFunSuite.resultSetToDataset(session, stmt), hostName = "localhost", netServers, locatorId, locatorNetServer, servers, leadId) - MetadataTest.testDescribeShowAndExplain(MetadataTest.resultSetToDataset(session, stmt), + MetadataTest.testDescribeShowAndExplain(SnappyFunSuite.resultSetToDataset(session, stmt), usingJDBC = true) - MetadataTest.testDSIDWithSYSTables(MetadataTest.resultSetToDataset(session, stmt), + MetadataTest.testDSIDWithSYSTables(SnappyFunSuite.resultSetToDataset(session, stmt), netServers, locatorId, locatorNetServer, servers, leadId) stmt.close() @@ -341,7 +341,7 @@ trait SplitClusterDUnitTestObject extends Logging { // select the data from table created in embedded mode selectFromTable(snc, "embeddedModeTable2", 1005) - var expected = Seq.empty[ComplexData] + var expected: Seq[ComplexData] = Nil // create a table in split mode if (isComplex) { expected = createComplexTableUsingDataSourceAPI(snc, "splitModeTable1", @@ -417,8 +417,10 @@ trait SplitClusterDUnitTestObject extends Logging { SnappyContext.getClusterMode(snc.sparkContext) match { case ThinClientConnectorMode(_, _) => // test index create op - snc.createIndex("tableName" + "_index", tableName, Map("COL1" -> None), - Map.empty[String, String]) + if ("row".equalsIgnoreCase(tableType)) { + snc.createIndex("tableName" + "_index", tableName, Map("COL1" -> None), + Map.empty[String, String]) + } case _ => } @@ -427,14 +429,15 @@ trait SplitClusterDUnitTestObject extends Logging { SnappyContext.getClusterMode(snc.sparkContext) match { case ThinClientConnectorMode(_, _) => // test index drop op - snc.dropIndex("tableName" + "_index", ifExists = false) + if ("row".equalsIgnoreCase(tableType)) { + snc.dropIndex("tableName" + "_index", ifExists = false) + } case _ => } } def selectFromTable(snc: SnappyContext, tableName: String, - expectedLength: Int, - expected: Seq[ComplexData] = 
Seq.empty[ComplexData]): Unit = { + expectedLength: Int, expected: Seq[ComplexData] = Nil): Unit = { val result = snc.sql("SELECT * FROM " + tableName) val r = result.collect() assert(r.length == expectedLength, diff --git a/core/src/main/scala/org/apache/spark/sql/CachedDataFrame.scala b/core/src/main/scala/org/apache/spark/sql/CachedDataFrame.scala index dd004cbf4f..d2a892711b 100644 --- a/core/src/main/scala/org/apache/spark/sql/CachedDataFrame.scala +++ b/core/src/main/scala/org/apache/spark/sql/CachedDataFrame.scala @@ -106,6 +106,9 @@ class CachedDataFrame(snappySession: SnappySession, queryExecution: QueryExecuti @transient private[sql] var currentLiterals: Array[ParamLiteral] = _ + @transient + private[sql] var queryShortString: String = _ + @transient private[sql] var queryString: String = _ @@ -290,7 +293,7 @@ class CachedDataFrame(snappySession: SnappySession, queryExecution: QueryExecuti try { didPrepare = prepareForCollect() val (result, elapsedMillis) = CachedDataFrame.withNewExecutionId(snappySession, - queryString, queryString, currentQueryExecutionString, currentQueryPlanInfo, + queryShortString, queryString, currentQueryExecutionString, currentQueryPlanInfo, currentExecutionId, planStartTime, planEndTime)(body) (result, elapsedMillis * 1000000L) } finally { @@ -356,7 +359,6 @@ class CachedDataFrame(snappySession: SnappySession, queryExecution: QueryExecuti val (executedPlan, withFallback) = SnappySession.getExecutedPlan(queryExecution.executedPlan) def execute(): (Iterator[R], Long) = withNewExecutionIdTiming { - snappySession.addContextObject(SnappySession.ExecutionKey, () => queryExecution) def executeCollect(): Array[InternalRow] = { if (withFallback ne null) withFallback.executeCollect() @@ -414,7 +416,6 @@ class CachedDataFrame(snappySession: SnappySession, queryExecution: QueryExecuti try { withCallback("collect")(_ => execute()) } finally { - snappySession.removeContextObject(SnappySession.ExecutionKey) if (!hasLocalCallSite) { sc.clearCallSite() } @@ -619,7 +620,7 @@ object CachedDataFrame else Utils.nextExecutionIdMethod.invoke(SQLExecution).asInstanceOf[Long] val executionIdStr = java.lang.Long.toString(executionId) localProperties.setProperty(SQLExecution.EXECUTION_ID_KEY, executionIdStr) - localProperties.setProperty(SparkContext.SPARK_JOB_DESCRIPTION, queryLongForm) + localProperties.setProperty(SparkContext.SPARK_JOB_DESCRIPTION, queryShortForm) localProperties.setProperty(SparkContext.SPARK_JOB_GROUP_ID, executionIdStr) val startTime = System.currentTimeMillis() diff --git a/core/src/main/scala/org/apache/spark/sql/SnappyContext.scala b/core/src/main/scala/org/apache/spark/sql/SnappyContext.scala index 6d1635bf6a..4082a08fae 100644 --- a/core/src/main/scala/org/apache/spark/sql/SnappyContext.scala +++ b/core/src/main/scala/org/apache/spark/sql/SnappyContext.scala @@ -1094,23 +1094,23 @@ object SnappyContext extends Logging { SnappyContext.getClusterMode(sc) match { case _: SnappyEmbeddedMode => val deployCmds = ToolsCallbackInit.toolsCallback.getAllGlobalCmnds - if (deployCmds.nonEmpty) { + val nonEmpty = deployCmds.length > 0 + if (nonEmpty) { logInfo(s"Deploy commands size = ${deployCmds.length}") - deployCmds.foreach(d => { - logDebug(s"Deploying: $d") - val cmdFields = d.split('|') - if (cmdFields.length > 1) { - val coordinate = cmdFields(0) - val repos = if (cmdFields(1).isEmpty) None else Some(cmdFields(1)) - val cache = if (cmdFields(2).isEmpty) None else Some(cmdFields(2)) - DeployCommand(coordinate, null, repos, cache, restart = true).run(session) 
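The deploy-command replay being rewritten in this hunk walks each persisted entry and turns it back into either a DeployCommand (package coordinates) or a DeployJarCommand (a plain jar). As a minimal sketch of just the parsing step, assuming entries keep the "coordinate|repos|cache" shape used in the surrounding code; DeployEntry, PackageEntry, JarEntry and parseDeployEntry are illustrative names, not part of the patch:

sealed trait DeployEntry
case class PackageEntry(coordinate: String, repos: Option[String], cache: Option[String]) extends DeployEntry
case class JarEntry(path: String) extends DeployEntry

// A package entry is stored as "coordinate|repos|cache"; an entry without '|'
// is a jar path that was deployed directly.
def parseDeployEntry(d: String): DeployEntry = {
  val cmdFields = d.split('|')
  if (cmdFields.length > 1) {
    // empty repos/cache fields map to None, mirroring the code in this hunk
    PackageEntry(cmdFields(0),
      if (cmdFields(1).isEmpty) None else Some(cmdFields(1)),
      if (cmdFields(2).isEmpty) None else Some(cmdFields(2)))
  } else JarEntry(cmdFields(0))
}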
- } - else { - // Jars we have - DeployJarCommand(null, cmdFields(0), restart = true).run(session) - } - }) } + if (nonEmpty) deployCmds.foreach(d => { + logDebug(s"Deploying: $d") + val cmdFields = d.split('|') + if (cmdFields.length > 1) { + val coordinate = cmdFields(0) + val repos = if (cmdFields(1).isEmpty) None else Some(cmdFields(1)) + val cache = if (cmdFields(2).isEmpty) None else Some(cmdFields(2)) + DeployCommand(coordinate, null, repos, cache, restart = true).run(session) + } else { + // Jars we have + DeployJarCommand(null, cmdFields(0), restart = true).run(session) + } + }) case _ => // Nothing } } diff --git a/core/src/main/scala/org/apache/spark/sql/aqp/SnappyContextFunctions.scala b/core/src/main/scala/org/apache/spark/sql/SnappyContextFunctions.scala similarity index 98% rename from core/src/main/scala/org/apache/spark/sql/aqp/SnappyContextFunctions.scala rename to core/src/main/scala/org/apache/spark/sql/SnappyContextFunctions.scala index 8628be0034..a2e3107008 100644 --- a/core/src/main/scala/org/apache/spark/sql/aqp/SnappyContextFunctions.scala +++ b/core/src/main/scala/org/apache/spark/sql/SnappyContextFunctions.scala @@ -14,12 +14,11 @@ * permissions and limitations under the License. See accompanying * LICENSE file. */ -package org.apache.spark.sql.aqp +package org.apache.spark.sql import io.snappydata.sql.catalog.CatalogObjectType import org.apache.spark.rdd.RDD -import org.apache.spark.sql._ import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.expressions.ExpressionInfo import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan diff --git a/core/src/main/scala/org/apache/spark/sql/SnappyDDLParser.scala b/core/src/main/scala/org/apache/spark/sql/SnappyDDLParser.scala index f673bab534..5d49e5f8f3 100644 --- a/core/src/main/scala/org/apache/spark/sql/SnappyDDLParser.scala +++ b/core/src/main/scala/org/apache/spark/sql/SnappyDDLParser.scala @@ -314,11 +314,8 @@ abstract class SnappyDDLParser(session: SparkSession) applyTo: Seq[String], filterExp: Expression, filterStr: String) => { val applyToAll = applyTo.exists(_.equalsIgnoreCase( SnappyParserConsts.CURRENT_USER.upper)) - val expandedApplyTo = if (applyToAll) { - Seq.empty[String] - } else { - ExternalStoreUtils.getExpandedGranteesIterator(applyTo).toSeq - } + val expandedApplyTo = if (applyToAll) Nil + else ExternalStoreUtils.getExpandedGranteesIterator(applyTo).toSeq /* val targetRelation = snappySession.sessionState.catalog.lookupRelation(tableIdent) val isTargetExternalRelation = targetRelation.find(x => x match { diff --git a/core/src/main/scala/org/apache/spark/sql/SnappySession.scala b/core/src/main/scala/org/apache/spark/sql/SnappySession.scala index a1fd27e42c..5c3d76bebb 100644 --- a/core/src/main/scala/org/apache/spark/sql/SnappySession.scala +++ b/core/src/main/scala/org/apache/spark/sql/SnappySession.scala @@ -298,7 +298,7 @@ class SnappySession(_sc: SparkContext) extends SparkSession(_sc) { /** * Remove a context object registered using [[addContextObject]]. 
*/ - private[sql] def removeContextObject(key: Any): Unit = { + private[sql] def removeContextObject(key: Any): Any = { contextObjects.remove(key) } @@ -1871,8 +1871,8 @@ object SnappySession extends Logging { } def getExecutedPlan(plan: SparkPlan): (SparkPlan, CodegenSparkFallback) = plan match { - case cg@CodegenSparkFallback(WholeStageCodegenExec(p)) => (p, cg) - case cg@CodegenSparkFallback(p) => (p, cg) + case cg@CodegenSparkFallback(WholeStageCodegenExec(p), _) => (p, cg) + case cg@CodegenSparkFallback(p, _) => (p, cg) case WholeStageCodegenExec(p) => (p, null) case _ => (plan, null) } @@ -1891,8 +1891,8 @@ object SnappySession extends Logging { * data to the active executions. SparkListenerSQLPlanExecutionEnd is * then sent with the accumulated time of both the phases. */ - private def planExecution(qe: QueryExecution, session: SnappySession, sqlText: String, - executedPlan: SparkPlan, paramLiterals: Array[ParamLiteral], paramsId: Int) + private def planExecution(qe: QueryExecution, session: SnappySession, sqlShortText: String, + sqlText: String, executedPlan: SparkPlan, paramLiterals: Array[ParamLiteral], paramsId: Int) (f: => RDD[InternalRow]): (RDD[InternalRow], String, SparkPlanInfo, String, SparkPlanInfo, Long, Long, Long) = { // Right now the CachedDataFrame is not getting used across SnappySessions @@ -1901,7 +1901,7 @@ object SnappySession extends Logging { val context = session.sparkContext val localProperties = context.getLocalProperties localProperties.setProperty(SQLExecution.EXECUTION_ID_KEY, executionIdStr) - localProperties.setProperty(SparkContext.SPARK_JOB_DESCRIPTION, sqlText) + localProperties.setProperty(SparkContext.SPARK_JOB_DESCRIPTION, sqlShortText) localProperties.setProperty(SparkContext.SPARK_JOB_GROUP_ID, executionIdStr) val start = System.currentTimeMillis() try { @@ -1926,8 +1926,8 @@ object SnappySession extends Logging { } } - private def evaluatePlan(qe: QueryExecution, session: SnappySession, sqlText: String, - paramLiterals: Array[ParamLiteral], paramsId: Int): CachedDataFrame = { + private def evaluatePlan(qe: QueryExecution, session: SnappySession, sqlShortText: String, + sqlText: String, paramLiterals: Array[ParamLiteral], paramsId: Int): CachedDataFrame = { val (executedPlan, withFallback) = getExecutedPlan(qe.executedPlan) var planCaching = session.planCaching @@ -1963,7 +1963,7 @@ object SnappySession extends Logging { case _ => true } else true // post final execution immediately (collect for these plans will post nothing) - CachedDataFrame.withNewExecutionId(session, sqlText, sqlText, executionStr, planInfo, + CachedDataFrame.withNewExecutionId(session, sqlShortText, sqlText, executionStr, planInfo, postGUIPlans = postGUIPlans) { // create new LogicalRDD plan so that plan does not get re-executed // (e.g. 
just toRdd is not enough since further operators like show will pass @@ -1979,14 +1979,15 @@ object SnappySession extends Logging { case plan: CollectAggregateExec => val (childRDD, origExecutionStr, origPlanInfo, executionStr, planInfo, executionId, - planStartTime, planEndTime) = planExecution(qe, session, sqlText, plan, paramLiterals, - paramsId)(if (withFallback ne null) withFallback.execute(plan.child) else plan.childRDD) + planStartTime, planEndTime) = planExecution(qe, session, sqlShortText, sqlText, plan, + paramLiterals, paramsId)( + if (withFallback ne null) withFallback.execute(plan.child) else plan.childRDD) (childRDD, qe, origExecutionStr, origPlanInfo, executionStr, planInfo, childRDD.id, true, executionId, planStartTime, planEndTime) case plan => val (rdd, origExecutionStr, origPlanInfo, executionStr, planInfo, executionId, - planStartTime, planEndTime) = planExecution(qe, session, sqlText, plan, + planStartTime, planEndTime) = planExecution(qe, session, sqlShortText, sqlText, plan, paramLiterals, paramsId) { plan match { case p: CollectLimitExec => @@ -2050,6 +2051,7 @@ object SnappySession extends Logging { def sqlPlan(session: SnappySession, sqlText: String): CachedDataFrame = { val parser = session.sessionState.sqlParser + val sqlShortText = CachedDataFrame.queryStringShortForm(sqlText) val plan = parser.parsePlan(sqlText, clearExecutionData = true) val planCaching = session.planCaching val paramLiterals = parser.sqlParser.getAllLiterals @@ -2064,7 +2066,7 @@ object SnappySession extends Logging { session.currentKey = key try { val execution = session.executePlan(plan) - cachedDF = evaluatePlan(execution, session, sqlText, paramLiterals, paramsId) + cachedDF = evaluatePlan(execution, session, sqlShortText, sqlText, paramLiterals, paramsId) // put in cache if the DF has to be cached if (planCaching && cachedDF.isCached) { if (isTraceEnabled) { @@ -2083,12 +2085,13 @@ object SnappySession extends Logging { logDebug(s"Using cached plan for: $sqlText (existing: ${cachedDF.queryString})") cachedDF = cachedDF.duplicate() } - handleCachedDataFrame(cachedDF, plan, session, sqlText, paramLiterals, paramsId) + handleCachedDataFrame(cachedDF, plan, session, sqlShortText, sqlText, paramLiterals, paramsId) } private def handleCachedDataFrame(cachedDF: CachedDataFrame, plan: LogicalPlan, - session: SnappySession, sqlText: String, paramLiterals: Array[ParamLiteral], - paramsId: Int): CachedDataFrame = { + session: SnappySession, sqlShortText: String, sqlText: String, + paramLiterals: Array[ParamLiteral], paramsId: Int): CachedDataFrame = { + cachedDF.queryShortString = sqlShortText cachedDF.queryString = sqlText if (cachedDF.isCached && (cachedDF.paramLiterals eq null)) { cachedDF.paramLiterals = paramLiterals diff --git a/core/src/main/scala/org/apache/spark/sql/SnappyStrategies.scala b/core/src/main/scala/org/apache/spark/sql/SnappyStrategies.scala index 856517acaf..6c81b914d3 100644 --- a/core/src/main/scala/org/apache/spark/sql/SnappyStrategies.scala +++ b/core/src/main/scala/org/apache/spark/sql/SnappyStrategies.scala @@ -802,7 +802,7 @@ case class InsertCachedPlanFallback(session: SnappySession, topLevel: Boolean) else plan match { // TODO: disabled for StreamPlans due to issues but can it require fallback? 
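Several hunks above add a sqlShortText parameter alongside the full sqlText so that SPARK_JOB_DESCRIPTION and the posted execution carry a shortened form of the query rather than the complete statement. The real helper is CachedDataFrame.queryStringShortForm, whose exact truncation rule is not visible in this patch; the cap below is only an assumed placeholder that illustrates the intent:

// Hypothetical stand-in for CachedDataFrame.queryStringShortForm: keep job
// descriptions bounded even for very large SQL strings. The 1000-character
// limit is an assumption, not the value used by the actual implementation.
def queryShortForm(sqlText: String, maxLen: Int = 1000): String =
  if (sqlText.length <= maxLen) sqlText
  else sqlText.substring(0, maxLen - 3) + "..."

For example, a generated INSERT with thousands of literals would then appear in the Spark UI job description as its first characters followed by "..." instead of the full text.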
case _: StreamPlan => plan - case _ => CodegenSparkFallback(plan) + case _ => CodegenSparkFallback(plan, session) } } diff --git a/core/src/main/scala/org/apache/spark/sql/collection/Utils.scala b/core/src/main/scala/org/apache/spark/sql/collection/Utils.scala index 7f58731dda..15259fabe9 100644 --- a/core/src/main/scala/org/apache/spark/sql/collection/Utils.scala +++ b/core/src/main/scala/org/apache/spark/sql/collection/Utils.scala @@ -20,7 +20,7 @@ import java.io.ObjectOutputStream import java.lang.reflect.Method import java.net.{URL, URLClassLoader} import java.nio.ByteBuffer -import java.sql.DriverManager +import java.sql.{DriverManager, ResultSet} import java.util.TimeZone import scala.annotation.tailrec @@ -45,6 +45,7 @@ import org.apache.commons.math3.distribution.NormalDistribution import org.eclipse.collections.impl.map.mutable.UnifiedMap import org.apache.spark._ +import org.apache.spark.executor.InputMetrics import org.apache.spark.io.CompressionCodec import org.apache.spark.memory.TaskMemoryManager import org.apache.spark.rdd.RDD @@ -60,7 +61,7 @@ import org.apache.spark.sql.catalyst.util.DateTimeUtils import org.apache.spark.sql.catalyst.{CatalystTypeConverters, InternalRow, analysis} import org.apache.spark.sql.execution.SQLExecution import org.apache.spark.sql.execution.columnar.ExternalStoreUtils.CaseInsensitiveMutableHashMap -import org.apache.spark.sql.execution.datasources.jdbc.{DriverRegistry, DriverWrapper} +import org.apache.spark.sql.execution.datasources.jdbc.{DriverRegistry, DriverWrapper, JdbcUtils} import org.apache.spark.sql.execution.metric.SQLMetric import org.apache.spark.sql.internal.SnappySessionCatalog import org.apache.spark.sql.sources.{CastLongTime, JdbcExtendedUtils} @@ -341,7 +342,7 @@ object Utils extends Logging { def mapExecutors[T: ClassTag](sc: SparkContext, f: () => Iterator[T], maxTries: Int = 30, - blockManagerIds: Seq[BlockManagerId] = Seq.empty): Array[T] = { + blockManagerIds: Seq[BlockManagerId] = Nil): Array[T] = { val cleanedF = sc.clean(f) mapExecutorsWithRetries(sc, (_: TaskContext, _: ExecutorLocalPartition) => cleanedF(), blockManagerIds, maxTries) @@ -350,7 +351,7 @@ object Utils extends Logging { def mapExecutors[T: ClassTag](sc: SparkContext, f: (TaskContext, ExecutorLocalPartition) => Iterator[T], maxTries: Int): Array[T] = { val cleanedF = sc.clean(f) - mapExecutorsWithRetries(sc, cleanedF, Seq.empty[BlockManagerId], maxTries) + mapExecutorsWithRetries(sc, cleanedF, Nil, maxTries) } private def mapExecutorsWithRetries[T: ClassTag](sc: SparkContext, @@ -730,6 +731,11 @@ object Utils extends Logging { def createCatalystConverter(dataType: DataType): Any => Any = CatalystTypeConverters.createToCatalystConverter(dataType) + def resultSetToSparkInternalRows(resultSet: ResultSet, schema: StructType, + inputMetrics: InputMetrics = new InputMetrics): Iterator[InternalRow] = { + JdbcUtils.resultSetToSparkInternalRows(resultSet, schema, inputMetrics) + } + // we should use the exact day as Int, for example, (year, month, day) -> day def millisToDays(millisUtc: Long, tz: TimeZone): Int = { // SPARK-6785: use Math.floor so negative number of days (dates before 1970) diff --git a/core/src/main/scala/org/apache/spark/sql/execution/CodegenSparkFallback.scala b/core/src/main/scala/org/apache/spark/sql/execution/CodegenSparkFallback.scala index a62e0258da..71cfaf0e7b 100644 --- a/core/src/main/scala/org/apache/spark/sql/execution/CodegenSparkFallback.scala +++ b/core/src/main/scala/org/apache/spark/sql/execution/CodegenSparkFallback.scala @@ 
-35,7 +35,8 @@ import org.apache.spark.sql.{SnappyContext, SnappySession, ThinClientConnectorMo * Catch exceptions in code generation of SnappyData plans and fallback * to Spark plans as last resort (including non-code generated paths). */ -case class CodegenSparkFallback(var child: SparkPlan) extends UnaryExecNode { +case class CodegenSparkFallback(var child: SparkPlan, + @transient session: SnappySession) extends UnaryExecNode { override def output: Seq[Attribute] = child.output @@ -43,6 +44,9 @@ case class CodegenSparkFallback(var child: SparkPlan) extends UnaryExecNode { override def outputOrdering: Seq[SortOrder] = child.outputOrdering + @transient private[this] val execution = + session.getContextObject[() => QueryExecution](SnappySession.ExecutionKey) + protected[sql] def isCodeGenerationException(t: Throwable): Boolean = { // search for any janino or code generation exception var cause = t @@ -116,8 +120,6 @@ case class CodegenSparkFallback(var child: SparkPlan) extends UnaryExecNode { // is still usable: SystemFailure.checkFailure() - val session = sqlContext.sparkSession.asInstanceOf[SnappySession] - val isCatalogStale = isConnectorCatalogStaleException(t, session) if (isCatalogStale) { session.externalCatalog.invalidateAll() @@ -131,7 +133,7 @@ case class CodegenSparkFallback(var child: SparkPlan) extends UnaryExecNode { } if (isCatalogStale || isCodeGenerationException(t)) { // fallback to Spark plan for code-generation exception - session.getContextObject[() => QueryExecution](SnappySession.ExecutionKey) match { + execution match { case Some(exec) => if (!isCatalogStale) { val msg = new StringBuilder @@ -147,7 +149,7 @@ case class CodegenSparkFallback(var child: SparkPlan) extends UnaryExecNode { } try { val plan = exec().executedPlan.transform { - case CodegenSparkFallback(p) => p + case CodegenSparkFallback(p, _) => p } val result = f(plan) // update child for future executions @@ -161,7 +163,7 @@ case class CodegenSparkFallback(var child: SparkPlan) extends UnaryExecNode { } finally if (!isCatalogStale) { session.sessionState.disableStoreOptimizations = false } - case None => throw t + case _ => throw t } } else { throw t diff --git a/core/src/main/scala/org/apache/spark/sql/execution/ExistingPlans.scala b/core/src/main/scala/org/apache/spark/sql/execution/ExistingPlans.scala index cdc54a1f41..8b49387296 100644 --- a/core/src/main/scala/org/apache/spark/sql/execution/ExistingPlans.scala +++ b/core/src/main/scala/org/apache/spark/sql/execution/ExistingPlans.scala @@ -213,7 +213,7 @@ private[sql] object PartitionedPhysicalScan { def getSparkPlanInfo(fullPlan: SparkPlan, paramLiterals: Array[ParamLiteral] = EMPTY_PARAMS, paramsId: Int = -1): SparkPlanInfo = { val plan = fullPlan match { - case CodegenSparkFallback(child) => child + case CodegenSparkFallback(child, _) => child case _ => fullPlan } val children = plan match { diff --git a/core/src/main/scala/org/apache/spark/sql/execution/SnappySortExec.scala b/core/src/main/scala/org/apache/spark/sql/execution/SnappySortExec.scala index 2d85d056ea..989a92879f 100644 --- a/core/src/main/scala/org/apache/spark/sql/execution/SnappySortExec.scala +++ b/core/src/main/scala/org/apache/spark/sql/execution/SnappySortExec.scala @@ -77,7 +77,7 @@ case class SnappySortExec(sortPlan: SortExec, child: SparkPlan) }) } - override def usedInputs: AttributeSet = AttributeSet(Seq.empty) + override def usedInputs: AttributeSet = AttributeSet(Nil) override def inputRDDs(): Seq[RDD[InternalRow]] = child.asInstanceOf[CodegenSupport].inputRDDs() diff 
--git a/core/src/main/scala/org/apache/spark/sql/execution/TableExec.scala b/core/src/main/scala/org/apache/spark/sql/execution/TableExec.scala index fbad8475e4..aa5d530399 100644 --- a/core/src/main/scala/org/apache/spark/sql/execution/TableExec.scala +++ b/core/src/main/scala/org/apache/spark/sql/execution/TableExec.scala @@ -114,7 +114,7 @@ trait TableExec extends UnaryExecNode with CodegenSupportOnExecutor { .region.asInstanceOf[PartitionedRegion] // if the two are different then its partition pruning case if (numBuckets == rdd.getNumPartitions) { - new DelegateRDD(sparkContext, rdd, Seq.empty[RDD[InternalRow]], + new DelegateRDD(sparkContext, rdd, Nil, Array.tabulate(numBuckets)( StoreUtils.getBucketPreferredLocations(region, _, forWrite = true))) } else rdd @@ -147,7 +147,7 @@ trait TableExec extends UnaryExecNode with CodegenSupportOnExecutor { // if the two are different then its partition pruning case if (numBuckets == rdd.getNumPartitions) { val table = relation.get.asInstanceOf[PartitionedDataSourceScan].table - new DelegateRDD(sparkContext, rdd, Seq.empty[RDD[InternalRow]], preferredLocations(table)) + new DelegateRDD(sparkContext, rdd, Nil, preferredLocations(table)) } else rdd } } diff --git a/core/src/main/scala/org/apache/spark/sql/execution/columnar/ExternalStoreUtils.scala b/core/src/main/scala/org/apache/spark/sql/execution/columnar/ExternalStoreUtils.scala index 52d989b2d9..d5996d62f9 100644 --- a/core/src/main/scala/org/apache/spark/sql/execution/columnar/ExternalStoreUtils.scala +++ b/core/src/main/scala/org/apache/spark/sql/execution/columnar/ExternalStoreUtils.scala @@ -492,11 +492,11 @@ object ExternalStoreUtils { * @return A Catalyst schema corresponding to columns in the given order. */ def pruneSchema(fieldMap: scala.collection.Map[String, StructField], - columns: Array[String]): StructType = { + columns: Array[String], columnType: String): StructType = { new StructType(columns.map { col => fieldMap.get(col) match { case None => throw new AnalysisException("Cannot resolve " + - s"""column name "$col" among (${fieldMap.keys.mkString(", ")})""") + s"""$columnType column name "$col" among (${fieldMap.keys.mkString(", ")})""") case Some(f) => f } }) diff --git a/core/src/main/scala/org/apache/spark/sql/execution/columnar/encoding/ColumnDeltaEncoder.scala b/core/src/main/scala/org/apache/spark/sql/execution/columnar/encoding/ColumnDeltaEncoder.scala index d6491c71d4..2cd3679b5d 100644 --- a/core/src/main/scala/org/apache/spark/sql/execution/columnar/encoding/ColumnDeltaEncoder.scala +++ b/core/src/main/scala/org/apache/spark/sql/execution/columnar/encoding/ColumnDeltaEncoder.scala @@ -675,7 +675,7 @@ object DeltaWriter { val evaluator = new CompilerFactory().newScriptEvaluator() evaluator.setClassName("io.snappydata.execute.GeneratedDeltaWriterFactory") evaluator.setParentClassLoader(getClass.getClassLoader) - evaluator.setDefaultImports(defaultImports) + evaluator.setDefaultImports(defaultImports: _*) val (name, complexType) = dataType match { case BooleanType => ("Boolean", "") diff --git a/core/src/main/scala/org/apache/spark/sql/execution/columnar/impl/ColumnFormatRelation.scala b/core/src/main/scala/org/apache/spark/sql/execution/columnar/impl/ColumnFormatRelation.scala index d31db1699b..0ac0e36524 100644 --- a/core/src/main/scala/org/apache/spark/sql/execution/columnar/impl/ColumnFormatRelation.scala +++ b/core/src/main/scala/org/apache/spark/sql/execution/columnar/impl/ColumnFormatRelation.scala @@ -23,7 +23,7 @@ import scala.util.control.NonFatal import 
com.gemstone.gemfire.internal.cache.{ExternalTableMetaData, LocalRegion} import com.pivotal.gemfirexd.internal.engine.Misc import com.pivotal.gemfirexd.internal.engine.store.GemFireContainer -import io.snappydata.Constant +import io.snappydata.{Constant, Property} import io.snappydata.sql.catalog.{RelationInfo, SnappyExternalCatalog} import org.apache.spark.rdd.RDD @@ -523,8 +523,13 @@ class ColumnFormatRelation( indexColumns: Map[String, Option[SortDirection]], options: Map[String, String]): DataFrame = { - val parameters = new CaseInsensitiveMutableHashMap(options) val session = sqlContext.sparkSession.asInstanceOf[SnappySession] + // only allow if experimental-features are enabled + if (!Property.EnableExperimentalFeatures.get(session.sessionState.conf)) { + throw new UnsupportedOperationException( + "CREATE INDEX on column tables is an experimental unsupported feature") + } + val parameters = new CaseInsensitiveMutableHashMap(options) val parser = session.snappyParser val indexCols = indexColumns.keys.map(parser.parseSQLOnly(_, parser.parseIdentifier.run())) val catalog = session.sessionCatalog diff --git a/core/src/main/scala/org/apache/spark/sql/execution/columnar/impl/DefaultSource.scala b/core/src/main/scala/org/apache/spark/sql/execution/columnar/impl/DefaultSource.scala index dcbac37a4e..401753f10b 100644 --- a/core/src/main/scala/org/apache/spark/sql/execution/columnar/impl/DefaultSource.scala +++ b/core/src/main/scala/org/apache/spark/sql/execution/columnar/impl/DefaultSource.scala @@ -116,9 +116,7 @@ final class DefaultSource extends ExternalSchemaRelationProvider with SchemaRela val partitions = ExternalStoreUtils.getAndSetTotalPartitions(session, parameters, forManagedTable = true) - val partitioningColumns = StoreUtils.getAndSetPartitioningAndKeyColumns(session, parameters) - val tableOptions = new CaseInsensitiveMap(parameters.toMap) val parametersForShadowTable = new CaseInsensitiveMutableHashMap(parameters) // change the schema to use VARCHAR for StringType for partitioning columns @@ -134,6 +132,9 @@ final class DefaultSource extends ExternalSchemaRelationProvider with SchemaRela } else field }) } + val partitioningColumns = StoreUtils.getAndSetPartitioningAndKeyColumns(session, + schema, parameters) + val tableOptions = new CaseInsensitiveMap(parameters.toMap) val ddlExtension = StoreUtils.ddlExtensionString(parameters, isRowTable = false, isShadowTable = false) diff --git a/core/src/main/scala/org/apache/spark/sql/execution/ddl.scala b/core/src/main/scala/org/apache/spark/sql/execution/ddl.scala index 92c6f06696..b6e73eeafc 100644 --- a/core/src/main/scala/org/apache/spark/sql/execution/ddl.scala +++ b/core/src/main/scala/org/apache/spark/sql/execution/ddl.scala @@ -367,9 +367,10 @@ case class SnappyCacheTableCommand(tableIdent: TableIdentifier, queryString: Str if (isOffHeap) df.persist(StorageLevel.OFF_HEAP) else df.persist() Nil } else { + val queryShortString = CachedDataFrame.queryStringShortForm(queryString) val localProperties = session.sparkContext.getLocalProperties val previousJobDescription = localProperties.getProperty(SparkContext.SPARK_JOB_DESCRIPTION) - localProperties.setProperty(SparkContext.SPARK_JOB_DESCRIPTION, queryString) + localProperties.setProperty(SparkContext.SPARK_JOB_DESCRIPTION, queryShortString) try { session.sessionState.enableExecutionCache = true // Get the actual QueryExecution used by InMemoryRelation so that @@ -386,7 +387,7 @@ case class SnappyCacheTableCommand(tableIdent: TableIdentifier, queryString: Str }.get val planInfo = 
PartitionedPhysicalScan.getSparkPlanInfo(cachedExecution.executedPlan) Row(CachedDataFrame.withCallback(session, df = null, cachedExecution, "cache")(_ => - CachedDataFrame.withNewExecutionId(session, queryString, queryString, + CachedDataFrame.withNewExecutionId(session, queryShortString, queryString, cachedExecution.toString(), planInfo)({ val start = System.nanoTime() // Dummy op to materialize the cache. This does the minimal job of count on @@ -524,7 +525,7 @@ case class DeployCommand( val deployCmd = s"$coordinates|${repos.getOrElse("")}|${jarCache.getOrElse("")}" ToolsCallbackInit.toolsCallback.addURIs(alias, jars, deployCmd) } - Seq.empty[Row] + Nil } catch { case ex: Throwable => ex match { @@ -544,7 +545,7 @@ case class DeployCommand( if (lang.Boolean.parseBoolean(System.getProperty("FAIL_ON_JAR_UNAVAILABILITY", "true"))) { throw ex } - Seq.empty[Row] + Nil } else { throw ex } @@ -579,7 +580,7 @@ case class DeployJarCommand( RefreshMetadata.executeOnAll(sc, RefreshMetadata.ADD_URIS_TO_CLASSLOADER, uris) ToolsCallbackInit.toolsCallback.addURIs(alias, jars, paths, isPackage = false) } - Seq.empty[Row] + Nil } } @@ -626,6 +627,6 @@ case class UnDeployCommand(alias: String) extends RunnableCommand { override def run(sparkSession: SparkSession): Seq[Row] = { ToolsCallbackInit.toolsCallback.removePackage(alias) - Seq.empty[Row] + Nil } } diff --git a/core/src/main/scala/org/apache/spark/sql/execution/joins/HashJoinExec.scala b/core/src/main/scala/org/apache/spark/sql/execution/joins/HashJoinExec.scala index 0fdd5d2db1..8d5bc8f37d 100644 --- a/core/src/main/scala/org/apache/spark/sql/execution/joins/HashJoinExec.scala +++ b/core/src/main/scala/org/apache/spark/sql/execution/joins/HashJoinExec.scala @@ -208,7 +208,7 @@ case class HashJoinExec(leftKeys: Seq[Expression], } (streamPlanRDDs, buildRDDs.map(rdd => new DelegateRDD[InternalRow]( - rdd.sparkContext, rdd, Seq.empty[RDD[InternalRow]], preferredLocations))) + rdd.sparkContext, rdd, Nil, preferredLocations))) } } @@ -270,7 +270,7 @@ case class HashJoinExec(leftKeys: Seq[Expression], } (streamPlanRDDs, buildRDDs.map(rdd => new DelegateRDD[InternalRow]( - rdd.sparkContext, rdd, Seq.empty[RDD[InternalRow]], preferredLocations))) + rdd.sparkContext, rdd, Nil, preferredLocations))) } } diff --git a/core/src/main/scala/org/apache/spark/sql/execution/row/DefaultSource.scala b/core/src/main/scala/org/apache/spark/sql/execution/row/DefaultSource.scala index 6d93bb15df..863240fe12 100644 --- a/core/src/main/scala/org/apache/spark/sql/execution/row/DefaultSource.scala +++ b/core/src/main/scala/org/apache/spark/sql/execution/row/DefaultSource.scala @@ -97,7 +97,7 @@ final class DefaultSource extends ExternalSchemaRelationProvider with SchemaRela val fullTableName = ExternalStoreUtils.removeInternalProps(parameters) ExternalStoreUtils.getAndSetTotalPartitions(session, parameters, forManagedTable = true, forColumnTable = false) - StoreUtils.getAndSetPartitioningAndKeyColumns(session, parameters) + StoreUtils.getAndSetPartitioningAndKeyColumns(session, schema = null, parameters) val tableOptions = new CaseInsensitiveMap(parameters.toMap) val ddlExtension = StoreUtils.ddlExtensionString(parameters, isRowTable = true, isShadowTable = false) diff --git a/core/src/main/scala/org/apache/spark/sql/hive/SnappyHiveExternalCatalog.scala b/core/src/main/scala/org/apache/spark/sql/hive/SnappyHiveExternalCatalog.scala index 4e8b92d270..6662dfe07a 100644 --- a/core/src/main/scala/org/apache/spark/sql/hive/SnappyHiveExternalCatalog.scala +++ 
b/core/src/main/scala/org/apache/spark/sql/hive/SnappyHiveExternalCatalog.scala @@ -21,6 +21,7 @@ import java.lang.reflect.InvocationTargetException import scala.collection.JavaConverters._ import scala.collection.mutable +import scala.concurrent.ExecutionException import com.gemstone.gemfire.cache.CacheClosedException import com.gemstone.gemfire.internal.cache.{LocalRegion, PartitionedRegion} @@ -139,12 +140,13 @@ class SnappyHiveExternalCatalog private[hive](val conf: SparkConf, } catch { case he: Exception if isDisconnectException(he) => // stale JDBC connection - closeHive() + closeHive(clearCache = false) suspendActiveSession { hiveClient = hiveClient.newSession() } function case e: InvocationTargetException => throw e.getCause + case e: ExecutionException => throw e.getCause } finally { skipFlags.skipDDLocks = oldSkipLocks skipFlags.skipHiveCatalogCalls = oldSkipCatalogCalls @@ -234,12 +236,13 @@ class SnappyHiveExternalCatalog private[hive](val conf: SparkConf, } override def listDatabases(): Seq[String] = { - withHiveExceptionHandling(super.listDatabases().map(toUpperCase)) :+ SYS_SCHEMA + (withHiveExceptionHandling(super.listDatabases().map(toUpperCase).toSet) + SYS_SCHEMA) + .toSeq.sorted } override def listDatabases(pattern: String): Seq[String] = { - withHiveExceptionHandling(super.listDatabases(pattern).map(toUpperCase)) ++ - StringUtils.filterPattern(Seq(SYS_SCHEMA), pattern) + (withHiveExceptionHandling(super.listDatabases(pattern).map(toUpperCase).toSet) ++ + StringUtils.filterPattern(Seq(SYS_SCHEMA), pattern)).toSeq.sorted } override def setCurrentDatabase(schema: String): Unit = { @@ -479,7 +482,11 @@ class SnappyHiveExternalCatalog private[hive](val conf: SparkConf, if (nonExistentTables.getIfPresent(name) eq java.lang.Boolean.TRUE) { throw new TableNotFoundException(schema, table) } - cachedCatalogTables(name) + // need to do the load under a sync block to avoid deadlock due to lock inversion + // (sync block and map loader future) so do a get separately first + val catalogTable = cachedCatalogTables.getIfPresent(name) + if (catalogTable ne null) catalogTable + else withHiveExceptionHandling(cachedCatalogTables.get(name)) } override def getTableOption(schema: String, table: String): Option[CatalogTable] = { @@ -547,7 +554,8 @@ class SnappyHiveExternalCatalog private[hive](val conf: SparkConf, def refreshPolicies(ldapGroup: String): Unit = { val qualifiedLdapGroup = Constants.LDAP_GROUP_PREFIX + ldapGroup - getAllTables().foreach { table => + getAllTables().filter(_.provider.map(_.equalsIgnoreCase("policy")). + getOrElse(false)).foreach { table => val applyToStr = table.properties(PolicyProperties.policyApplyTo) if (applyToStr.nonEmpty) { val applyTo = applyToStr.split(",") @@ -723,7 +731,8 @@ class SnappyHiveExternalCatalog private[hive](val conf: SparkConf, override def close(): Unit = {} - private[hive] def closeHive(): Unit = synchronized { + private[hive] def closeHive(clearCache: Boolean): Unit = synchronized { + if (clearCache) invalidateAll() // Non-isolated client can be closed here directly which is only present in cluster mode // using the new property HiveUtils.HIVE_METASTORE_ISOLATION not present in upstream. 
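The getTable change above first probes cachedCatalogTables with getIfPresent and only falls back to the loading get (wrapped in withHiveExceptionHandling) on a miss, so the common cached path never enters the loader or the sync block it would otherwise contend with. A standalone sketch of that pattern, assuming a Guava LoadingCache as the cache implementation (the concrete cache type is not shown in this hunk):

import com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache}

val cache: LoadingCache[String, String] = CacheBuilder.newBuilder().build(
  new CacheLoader[String, String] {
    // stand-in for the expensive, lock-taking catalog lookup
    override def load(key: String): String = key.toUpperCase
  })

def lookup(name: String): String = {
  // cheap read that never invokes the loader
  val cached = cache.getIfPresent(name)
  // only a genuine miss pays for the loading get, which may block
  if (cached ne null) cached else cache.get(name)
}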
// Isolated loader would require reflection but that case is only in snappy-core @@ -793,7 +802,8 @@ object SnappyHiveExternalCatalog { def close(): Unit = synchronized { if (instance ne null) { - instance.withHiveExceptionHandling(instance.closeHive(), handleDisconnects = false) + instance.withHiveExceptionHandling(instance.closeHive(clearCache = true), + handleDisconnects = false) instance = null } } diff --git a/core/src/main/scala/org/apache/spark/sql/internal/ColumnTableBulkOps.scala b/core/src/main/scala/org/apache/spark/sql/internal/ColumnTableBulkOps.scala index 080e18713c..9b57dd0602 100644 --- a/core/src/main/scala/org/apache/spark/sql/internal/ColumnTableBulkOps.scala +++ b/core/src/main/scala/org/apache/spark/sql/internal/ColumnTableBulkOps.scala @@ -76,7 +76,7 @@ object ColumnTableBulkOps { // set a common lock owner for entire operation session.setMutablePlanOwner(tableName, persist = true) - val updatePlan = Update(table, updateSubQuery, Seq.empty, + val updatePlan = Update(table, updateSubQuery, Nil, updateColumns, updateExpressions) val updateDS = new Dataset(sparkSession, updatePlan, RowEncoder(updatePlan.schema)) var analyzedUpdate = updateDS.queryExecution.analyzed.asInstanceOf[Update] diff --git a/core/src/main/scala/org/apache/spark/sql/internal/SnappySessionState.scala b/core/src/main/scala/org/apache/spark/sql/internal/SnappySessionState.scala index 9ab5e0f18f..1e0d1080ef 100644 --- a/core/src/main/scala/org/apache/spark/sql/internal/SnappySessionState.scala +++ b/core/src/main/scala/org/apache/spark/sql/internal/SnappySessionState.scala @@ -34,7 +34,6 @@ import io.snappydata.{Constant, Property} import org.apache.spark.internal.config.{ConfigBuilder, ConfigEntry, TypedConfigBuilder} import org.apache.spark.sql._ -import org.apache.spark.sql.aqp.SnappyContextFunctions import org.apache.spark.sql.catalyst.analysis import org.apache.spark.sql.catalyst.analysis.TypeCoercion.PromoteStrings import org.apache.spark.sql.catalyst.analysis.{Analyzer, EliminateSubqueryAliases, NoSuchTableException, Star, UnresolvedRelation} @@ -684,11 +683,11 @@ class SnappySessionState(val snappySession: SnappySession) protected def newQueryExecution(plan: LogicalPlan): QueryExecution = { new QueryExecution(snappySession, plan) { - snappySession.addContextObject(SnappySession.ExecutionKey, - () => newQueryExecution(plan)) - - override protected def preparations: Seq[Rule[SparkPlan]] = + override protected def preparations: Seq[Rule[SparkPlan]] = { + snappySession.addContextObject(SnappySession.ExecutionKey, + () => newQueryExecution(plan)) queryPreparations(topLevel = true) + } } } diff --git a/core/src/main/scala/org/apache/spark/sql/store/CodeGeneration.scala b/core/src/main/scala/org/apache/spark/sql/store/CodeGeneration.scala index 418eaf3b78..4ddebc3ec7 100644 --- a/core/src/main/scala/org/apache/spark/sql/store/CodeGeneration.scala +++ b/core/src/main/scala/org/apache/spark/sql/store/CodeGeneration.scala @@ -281,7 +281,7 @@ object CodeGeneration extends Logging { val evaluator = new CompilerFactory().newScriptEvaluator() evaluator.setClassName("io.snappydata.execute.GeneratedEvaluation") evaluator.setParentClassLoader(getClass.getClassLoader) - evaluator.setDefaultImports(defaultImports) + evaluator.setDefaultImports(defaultImports: _*) val separator = "\n " val varDeclarations = ctx.mutableStates.map { case (javaType, name, init) => s"$javaType $name;$separator${init.replace("this.", "")}" @@ -329,7 +329,7 @@ object CodeGeneration extends Logging { val evaluator = new 
CompilerFactory().newScriptEvaluator() evaluator.setClassName("io.snappydata.execute.GeneratedIndexEvaluation") evaluator.setParentClassLoader(getClass.getClassLoader) - evaluator.setDefaultImports(defaultImports) + evaluator.setDefaultImports(defaultImports: _*) val separator = "\n " val varDeclarations = ctx.mutableStates.map { case (javaType, name, init) => s"$javaType $name;$separator${init.replace("this.", "")}" @@ -428,7 +428,7 @@ object CodeGeneration extends Logging { classOf[CalendarInterval].getName, classOf[ArrayData].getName, classOf[MapData].getName, - classOf[InternalDataSerializer].getName)) + classOf[InternalDataSerializer].getName): _*) val separator = "\n " val varDeclarations = ctx.mutableStates.map { case (javaType, name, init) => s"$javaType $name;$separator${init.replace("this.", "")}" @@ -453,7 +453,7 @@ object CodeGeneration extends Logging { def executeUpdate(name: String, stmt: PreparedStatement, rows: Seq[Row], multipleRows: Boolean, batchSize: Int, schema: Array[StructField], dialect: JdbcDialect): Int = { - val iterator = new java.util.Iterator[InternalRow] { + val iterator: java.util.Iterator[InternalRow] = new java.util.Iterator[InternalRow] { private val baseIterator = rows.iterator private val encoder = RowEncoder(StructType(schema)) diff --git a/core/src/main/scala/org/apache/spark/sql/store/StoreUtils.scala b/core/src/main/scala/org/apache/spark/sql/store/StoreUtils.scala index 0d7f4f33c6..4360f5ff26 100644 --- a/core/src/main/scala/org/apache/spark/sql/store/StoreUtils.scala +++ b/core/src/main/scala/org/apache/spark/sql/store/StoreUtils.scala @@ -327,7 +327,7 @@ object StoreUtils { val pkDisallowdTypes = Seq(StringType, BinaryType, ArrayType, MapType, StructType) - def getPrimaryKeyClause(parameters: mutable.Map[String, String], + def getPrimaryKeyClause(parameters: scala.collection.Map[String, String], schema: StructType): (String, Seq[StructField]) = { val sb = new StringBuilder() val stringPKCols = new mutable.ArrayBuffer[StructField](1) @@ -342,7 +342,7 @@ object StoreUtils { // since table creation can use case-insensitive in creation val normalizedCols = cols.map(Utils.toUpperCase) val prunedSchema = ExternalStoreUtils.pruneSchema(schemaFields, - normalizedCols) + normalizedCols, columnType = "partition") var includeInPK = true for (field <- prunedSchema.fields if includeInPK) { @@ -495,7 +495,7 @@ object StoreUtils { } def getAndSetPartitioningAndKeyColumns(session: SnappySession, - parameters: mutable.Map[String, String]): Seq[String] = { + schema: StructType, parameters: mutable.Map[String, String]): Seq[String] = { // parse the PARTITION_BY and KEYCOLUMNS and store the parsed result back in parameters // Use a new parser instance since parser may itself invoke DataSource.resolveRelation. 
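The setDefaultImports edits above switch from passing an array to expanding it with ": _*", which is how Scala hands a Seq or Array to a varargs parameter. A small sketch of the expansion, using a hypothetical method with the same shape as the evaluator's setDefaultImports:

object VarargsSketch {
  // hypothetical varargs method, similar in shape to setDefaultImports(String...)
  def setDefaultImports(imports: String*): Unit =
    imports.foreach(i => println(s"import $i;"))

  def main(args: Array[String]): Unit = {
    val defaultImports = Array(
      "org.apache.spark.sql.catalyst.InternalRow",
      "org.apache.spark.unsafe.types.UTF8String")
    // an Array compiles against a String* parameter only with the explicit : _* expansion
    setDefaultImports(defaultImports: _*)
  }
}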
@@ -503,9 +503,15 @@ object StoreUtils { val keyColumns = parameters.get(KEY_COLUMNS) match { case None => Nil case Some(k) => - val keyCols = k.split(",").map(parser.parseSQLOnly(_, parser.parseIdentifier.run())).toList + if (schema eq null) { // row table + throw new AnalysisException(s"$KEY_COLUMNS specified for a row table (use PRIMARY KEY)") + } + val keyCols = k.split(",").map(parser.parseSQLOnly(_, parser.parseIdentifier.run())) + // check for validity of columns + val schemaFields = Utils.schemaFields(schema) + ExternalStoreUtils.pruneSchema(schemaFields, keyCols, "key") parameters.put(KEY_COLUMNS, keyCols.mkString(",")) - keyCols + keyCols.toList } parameters.get(PARTITION_BY) match { case None => diff --git a/core/src/test/java/io/snappydata/api/JavaCreateIndexTestSuite.java b/core/src/test/java/io/snappydata/api/JavaCreateIndexTestSuite.java index a0d3fcb1e4..ac7d06afe9 100644 --- a/core/src/test/java/io/snappydata/api/JavaCreateIndexTestSuite.java +++ b/core/src/test/java/io/snappydata/api/JavaCreateIndexTestSuite.java @@ -22,6 +22,7 @@ import java.util.List; import java.util.Map; +import io.snappydata.Property; import org.apache.spark.SparkConf; import org.apache.spark.api.java.JavaRDD; import org.apache.spark.api.java.JavaSparkContext; @@ -44,6 +45,7 @@ public class JavaCreateIndexTestSuite implements Serializable { @Before public void setUp() { + snc.setConf(Property.EnableExperimentalFeatures().name(), "true"); List dummyList = new ArrayList(); for (int i = 0; i < 2; i++) { DummyBeanClass object = new DummyBeanClass(); diff --git a/core/src/test/resources/log4j.properties b/core/src/test/resources/log4j.properties index 222d083979..0df4d6bfa0 100644 --- a/core/src/test/resources/log4j.properties +++ b/core/src/test/resources/log4j.properties @@ -38,11 +38,20 @@ log4j.rootCategory=INFO, file log4j.appender.file=org.apache.log4j.RollingFileAppender log4j.appender.file.append=true log4j.appender.file.file=snappydata.log -log4j.appender.file.MaxFileSize=100MB +log4j.appender.file.MaxFileSize=1GB log4j.appender.file.MaxBackupIndex=10000 log4j.appender.file.layout=io.snappydata.log4j.PatternLayout log4j.appender.file.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss.SSS zzz} %t %p %c{1}: %m%n +# Appender for code dumps of WholeStageCodegenExec, CodeGenerator etc +log4j.appender.code=org.apache.log4j.RollingFileAppender +log4j.appender.code.append=true +log4j.appender.code.file=generatedcode.log +log4j.appender.code.MaxFileSize=1GB +log4j.appender.code.MaxBackupIndex=10000 +log4j.appender.code.layout=io.snappydata.log4j.PatternLayout +log4j.appender.code.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss.SSS zzz} %t %p %c{1}: %m%n + # Console appender log4j.appender.console=org.apache.log4j.ConsoleAppender log4j.appender.console.target=System.out @@ -92,8 +101,6 @@ log4j.logger.org.apache.spark.scheduler.FairSchedulableBuilder=WARN log4j.logger.org.apache.spark.scheduler.cluster.CoarseGrainedSchedulerBackend$DriverEndpoint=WARN log4j.logger.org.apache.spark.storage.BlockManagerInfo=WARN log4j.logger.org.apache.hadoop.hive=WARN -# for all Spark generated code (including ad-hoc UnsafeProjection calls etc) -log4j.logger.org.apache.spark.sql.catalyst.expressions.codegen.CodeGenerator=WARN log4j.logger.org.apache.spark.sql.execution.datasources=WARN log4j.logger.org.apache.spark.scheduler.SnappyTaskSchedulerImpl=WARN log4j.logger.org.apache.spark.MapOutputTrackerMasterEndpoint=WARN @@ -110,7 +117,20 @@ log4j.logger.org.datanucleus=ERROR log4j.logger.org.apache.spark.Task=WARN 
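The log4j.properties additions route generated-code dumps to a dedicated rolling file and turn additivity off so they do not flood the main log. A sketch of the equivalent programmatic configuration, assuming the stock log4j 1.x PatternLayout rather than io.snappydata.log4j.PatternLayout:

import org.apache.log4j.{Level, Logger, PatternLayout, RollingFileAppender}

object CodeLogAppenderSketch {
  def main(args: Array[String]): Unit = {
    // a rolling file appender dedicated to generated-code dumps
    val code = new RollingFileAppender()
    code.setFile("generatedcode.log")
    code.setAppend(true)
    code.setMaxFileSize("1GB")
    code.setMaxBackupIndex(10000)
    code.setLayout(new PatternLayout("%d{yy/MM/dd HH:mm:ss.SSS zzz} %t %p %c{1}: %m%n"))
    code.activateOptions()

    // route a codegen logger to it only, without echoing into the root appenders
    val logger = Logger.getLogger("org.apache.spark.sql.execution.WholeStageCodegenExec")
    logger.setLevel(Level.INFO)
    logger.setAdditivity(false)
    logger.addAppender(code)

    logger.info("generated code would be dumped here")
  }
}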
log4j.logger.org.apache.spark.sql.catalyst.parser.CatalystSqlParser=WARN +# Keep log-level of some classes as INFO even if root level is higher +log4j.logger.io.snappydata.impl.LeadImpl=INFO +log4j.logger.io.snappydata.impl.ServerImpl=INFO +log4j.logger.io.snappydata.impl.LocatorImpl=INFO +log4j.logger.spray.can.server.HttpListener=INFO + # for generated code of plans -# log4j.logger.org.apache.spark.sql.execution.WholeStageCodegenExec=DEBUG +log4j.logger.org.apache.spark.sql.execution.WholeStageCodegenExec=INFO, code +log4j.additivity.org.apache.spark.sql.execution.WholeStageCodegenExec=false +log4j.logger.org.apache.spark.sql.execution.WholeStageCodegenRDD=INFO, code +log4j.additivity.org.apache.spark.sql.execution.WholeStageCodegenRDD=false +# for all Spark generated code (including ad-hoc UnsafeProjection calls etc) +log4j.logger.org.apache.spark.sql.catalyst.expressions.codegen.CodeGenerator=WARN, code +log4j.additivity.org.apache.spark.sql.catalyst.expressions.codegen.CodeGenerator=false # for SnappyData generated code used on store (ComplexTypeSerializer, JDBC inserts ...) -# log4j.logger.org.apache.spark.sql.store.CodeGeneration=DEBUG +log4j.logger.org.apache.spark.sql.store.CodeGeneration=INFO, code +log4j.additivity.org.apache.spark.sql.store.CodeGeneration=false diff --git a/core/src/test/scala/io/snappydata/ColumnUpdateDeleteTests.scala b/core/src/test/scala/io/snappydata/ColumnUpdateDeleteTests.scala index 2b18280408..a9fed5449a 100644 --- a/core/src/test/scala/io/snappydata/ColumnUpdateDeleteTests.scala +++ b/core/src/test/scala/io/snappydata/ColumnUpdateDeleteTests.scala @@ -77,10 +77,10 @@ object ColumnUpdateDeleteTests extends Assertions with Logging { session.sql(s"update updateTable set id = id + ($numElements / 2) where id <> 73") - session.table("updateTable").show() + logInfo(session.table("updateTable").limit(20).collect().mkString("\n")) session.sql(s"update updateTable set id = id + ($numElements / 2) where id <> 73") - session.table("updateTable").show() + logInfo(session.table("updateTable").limit(20).collect().mkString("\n")) assert(session.table("updateTable").count() === numElements) assert(session.table("checkTable1").count() === numElements) @@ -110,7 +110,7 @@ object ColumnUpdateDeleteTests extends Assertions with Logging { "case when (id % 2) = 0 then true else false end").write.insertInto("checkTable2") session.sql(s"update updateTable set addr = concat(addr, '_update') where id <> 32") - session.table("updateTable").show() + logInfo(session.table("updateTable").limit(20).collect().mkString("\n")) assert(session.table("updateTable").count() === numElements) assert(session.table("checkTable2").count() === numElements) @@ -138,7 +138,7 @@ object ColumnUpdateDeleteTests extends Assertions with Logging { "case when (id % 2) = 1 then true else false end").write.insertInto("checkTable3") session.sql(s"update updateTable set status = not status where id <> 87") - session.table("updateTable").show() + logInfo(session.table("updateTable").limit(20).collect().mkString("\n")) assert(session.table("updateTable").count() === numElements) assert(session.table("checkTable3").count() === numElements) @@ -320,10 +320,10 @@ object ColumnUpdateDeleteTests extends Assertions with Logging { "case when (id % 2) = 0 then true else false end").write.insertInto("checkTable2") session.sql(s"update updateTable set id = id + ($numElements / 2) where id <> 73") - session.table("updateTable").show() + logInfo(session.table("updateTable").limit(20).collect().mkString("\n")) 
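The test edits above replace df.show(), which prints to stdout, with logging a bounded collect so the sample rows land in the configured log file. A minimal sketch of that pattern with plain Spark and slf4j (names are illustrative):

import org.slf4j.LoggerFactory

import org.apache.spark.sql.{DataFrame, SparkSession}

object ShowVsLogSketch {
  private val log = LoggerFactory.getLogger(getClass)

  /** Log a bounded sample of a DataFrame instead of printing it to stdout. */
  def logSample(df: DataFrame, n: Int = 20): Unit =
    log.info(df.limit(n).collect().mkString("\n"))

  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[2]").appName("show-vs-log").getOrCreate()
    import spark.implicits._
    val df = Seq((1, "a"), (2, "b")).toDF("id", "addr")
    logSample(df) // goes to the configured log appender, unlike df.show()
    spark.stop()
  }
}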
session.sql(s"update updateTable set id = id + ($numElements / 2) where id <> 73") - session.table("updateTable").show() + logInfo(session.table("updateTable").limit(20).collect().mkString("\n")) assert(session.table("updateTable").count() === (numElements * 9) / 10) assert(session.table("updateTable").collect().length === (numElements * 9) / 10) @@ -384,7 +384,7 @@ object ColumnUpdateDeleteTests extends Assertions with Logging { var result = session.sql("select UnitPrice, tid from order_details where tid <> 6").collect() assert(result.length === numElements - 1) - assert(result.toSeq.filter(_.getDouble(0) != 1.0) === Seq.empty) + assert(result.toSeq.filter(_.getDouble(0) != 1.0) === Nil) result = session.sql("select UnitPrice from order_details where tid = 6").collect() assert(result.length === 1) @@ -397,10 +397,10 @@ object ColumnUpdateDeleteTests extends Assertions with Logging { assert(result(0).getDouble(0) == 1.1) result = session.sql("select UnitPrice, tid from order_details where tid <> 6").collect() assert(result.length === numElements - 1) - assert(result.toSeq.filter(_.getDouble(0) != 1.1) === Seq.empty) + assert(result.toSeq.filter(_.getDouble(0) != 1.1) === Nil) result = session.sql("select UnitPrice, tid from order_details").collect() assert(result.length === numElements) - assert(result.toSeq.filter(_.getDouble(0) != 1.1) === Seq.empty) + assert(result.toSeq.filter(_.getDouble(0) != 1.1) === Nil) session.sql("UPDATE order_details SET UnitPrice = 1.1 WHERE tid <> 11") @@ -410,10 +410,10 @@ object ColumnUpdateDeleteTests extends Assertions with Logging { assert(result(0).getDouble(0) == 1.1) result = session.sql("select UnitPrice, tid from order_details where tid <> 6").collect() assert(result.length === numElements - 1) - assert(result.toSeq.filter(_.getDouble(0) != 1.1) === Seq.empty) + assert(result.toSeq.filter(_.getDouble(0) != 1.1) === Nil) result = session.sql("select UnitPrice, tid from order_details").collect() assert(result.length === numElements) - assert(result.toSeq.filter(_.getDouble(0) != 1.1) === Seq.empty) + assert(result.toSeq.filter(_.getDouble(0) != 1.1) === Nil) session.sql("drop table order_details") session.conf.unset(Property.ColumnBatchSize.name) @@ -551,7 +551,7 @@ object ColumnUpdateDeleteTests extends Assertions with Logging { assert(exceptions.isEmpty, s"Failed with exceptions: $exceptions") - session.table("updateTable").show() + logInfo(session.table("updateTable").limit(20).collect().mkString("\n")) var res = session.sql( "select * from updateTable EXCEPT select * from checkTable1").collect() diff --git a/core/src/test/scala/io/snappydata/SnappyFunSuite.scala b/core/src/test/scala/io/snappydata/SnappyFunSuite.scala index bc0b9c1c1d..9361e16db7 100644 --- a/core/src/test/scala/io/snappydata/SnappyFunSuite.scala +++ b/core/src/test/scala/io/snappydata/SnappyFunSuite.scala @@ -17,6 +17,7 @@ package io.snappydata import java.io.File +import java.sql.Statement import scala.collection.mutable.ArrayBuffer @@ -27,10 +28,15 @@ import io.snappydata.test.dunit.DistributedTestBase.{InitializeRun, WaitCriterio import io.snappydata.util.TestUtils import org.scalatest.Assertions +import org.apache.spark.sql.catalyst.encoders.{ExpressionEncoder, RowEncoder} import org.apache.spark.sql.catalyst.expressions.{Alias, And, AttributeReference, EqualNullSafe, EqualTo, Exists, ExprId, Expression, ListQuery, PredicateHelper, PredicateSubquery, ScalarSubquery} import org.apache.spark.sql.catalyst.plans.logical.{Filter, Join, LogicalPlan, OneRowRelation, Sample} import 
org.apache.spark.sql.catalyst.util.{sideBySide, stackTraceToString} -import org.apache.spark.sql.{AnalysisException, DataFrame, Dataset, QueryTest, Row} +import org.apache.spark.sql.collection.Utils +import org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils +import org.apache.spark.sql.row.SnappyStoreDialect +import org.apache.spark.sql.types.StructType +import org.apache.spark.sql.{AnalysisException, DataFrame, Dataset, QueryTest, Row, SnappySession} // scalastyle:off import org.scalatest.{BeforeAndAfterAll, FunSuite, Outcome, Retries} // scalastyle:on @@ -67,7 +73,7 @@ abstract class SnappyFunSuite } } - protected def sc(addOn: (SparkConf) => SparkConf): SparkContext = { + protected def sc(addOn: SparkConf => SparkConf): SparkContext = { val ctx = SnappyContext.globalSparkContext if (ctx != null && !ctx.isStopped) { ctx @@ -78,7 +84,7 @@ abstract class SnappyFunSuite } } - protected def scWithConf(addOn: (SparkConf) => SparkConf): SparkContext = { + protected def scWithConf(addOn: SparkConf => SparkConf): SparkContext = { new SparkContext(newSparkConf(addOn)) } @@ -194,12 +200,6 @@ abstract class SnappyFunSuite fileName } - protected def logStdOut(msg: String): Unit = { - // scalastyle:off - println(msg) - // scalastyle:on - } - def checkAnswer(df: => DataFrame, expectedAnswer: Seq[Row]): Unit = SnappyFunSuite.checkAnswer(df, expectedAnswer) } @@ -240,6 +240,22 @@ object SnappyFunSuite extends Assertions { assert(query.queryExecution.executedPlan.missingInput.isEmpty, s"The physical plan has missing inputs:\n${query.queryExecution.executedPlan}") } + + /** + * Converts a JDBC ResultSet to a DataFrame. + */ + def resultSetToDataset(session: SnappySession, stmt: Statement) + (sql: String): Dataset[Row] = { + if (stmt.execute(sql)) { + val rs = stmt.getResultSet + val schema = JdbcUtils.getSchema(rs, SnappyStoreDialect) + val rows = Utils.resultSetToSparkInternalRows(rs, schema).map(_.copy()).toSeq + session.internalCreateDataFrame(session.sparkContext.makeRDD(rows), schema) + } else { + implicit val encoder: ExpressionEncoder[Row] = RowEncoder(StructType(Nil)) + session.createDataset[Row](Nil) + } + } } /** diff --git a/core/src/test/scala/io/snappydata/SnappyTestRunner.scala b/core/src/test/scala/io/snappydata/SnappyTestRunner.scala index 4c4960fca1..82b716c377 100644 --- a/core/src/test/scala/io/snappydata/SnappyTestRunner.scala +++ b/core/src/test/scala/io/snappydata/SnappyTestRunner.scala @@ -49,6 +49,7 @@ with Logging with Retries { var snappyHome = "" var localHostName = "" var currWorkingDir = "" + private val commandOutput = "command-output.txt" // One can ovveride this method to pass other parameters like heap size. 
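The resultSetToDataset helper added above converts a JDBC ResultSet into a Dataset[Row] using SnappyData-internal utilities. A rough public-API-only sketch of the same idea; the type mapping below is deliberately minimal and falls back to strings:

import java.sql.ResultSet

import scala.collection.mutable.ArrayBuffer

import org.apache.spark.sql.types._
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

object ResultSetToDataFrameSketch {
  /** Drain a ResultSet into local Rows and build a DataFrame from them. */
  def toDataFrame(spark: SparkSession, rs: ResultSet): DataFrame = {
    val md = rs.getMetaData
    val numColumns = md.getColumnCount
    val schema = StructType((1 to numColumns).map { i =>
      val dataType = md.getColumnType(i) match {
        case java.sql.Types.INTEGER => IntegerType
        case java.sql.Types.BIGINT => LongType
        case java.sql.Types.DOUBLE | java.sql.Types.FLOAT => DoubleType
        case _ => StringType // minimal mapping: everything else as string
      }
      StructField(md.getColumnLabel(i), dataType, nullable = true)
    })
    val rows = new ArrayBuffer[Row]()
    while (rs.next()) {
      val values = (1 to numColumns).map { i =>
        schema(i - 1).dataType match {
          case StringType =>
            val v = rs.getObject(i)
            if (v eq null) null else v.toString
          case _ => rs.getObject(i)
        }
      }
      rows += Row.fromSeq(values)
    }
    spark.createDataFrame(spark.sparkContext.parallelize(rows), schema)
  }
}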
def servers: String = s"$localHostName\n$localHostName" @@ -88,10 +89,10 @@ with Logging with Retries { } def stopCluster(): Unit = { - executeProcess("snappyCluster", s"$snappyHome/sbin/snappy-stop-all.sh") + executeProcess("snappyCluster", s"$snappyHome/sbin/snappy-stop-all.sh", Some(commandOutput)) new File(s"$snappyHome/conf/servers").delete() new File(s"$snappyHome/conf/leads").delete() - executeProcess("sparkCluster", s"$snappyHome/sbin/stop-all.sh") + executeProcess("sparkCluster", s"$snappyHome/sbin/stop-all.sh", Some(commandOutput)) } def startupCluster(): Unit = { @@ -108,21 +109,29 @@ with Logging with Retries { } leadFile.deleteOnExit() - val (out, _) = executeProcess("snappyCluster", s"$snappyHome/sbin/snappy-start-all.sh") + val (out, _) = executeProcess("snappyCluster", s"$snappyHome/sbin/snappy-start-all.sh", + Some(commandOutput)) if (!out.contains(clusterSuccessString)) { throw new Exception(s"Failed to start Snappy cluster") } - executeProcess("sparkCluster", s"$snappyHome/sbin/start-all.sh") + executeProcess("sparkCluster", s"$snappyHome/sbin/start-all.sh", Some(commandOutput)) } // scalastyle:off println - def executeProcess(name: String , command: String): (String, String) = { + def executeProcess(name: String , command: String, + outFile: Option[String] = None): (String, String) = { val stdoutStream = new ByteArrayOutputStream val stderrStream = new ByteArrayOutputStream - val teeOut = new TeeOutputStream(stdout, new BufferedOutputStream(stdoutStream)) - val teeErr = new TeeOutputStream(stderr, new BufferedOutputStream(stderrStream)) + val (out, err) = outFile match { + case None => stdout -> stderr + case Some(f) => + val writer = new BufferedOutputStream(new FileOutputStream(f, true)) + writer -> writer + } + val teeOut = new TeeOutputStream(out, new BufferedOutputStream(stdoutStream)) + val teeErr = new TeeOutputStream(err, new BufferedOutputStream(stderrStream)) val stdoutWriter = new PrintStream(teeOut, true) val stderrWriter = new PrintStream(teeErr, true) @@ -136,6 +145,9 @@ with Logging with Retries { "PYTHONPATH" -> s"$snappyHome/python/lib/py4j-0.10.4-src.zip:$snappyHome/python") ! 
ProcessLogger(stdoutWriter.println, stderrWriter.println) var stdoutStr = stdoutStream.toString + if (out ne stdout) { + out.close() + } if (code != 0) { // add an exception to the output to force failure stdoutStr += s"\n***** Exit with Exception code = $code\n" @@ -145,16 +157,22 @@ with Logging with Retries { def SnappyShell(name: String, sqlCommand: Seq[String]): Unit = { - sqlCommand pipe snappyShell foreach (s => { - println(s) - if (s.toString.contains("ERROR") || s.toString.contains("Failed")) { - throw new Exception(s"Failed to run Query: $s") - } - }) + val writer = new PrintWriter(new BufferedWriter(new OutputStreamWriter( + new FileOutputStream(commandOutput, true)))) + try { + sqlCommand pipe snappyShell foreach (s => { + writer.println(s) + if (s.toString.contains("ERROR") || s.toString.contains("Failed")) { + throw new Exception(s"Failed to run Query: $s") + } + }) + } finally { + writer.close() + } } def Job(jobClass: String, lead: String, jarPath: String, - confs: Seq[String] = Seq.empty[String]): Unit = { + confs: Seq[String] = Nil): Unit = { val confStr = if (confs.size > 0) confs.foldLeft("")((r, c) => s"$r --conf $c") else "" @@ -165,7 +183,7 @@ with Logging with Retries { val jobCommand: String = s"$jobSubmit --app-name " + s"${jobClass}_${System.currentTimeMillis()} --class $jobClass $confStr" - val (out, err) = executeProcess("snappyCluster", jobCommand) + val (out, err) = executeProcess("snappyCluster", jobCommand, Some(commandOutput)) val jobSubmitStr = out @@ -178,14 +196,13 @@ with Logging with Retries { map.asInstanceOf[Map[String, Map[String, Any]]]("result")("jobId") case other => throw new Exception(s"bad result : $jsonStr") } - println("jobID " + jobID) - + logInfo("jobID " + jobID) var status = "RUNNING" while (status == "RUNNING") { Thread.sleep(3000) val statusCommand = s"$jobStatus $jobID" - val (out, err) = executeProcess("snappyCluster", statusCommand) + val (out, err) = executeProcess("snappyCluster", statusCommand, Some(commandOutput)) val jobSubmitStatus = out @@ -193,7 +210,7 @@ with Logging with Retries { statusjson match { case Some(map: Map[_, _]) => val v = map.asInstanceOf[Map[String, Any]]("status") - println("Current status of job: " + v) + logInfo("Current status of job: " + v) status = v.toString case other => "bad Result" } @@ -230,7 +247,7 @@ with Logging with Retries { def SparkSubmit(name: String, appClass: String, master: Option[String], - confs: Seq[String] = Seq.empty[String], + confs: Seq[String] = Nil, appJar: String): Unit = { val sparkHost = InetAddress.getLocalHost.getHostName @@ -238,7 +255,7 @@ with Logging with Retries { val confStr = if (confs.size > 0) confs.foldLeft("")((r, c) => s"$r --conf $c") else "" val classStr = if (appClass.isEmpty) "" else s"--class $appClass" val sparkSubmit = s"$snappyHome/bin/spark-submit $classStr --master $masterStr $confStr $appJar" - val (out, err) = executeProcess(name, sparkSubmit) + val (out, err) = executeProcess(name, sparkSubmit, Some(commandOutput)) if (checkException(out) || checkException(err)) { throw new Exception(s"Failed to submit $appClass") @@ -246,10 +263,10 @@ with Logging with Retries { } def RunExample(name: String, exampleClas: String, - args: Seq[String] = Seq.empty[String]): Unit = { + args: Seq[String] = Nil): Unit = { val argsStr = args.mkString(" ") val runExample = s"$snappyHome/bin/run-example $exampleClas $argsStr" - val (out, err) = executeProcess(name, runExample) + val (out, err) = executeProcess(name, runExample, Some(commandOutput)) if (checkException(out) 
|| checkException(err)) { throw new Exception(s"Failed to run $exampleClas") @@ -259,7 +276,7 @@ with Logging with Retries { def SparkShell(confs: Seq[String], options: String, scriptFile : String): Unit = { val confStr = if (confs.size > 0) confs.foldLeft("")((r, c) => s"$r --conf $c") else "" val shell = s"$sparkShell $confStr $options -i $scriptFile" - val (out, err) = executeProcess("snappyCluster", shell) + val (out, err) = executeProcess("snappyCluster", shell, Some(commandOutput)) if (checkException(out) || checkException(err)) { throw new Exception(s"Failed to run $shell") } @@ -267,13 +284,19 @@ with Logging with Retries { def SparkShell(confs: Seq[String], options: String, scalaStatements: Seq[String]): Unit = { - val confStr = if (confs.size > 0) confs.foldLeft("")((r, c) => s"$r --conf $c") else "" - scalaStatements pipe s"$snappyShell $confStr $options" foreach (s => { - println(s) - if (s.toString.contains("ERROR") || s.toString.contains("Failed")) { - throw new Exception(s"Failed to run Scala statement") - } - }) + val writer = new PrintWriter(new BufferedWriter(new OutputStreamWriter( + new FileOutputStream(commandOutput, true)))) + try { + val confStr = if (confs.size > 0) confs.foldLeft("")((r, c) => s"$r --conf $c") else "" + scalaStatements pipe s"$sparkShell $confStr $options" foreach (s => { + writer.println(s) + if (s.toString.contains("ERROR") || s.toString.contains("Failed")) { + throw new Exception(s"Failed to run Scala statement") + } + }) + } finally { + writer.close() + } } /* diff --git a/core/src/test/scala/io/snappydata/app/SampleTableQuery.scala b/core/src/test/scala/io/snappydata/app/SampleTableQuery.scala index 2d51475c3f..5c1e2f3bd0 100644 --- a/core/src/test/scala/io/snappydata/app/SampleTableQuery.scala +++ b/core/src/test/scala/io/snappydata/app/SampleTableQuery.scala @@ -41,7 +41,7 @@ object SampleTableQuery extends Serializable { conf.set("spark.sql.hive.metastore.sharedPrefixes","com.mysql.jdbc,org.postgresql,com.microsoft.sqlserver,oracle.jdbc,com.mapr.fs.shim.LibraryLoader,com.mapr.security.JNISecurity,com.mapr.fs.jni,org.apache.commons") conf.set("spark.sql.unsafe.enabled", "false") val sc = new SparkContext(conf) - sc.addJar("/Users/ashahid/workspace/snappy/snappy-commons/snappy-core/build-artifacts/scala-2.10/classes/test/app.jar") + sc.addJar("/Users/ashahid/workspace/snappy/snappy-commons/snappy-core/build-artifacts/scala-2.10/classes/scala/test/app.jar") val spc = SnappyContext(sc) // val hiveContext = new HiveContext(spc) diff --git a/core/src/test/scala/org/apache/spark/jdbc/ConnectionConfTest.scala b/core/src/test/scala/org/apache/spark/jdbc/ConnectionConfTest.scala index 3e47acb199..73687362c3 100644 --- a/core/src/test/scala/org/apache/spark/jdbc/ConnectionConfTest.scala +++ b/core/src/test/scala/org/apache/spark/jdbc/ConnectionConfTest.scala @@ -206,7 +206,7 @@ class ConnectionConfTest extends SnappyFunSuite with Logging with BeforeAndAfter }) val result = snc.sql("SELECT col1 from TEST_JDBC_TABLE_1") - result.show() + result.collect() // result.collect().foreach(v => assert(v(0) == 9)) snc.sql("drop table TEST_JDBC_TABLE_1") diff --git a/core/src/test/scala/org/apache/spark/sql/store/ColumnTableBatchInsertTest.scala b/core/src/test/scala/org/apache/spark/sql/store/ColumnTableBatchInsertTest.scala index 86c55b43d8..18eb1fa629 100644 --- a/core/src/test/scala/org/apache/spark/sql/store/ColumnTableBatchInsertTest.scala +++ b/core/src/test/scala/org/apache/spark/sql/store/ColumnTableBatchInsertTest.scala @@ -16,15 +16,14 @@ */ package 
org.apache.spark.sql.store - import scala.collection.mutable import io.snappydata.SnappyFunSuite import io.snappydata.core.{Data, TestData} import org.scalatest.{Assertions, BeforeAndAfter} +import org.apache.spark.sql.{Dataset, Row, SaveMode} import org.apache.spark.{Logging, SparkContext} -import org.apache.spark.sql.{Dataset, Row, SaveMode, SnappySession} class ColumnTableBatchInsertTest extends SnappyFunSuite with Logging @@ -71,8 +70,8 @@ class ColumnTableBatchInsertTest extends SnappyFunSuite "PARTITION_BY 'Key1'," + "BUCKETS '1')") - //val r = result.collect - //assert(r.length == 0) + // val r = result.collect + // assert(r.length == 0) var rdd = sc.parallelize( (1 to 10).map(i => TestData(i, i.toString))) @@ -99,12 +98,12 @@ class ColumnTableBatchInsertTest extends SnappyFunSuite val r2 = result.collect assert(r2.length == 20) - println("Successful") + logInfo("Successful") } test("test the shadow table creation without partition by clause") { - //snc.sql(s"DROP TABLE IF EXISTS $tableName") + // snc.sql(s"DROP TABLE IF EXISTS $tableName") val df = snc.sql(s"CREATE TABLE $tableName(Key1 INT ,Value STRING) " + "USING column " + @@ -124,11 +123,11 @@ class ColumnTableBatchInsertTest extends SnappyFunSuite dataDF.write.insertInto(tableName) val r2 = result.collect assert(r2.length == 19999) - println("Successful") + logInfo("Successful") } test("test the shadow table with persistence") { - //snc.sql(s"DROP TABLE IF EXISTS $tableName") + // snc.sql(s"DROP TABLE IF EXISTS $tableName") val df = snc.sql(s"CREATE TABLE $tableName(Key1 INT ,Value STRING)" + "USING column " + @@ -150,11 +149,11 @@ class ColumnTableBatchInsertTest extends SnappyFunSuite val r2 = result.collect assert(r2.length == 19999) - println("Successful") + logInfo("Successful") } test("test the shadow table with eviction") { - //snc.sql(s"DROP TABLE IF EXISTS $tableName") + // snc.sql(s"DROP TABLE IF EXISTS $tableName") val df = snc.sql(s"CREATE TABLE $tableName(Key1 INT ,Value STRING)" + "USING column " + @@ -174,7 +173,7 @@ class ColumnTableBatchInsertTest extends SnappyFunSuite dataDF.write.insertInto(tableName) val r2 = result.collect assert(r2.length == 19999) - println("Successful") + logInfo("Successful") } test("test the shadow table with options on compressed table") { @@ -200,18 +199,17 @@ class ColumnTableBatchInsertTest extends SnappyFunSuite val r2 = result.collect val r3 = mutable.HashSet[Int]() - r2.map( i => { + r2.map(i => { r3.add(i.getInt(0)) }) (1 to 19999).map(i => { - if(!r3.contains(i)) - println (s"Does not contain ${i}") + if (!r3.contains(i)) logInfo(s"Does not contain $i") }) assert(r2.length == 19999) - println("Successful") + logInfo("Successful") } test("test the shadow table with eviction options on compressed table") { @@ -236,7 +234,7 @@ class ColumnTableBatchInsertTest extends SnappyFunSuite dataDF.write.insertInto(tableName) val r2 = result.collect assert(r2.length == 19999) - println("Successful") + logInfo("Successful") } test("test create table as select with alias") { @@ -255,38 +253,38 @@ class ColumnTableBatchInsertTest extends SnappyFunSuite val tempRowTableName = "testRowTable" val tempColTableName = "testcolTable" - snc.sql("DROP TABLE IF EXISTS "+tempRowTableName) - snc.sql("CREATE TABLE " + tempRowTableName + " AS (SELECT col1 as field1,col2 as field2 FROM " + rowTable + ")" - ) + snc.sql("DROP TABLE IF EXISTS " + tempRowTableName) + snc.sql("CREATE TABLE " + tempRowTableName + + " AS (SELECT col1 as field1,col2 as field2 FROM " + rowTable + ")") var testResults1 = 
snc.sql("SELECT * FROM " + tempRowTableName).collect assert(testResults1.length == 5) - snc.sql("DROP TABLE IF EXISTS "+tempRowTableName) + snc.sql("DROP TABLE IF EXISTS " + tempRowTableName) - snc.sql("DROP TABLE IF EXISTS "+tempRowTableName) - snc.sql("CREATE TABLE " + tempRowTableName + " AS (SELECT col1 as field1,col2 as field2 FROM " + colTable + ")" - ) + snc.sql("DROP TABLE IF EXISTS " + tempRowTableName) + snc.sql("CREATE TABLE " + tempRowTableName + + " AS (SELECT col1 as field1,col2 as field2 FROM " + colTable + ")") var testResults2 = snc.sql("SELECT * FROM " + tempRowTableName).collect assert(testResults2.length == 5) - snc.sql("DROP TABLE IF EXISTS "+tempRowTableName) + snc.sql("DROP TABLE IF EXISTS " + tempRowTableName) - snc.sql("DROP TABLE IF EXISTS "+tempColTableName) - snc.sql("CREATE TABLE " + tempColTableName + " USING COLUMN OPTIONS() AS (SELECT col1 as field1,col2 as field2 FROM " + rowTable + ")" - ) + snc.sql("DROP TABLE IF EXISTS " + tempColTableName) + snc.sql("CREATE TABLE " + tempColTableName + + " USING COLUMN OPTIONS() AS (SELECT col1 as field1,col2 as field2 FROM " + rowTable + ")") var testResults3 = snc.sql("SELECT * FROM " + tempColTableName).collect assert(testResults3.length == 5) - snc.sql("DROP TABLE IF EXISTS "+tempColTableName) + snc.sql("DROP TABLE IF EXISTS " + tempColTableName) - snc.sql("DROP TABLE IF EXISTS "+tempColTableName) - snc.sql("CREATE TABLE " + tempColTableName + " USING COLUMN OPTIONS() AS (SELECT col1 as field1,col2 as field2 FROM " + colTable + ")" - ) + snc.sql("DROP TABLE IF EXISTS " + tempColTableName) + snc.sql("CREATE TABLE " + tempColTableName + + " USING COLUMN OPTIONS() AS (SELECT col1 as field1,col2 as field2 FROM " + colTable + ")") var testResults4 = snc.sql("SELECT * FROM " + tempColTableName).collect assert(testResults4.length == 5) - snc.sql("DROP TABLE IF EXISTS "+tempColTableName) + snc.sql("DROP TABLE IF EXISTS " + tempColTableName) - snc.sql("DROP TABLE IF EXISTS "+rowTable) - snc.sql("DROP TABLE IF EXISTS "+colTable) + snc.sql("DROP TABLE IF EXISTS " + rowTable) + snc.sql("DROP TABLE IF EXISTS " + colTable) } @@ -294,32 +292,28 @@ class ColumnTableBatchInsertTest extends SnappyFunSuite test("test table with column name having slash") { snc.sql(s"DROP TABLE IF EXISTS $tableName") val df = snc.sql("CREATE TABLE ColumnTable(\"a/b\" INT ,Col2 INT, Col3 INT) " + - "USING column " + - "options " + - "(" + - "PARTITION_BY 'col2'," + - "BUCKETS '1')") + "USING column " + + "options " + + "(" + + "PARTITION_BY 'col2'," + + "BUCKETS '1')") snc.sql("CREATE TABLE rowTable(\"a/b\" INT ,Col2 INT, Col3 INT) " + - "USING row " + - "options " + - "()") + "USING row " + + "options " + + "()") snc.sql("insert into ColumnTable(\"a/b\",col2,col3) values(1,2,3)") snc.sql("insert into rowTable(\"a/b\",col2,col3)values(1,2,3)") val result = snc.sql("SELECT col2+1 FROM " + tableName) - val r = result.collect - result.show() + val r = result.collect() assert(r.length == 1) - val result1 = snc.sql("SELECT \"a/b\"/1 FROM " + tableName) - val r1 = result1.collect - result1.show() - snc.sql("SELECT \"a/b\"/1 FROM rowTable").show + val r1 = result1.collect() + snc.sql("SELECT \"a/b\"/1 FROM rowTable").collect() assert(r1.length == 1) - snc.sql("drop table if exists columntable") snc.sql("drop table if exists rowtable") logInfo("Successful") diff --git a/core/src/test/scala/org/apache/spark/sql/store/ColumnTableTest.scala b/core/src/test/scala/org/apache/spark/sql/store/ColumnTableTest.scala index e528ed8748..b7b8576ef8 100644 --- 
a/core/src/test/scala/org/apache/spark/sql/store/ColumnTableTest.scala +++ b/core/src/test/scala/org/apache/spark/sql/store/ColumnTableTest.scala @@ -489,7 +489,7 @@ class ColumnTableTest val rdd = sc.parallelize(data, data.length).map(s => new Data(s(0), s(1), s(2))) val dataDF = snc.createDataFrame(rdd) snc.registerDataFrameAsTable(dataDF, "tempTable") - snc.sql("select * from tempTable").show + snc.sql("select * from tempTable").collect() intercept[AnalysisException] { // not supported snc.sql("alter table tempTable add column age int") @@ -1235,7 +1235,7 @@ class ColumnTableTest snc.sql("create table t1(a int,b int) using column options()") snc.sql("insert into t1 values(1,2)") - snc.sql("select * from t1").show + snc.sql("select * from t1").collect() snc.sql("create table t2(c int,d int) using column options() as (select * from t1)") snc.sql("create table t3 using column options() as (select * from t1)") @@ -1262,21 +1262,19 @@ class ColumnTableTest }', header 'false', inferschema 'true')") snc.sql("create table test2 using column options() as (select * from test1)") val df2 = snc.sql("select * from test2") - df2.show() + df2.collect() snc.sql("drop table test2") snc.sql("create table test2(_col1 integer,__col2 integer) using column options()") snc.sql("insert into test2 values(1,2)") snc.sql("insert into test2 values(2,3)") val df3 = snc.sql("select _col1,__col2 from test2") - df3.show() + df3.collect() val struct = (new StructType()) .add(StructField("_COL1", IntegerType, true)) .add(StructField("__COL2", IntegerType, true)) - df3.printSchema() assert(struct == df3.schema) - } test("Test loading json data to column table") { @@ -1305,7 +1303,7 @@ class ColumnTableTest "address.district, " + "address.lane " + "FROM people") - nameAndAddress.toJSON.show(truncate = false) + logInfo(nameAndAddress.toJSON.collect().mkString("\n")) assert(nameAndAddress.count() == 2) val rows: Array[String] = nameAndAddress.toJSON.collect() @@ -1368,7 +1366,7 @@ class ColumnTableTest snc.sql(s"insert into t1 values(2,'test2')") snc.sql(s"insert into t1 values(3,'test3')") val df = snc.sql("select * from t1") - df.show + df.collect() val tempPath = "/tmp/" + System.currentTimeMillis() assert(df.count() == 3) @@ -1377,7 +1375,7 @@ class ColumnTableTest Map("path" -> tempPath, "header" -> "true", "inferSchema" -> "true")) val dataDF = snc.sql("select * from TEST_EXTERNAL order by c1") - snc.sql("select * from TEST_EXTERNAL").show + snc.sql("select * from TEST_EXTERNAL").collect() assert(dataDF.count == 3) @@ -1403,8 +1401,9 @@ class ColumnTableTest snc.sql("drop table if exists test") } + test("Test method for getting key columns of the column tables") { - var session = new SnappySession(snc.sparkContext) + val session = new SnappySession(snc.sparkContext) session.sql("drop table if exists temp1") session.sql("drop table if exists temp2") session.sql("drop table if exists temp3") @@ -1420,6 +1419,40 @@ class ColumnTableTest "id2 bigint not null, id3 bigint not null) USING column " + "OPTIONS(key_columns 'id2,id1,id3' ) ") + // if key_columns are not present, then CREATE TABLE should fail (SNAP-2790) + try { + session.sql("create table ct1(id1 bigint not null , name1 varchar(10)) " + + "USING column OPTIONS(key_columns 'id')") + fail("should have failed") + } catch { + case _: AnalysisException => // expected + } + try { + session.sql("create table ct1(id1 bigint not null , name1 varchar(10)) " + + "USING column OPTIONS(partition_by 'id')") + fail("should have failed") + } catch { + case _: AnalysisException 
=> // expected + } + try { + session.sql("create table ct1(id1 bigint not null , name1 varchar(10)) " + + "USING column OPTIONS(partition_by 'id1', key_columns 'id')") + fail("should have failed") + } catch { + case _: AnalysisException => // expected + } + // key_columns with row tables should fail + try { + session.sql("create table rt1(id1 bigint not null , name1 varchar(10)) " + + "USING row OPTIONS(key_columns 'id1')") + fail("should have failed") + } catch { + case _: AnalysisException => // expected + } + session.sql("create table ct1(id1 bigint not null , name1 varchar(10)) " + + "USING column OPTIONS(partition_by 'id1', key_columns 'id1')") + session.sql("drop table ct1") + val res1 = session.sessionCatalog.getKeyColumns("temp1") assert(res1.size == 1) @@ -1462,7 +1495,7 @@ class ColumnTableTest snc.sql(s"insert into t1 values(2,'test2')") snc.sql(s"insert into t1 values(3,'test3')") val df = snc.sql("select * from t1") - df.show + df.collect() val tempPath = System.getProperty("user.dir") + System.currentTimeMillis() assert(df.count() == 3) diff --git a/core/src/test/scala/org/apache/spark/sql/store/CreateIndexTest.scala b/core/src/test/scala/org/apache/spark/sql/store/CreateIndexTest.scala index e7333fbc17..fb649da488 100644 --- a/core/src/test/scala/org/apache/spark/sql/store/CreateIndexTest.scala +++ b/core/src/test/scala/org/apache/spark/sql/store/CreateIndexTest.scala @@ -126,7 +126,7 @@ class CreateIndexTest extends SnappyFunSuite with BeforeAndAfterEach { } executeQ(s"select * from $tableName where col2 = 'aaa' ") { - CreateIndexTest.validateIndex(Seq.empty, tableName)(_) + CreateIndexTest.validateIndex(Nil, tableName)(_) } executeQ(s"select * from $tableName where col2 = 'bbb' and col3 = 'halo' ") { @@ -366,7 +366,7 @@ class CreateIndexTest extends SnappyFunSuite with BeforeAndAfterEach { executeQ(s"select t1.col2, t2.col3 from $table1 t1, $table2 t2 where t1.col2 = t2.col3 " + s"and t1.col3 = t2.col2 ") { - CreateIndexTest.validateIndex(Seq.empty, table1, table2)(_) + CreateIndexTest.validateIndex(Nil, table1, table2)(_) } executeQ(s"select t1.col2, t2.col3 from $table2 t1 join $table3 t2 on t1.col2 = t2.col2 " + @@ -386,7 +386,7 @@ class CreateIndexTest extends SnappyFunSuite with BeforeAndAfterEach { executeQ(s"select t1.col2, t2.col3 from $table1 t1 /*+ index( ) */ join $table3 t2 on t1.col2" + s" = t2.col2 and t1.col3 = t2.col3 ") { - CreateIndexTest.validateIndex(Seq.empty, table1, table3)(_) + CreateIndexTest.validateIndex(Nil, table1, table3)(_) } executeQ(s"select * from $table1 /*+ ${QueryHint.Index}($index1) */, $table3 " + @@ -405,7 +405,7 @@ class CreateIndexTest extends SnappyFunSuite with BeforeAndAfterEach { } executeQ(s"select * from $table1 tab1 join $table2 tab2 on tab1.col2 = tab2.col2") { - CreateIndexTest.validateIndex(Seq.empty, table1, table2)(_) + CreateIndexTest.validateIndex(Nil, table1, table2)(_) } try { @@ -603,7 +603,7 @@ class CreateIndexTest extends SnappyFunSuite with BeforeAndAfterEach { s"$table2 t2 where xx.col2 = t2.col2 and xx.col3 = t2.col3 " + s"and t1.col4 = xx.col5 ") { // t1 -> t4, t2 -> t4 - CreateIndexTest.validateIndex(Seq.empty, table1, table2, table4)(_) + CreateIndexTest.validateIndex(Nil, table1, table2, table4)(_) } executeQ(s"select t1.col2, t2.col3 from $table1 t1, $table4 t4, $rtable5 t5, $table2 t2 " + @@ -702,7 +702,7 @@ class CreateIndexTest extends SnappyFunSuite with BeforeAndAfterEach { val executeQ = CreateIndexTest.QueryExecutor(snContext, false, false) val selDF = executeQ(s"select * from $table1") { - 
CreateIndexTest.validateIndex(Seq.empty, s"$table1")(_) + CreateIndexTest.validateIndex(Nil, s"$table1")(_) } val baseRows = selDF.collect().toSet @@ -771,7 +771,6 @@ class CreateIndexTest extends SnappyFunSuite with BeforeAndAfterEach { val result = snContext.sql("select COL1 from " + tableName + " where COL2 like '%a%'") - result.explain(true) doPrint("") doPrint("=============== RESULTS START ===============") result.collect.foreach(doPrint) @@ -808,17 +807,15 @@ object CreateIndexTest extends SnappyFunSuite { val selectRes = snContext.sql(sqlText) if (withExplain || explainQ) { - selectRes.explain(true) + // selectRes.explain(true) } validate(selectRes) if (showResults) { - selectRes.show + logInfo(selectRes.collect().take(20).mkString("\n")) } else { - // scalastyle:off println - selectRes.collect().take(10).foreach(println) - // scalastyle:on println + logInfo(selectRes.collect().take(10).mkString("\n")) } selectRes diff --git a/core/src/test/scala/org/apache/spark/sql/store/CubeRollupGroupingSetsTest.scala b/core/src/test/scala/org/apache/spark/sql/store/CubeRollupGroupingSetsTest.scala index 2aa255799a..a6939284f9 100644 --- a/core/src/test/scala/org/apache/spark/sql/store/CubeRollupGroupingSetsTest.scala +++ b/core/src/test/scala/org/apache/spark/sql/store/CubeRollupGroupingSetsTest.scala @@ -20,8 +20,8 @@ import io.snappydata.SnappyFunSuite import io.snappydata.core.Data import org.scalatest.BeforeAndAfterAll -import org.apache.spark.sql.{SaveMode, Row, DataFrame} import org.apache.spark.sql.functions._ +import org.apache.spark.sql.{DataFrame, SaveMode} class CubeRollupGroupingSetsTest extends SnappyFunSuite with BeforeAndAfterAll { private var testData: DataFrame = _ @@ -29,7 +29,7 @@ class CubeRollupGroupingSetsTest extends SnappyFunSuite with BeforeAndAfterAll { override def beforeAll() { val data = Seq(Seq(1, 2005, 12000), Seq(1, 2006, 18000), Seq(1, 2007, 25000), Seq(2, 2005, 15000), Seq(2, 2006, 6000), Seq(2, 2007, 25000)) - val testRDD = sc.parallelize(data, data.length).map(s => new Data(s(0), s(1), s(2))) + val testRDD = sc.parallelize(data, data.length).map(s => Data(s.head, s(1), s(2))) testData = snc.createDataFrame(testRDD) snc.sql("Create table mytable (col1 INT, col2 INT, col3 INT)") testData.write.format("row").mode(SaveMode.Append).saveAsTable("mytable") @@ -40,47 +40,51 @@ class CubeRollupGroupingSetsTest extends SnappyFunSuite with BeforeAndAfterAll { } test("snappy cube_rollup query") { - //group by - val dfGroupByResult = testData.groupBy("col1", "col2").agg(sum("col2")).orderBy("col1", "col2").collect() + // group by + val dfGroupByResult = testData.groupBy("col1", "col2") + .agg(sum("col2")).orderBy("col1", "col2").collect() val snappyGroupByResult = snc.sql("select col1, col2, sum(col2) from mytable " + "group by col1, col2 order by col1, col2").collect() - println("DataFrame group by result") - dfGroupByResult.foreach(println) - println("SnappySQL group by result") - snappyGroupByResult.foreach(println) + logInfo("DataFrame group by result") + logInfo(dfGroupByResult.mkString("\n")) + logInfo("SnappySQL group by result") + logInfo(snappyGroupByResult.mkString("\n")) assert(dfGroupByResult.sameElements(snappyGroupByResult)) - //roll up - val dfRollupResult = testData.rollup("col1", "col2").agg(sum("col3")).orderBy("col1", "col2").collect() - val snappyRollupResult = snc.sql("select col1, col2, sum(col3) from mytable group by col1, col2 " + - "with rollup order by col1, col2").collect() + // roll up + val dfRollupResult = testData.rollup("col1", "col2") + 
.agg(sum("col3")).orderBy("col1", "col2").collect() + val snappyRollupResult = snc.sql( + "select col1, col2, sum(col3) from mytable group by col1, col2 " + + "with rollup order by col1, col2").collect() - println("DataFrame rollup result") - dfRollupResult.foreach(println) - println("SnappySQL rollup result") - snappyRollupResult.foreach(println) + logInfo("DataFrame rollup result") + logInfo(dfRollupResult.mkString("\n")) + logInfo("SnappySQL rollup result") + logInfo(snappyRollupResult.mkString("\n")) assert(dfRollupResult.sameElements(snappyRollupResult)) // cube - val dfCubeResult = testData.cube("col1", "col2").agg(sum("col3")).orderBy("col1", "col2").collect() - val snappyCubeResult = snc.sql("select col1, col2, sum(col3) from mytable group by col1, col2 " + - "with cube order by col1, col2").collect() + val dfCubeResult = testData.cube("col1", "col2") + .agg(sum("col3")).orderBy("col1", "col2").collect() + val snappyCubeResult = snc.sql( + "select col1, col2, sum(col3) from mytable group by col1, col2 " + + "with cube order by col1, col2").collect() - println("DataFrame cube result") - dfCubeResult.foreach(println) - println("SnappySQL cube result") - snappyCubeResult.foreach(println) + logInfo("DataFrame cube result") + logInfo(dfCubeResult.mkString("\n")) + logInfo("SnappySQL cube result") + logInfo(snappyCubeResult.mkString("\n")) assert(dfCubeResult.sameElements(snappyCubeResult)) // grouping sets query equivalent to above cube query val snappyGoupingSetResult = snc.sql("select col1, col2, sum(col3) from mytable group by col1, col2 " + "grouping sets ((col1, col2), (col1), (col2), ()) order by col1, col2").collect() - println("DataFrame cube result") - dfCubeResult.foreach(println) - println("SnappySQL gouping sets result") - snappyGoupingSetResult.foreach(println) + logInfo("DataFrame cube result") + logInfo(dfCubeResult.mkString("\n")) + logInfo("SnappySQL gouping sets result") + logInfo(snappyGoupingSetResult.mkString("\n")) assert(dfCubeResult.sameElements(snappyGoupingSetResult)) - } } diff --git a/core/src/test/scala/org/apache/spark/sql/store/MetadataTest.scala b/core/src/test/scala/org/apache/spark/sql/store/MetadataTest.scala index e79042c7b0..c922e40b95 100644 --- a/core/src/test/scala/org/apache/spark/sql/store/MetadataTest.scala +++ b/core/src/test/scala/org/apache/spark/sql/store/MetadataTest.scala @@ -16,7 +16,7 @@ */ package org.apache.spark.sql.store -import java.sql.{SQLException, Statement} +import java.sql.SQLException import java.util.regex.Pattern import com.gemstone.gemfire.internal.shared.ClientSharedUtils @@ -25,14 +25,10 @@ import com.pivotal.gemfirexd.internal.engine.diag.SysVTIs import io.snappydata.SnappyFunSuite import org.scalatest.Assertions -import org.apache.spark.executor.InputMetrics import org.apache.spark.sql.catalyst.analysis.{NoSuchDatabaseException, NoSuchTableException} -import org.apache.spark.sql.catalyst.encoders.{ExpressionEncoder, RowEncoder} import org.apache.spark.sql.execution.columnar.impl.ColumnPartitionResolver -import org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils -import org.apache.spark.sql.row.SnappyStoreDialect import org.apache.spark.sql.types._ -import org.apache.spark.sql.{AnalysisException, Dataset, Row, SnappySession} +import org.apache.spark.sql.{AnalysisException, Dataset, Row} /** * Tests for meta-data queries using Spark SQL. 
@@ -118,21 +114,6 @@ object MetadataTest extends Assertions { getLongVarcharTuple("GATEWAYSENDERS"), ("OFFHEAPENABLED", 0, "BOOLEAN", false), ("ROWLEVELSECURITYENABLED", 0, "BOOLEAN", false)) - def resultSetToDataset(session: SnappySession, stmt: Statement) - (sql: String): Dataset[Row] = { - if (stmt.execute(sql)) { - val rs = stmt.getResultSet - val schema = JdbcUtils.getSchema(rs, SnappyStoreDialect) - val dummyMetrics = new InputMetrics - val rows = JdbcUtils.resultSetToSparkInternalRows(rs, schema, dummyMetrics) - .map(_.copy()).toSeq - session.internalCreateDataFrame(session.sparkContext.makeRDD(rows), schema) - } else { - implicit val encoder: ExpressionEncoder[Row] = RowEncoder(StructType(Nil)) - session.createDataset[Row](Nil) - } - } - def testSYSTablesAndVTIs(executeSQL: String => Dataset[Row], hostName: String = ClientSharedUtils.getLocalHost.getCanonicalHostName, netServers: Seq[String] = Seq(""), locator: String = "", locatorNetServer: String = "", diff --git a/core/src/test/scala/org/apache/spark/sql/store/RowTableTest.scala b/core/src/test/scala/org/apache/spark/sql/store/RowTableTest.scala index 364895a9da..b3c43197dc 100644 --- a/core/src/test/scala/org/apache/spark/sql/store/RowTableTest.scala +++ b/core/src/test/scala/org/apache/spark/sql/store/RowTableTest.scala @@ -54,12 +54,12 @@ class RowTableTest dataDF.write.format("row").saveAsTable("MY_SCHEMA.MY_TABLE") var result = snc.sql("SELECT * FROM MY_SCHEMA.MY_TABLE" ) var r = result.collect - println(r.length) + logInfo(r.length.toString) snc.sql("drop table MY_SCHEMA.MY_TABLE" ) snc.sql("drop schema MY_SCHEMA") - println("Successful") + logInfo("Successful") } @@ -72,7 +72,7 @@ class RowTableTest val result = snc.sql("SELECT * FROM " + tableName) val r = result.collect assert(r.length == 0) - println("Successful") + logInfo("Successful") } test("Test the fetch first n row only test. 
with and without n parameter") { @@ -88,7 +88,7 @@ class RowTableTest result = snc.sql("SELECT * FROM " + tableName + " fetch first row only") r = result.collect assert(r.length == 0) - println("Successful") + logInfo("Successful") } test("Test the creation of table using DataSource API") { @@ -102,7 +102,7 @@ class RowTableTest val result = snc.sql("SELECT * FROM " + tableName) val r = result.collect assert(r.length == 5) - println("Successful") + logInfo("Successful") } test("Test the creation of table using DataSource API(PUT)") { @@ -115,14 +115,14 @@ class RowTableTest } dataDF.write.format("row").options(props).saveAsTable(tableName) - //Again do putInto, as there is no primary key, all will be appended + // Again do putInto, as there is no primary key, all will be appended dataDF.write.format("row").mode(SaveMode.Overwrite).options(props).putInto(tableName) val result = snc.sql("SELECT * FROM " + tableName) val r = result.collect // no primary key assert(r.length == 10) - println("Successful") + logInfo("Successful") } @@ -174,7 +174,7 @@ class RowTableTest r = result.collect assert(r.length == 6) - println("Successful") + logInfo("Successful") } val options = "OPTIONS (PARTITION_BY 'Col1')" @@ -188,7 +188,7 @@ class RowTableTest val result = snc.sql("SELECT * FROM " + tableName) val r = result.collect assert(r.length == 0) - println("Successful") + logInfo("Successful") } test("Test the creation/dropping of table using SQ with explicit URL") { @@ -199,7 +199,7 @@ class RowTableTest val result = snc.sql("SELECT * FROM " + tableName) val r = result.collect assert(r.length == 0) - println("Successful") + logInfo("Successful") } test("Test the creation using SQL and insert a DF in append/overwrite/errorifexists mode") { @@ -220,7 +220,7 @@ class RowTableTest val result = snc.sql("SELECT * FROM " + tableName) val r = result.collect assert(r.length == 5) - println("Successful") + logInfo("Successful") } test("Test the creation using SQL and put a DF in append/overwrite/errorifexists mode") { @@ -237,7 +237,7 @@ class RowTableTest val result = snc.sql("SELECT * FROM " + tableName) val r = result.collect assert(r.length == 5) - println("Successful") + logInfo("Successful") } test("Test the creation using SQL and put a seq of rows in append/overwrite/errorifexists mode") { @@ -252,7 +252,7 @@ class RowTableTest val result = snc.sql("SELECT * FROM " + tableName) val r = result.collect assert(r.length == 5) - println("Successful") + logInfo("Successful") } // should throw exception if primary key is getting updated? 
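Several of the RowTableTest cases above insert a DataFrame into an existing table in append/overwrite/errorifexists modes. A minimal sketch of those SaveMode semantics with stock Spark; the table name is illustrative and SnappyData's row tables and putInto are not modeled here:

import org.apache.spark.sql.{SaveMode, SparkSession}

object SaveModeSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[2]").appName("save-modes").getOrCreate()
    import spark.implicits._
    val df = (1 to 5).map(i => (i, s"addr_$i")).toDF("col1", "col2")

    // ErrorIfExists (the default) fails if the table already exists,
    // Append adds rows, Overwrite replaces the previous contents.
    df.write.mode(SaveMode.ErrorIfExists).saveAsTable("demo_table")
    df.write.mode(SaveMode.Append).saveAsTable("demo_table")
    assert(spark.table("demo_table").count() == 10)
    df.write.mode(SaveMode.Overwrite).saveAsTable("demo_table")
    assert(spark.table("demo_table").count() == 5)

    spark.sql("drop table demo_table")
    spark.stop()
  }
}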
@@ -270,14 +270,14 @@ class RowTableTest val r = result.collect assert(r.length == 5) - //check if the row against primary key 1 is 1, 200, 300 + // check if the row against primary key 1 is 1, 200, 300 - val row1 = snc.sql(s"SELECT * FROM $tableName WHERE Col1='1'") - assert(row1.collect.length == 1) + val row1 = snc.sql(s"SELECT * FROM $tableName WHERE Col1='1'").collect() + assert(row1.length == 1) - println(row1.show) + logInfo(row1.mkString("\n")) - println("Successful") + logInfo("Successful") } test("Test Creation using SQL with Primary Key and PUT INTO SELECT AS ") { @@ -309,15 +309,15 @@ class RowTableTest val r = result.collect assert(r.length == 5) - //check if the row against primary key 1 is 1, 200, 300 + // check if the row against primary key 1 is 1, 200, 300 - val row1 = snc.sql(s"SELECT * FROM $tableName WHERE Col1='1'") - assert(row1.collect.length == 1) + val row1 = snc.sql(s"SELECT * FROM $tableName WHERE Col1='1'").collect() + assert(row1.length == 1) - println(row1.show) + logInfo(row1.mkString("\n")) snc.dropTable("tempTable") - println("Successful") + logInfo("Successful") } test("PUT INTO TABLE USING SQL"){ @@ -368,7 +368,7 @@ class RowTableTest val result = snc.sql("SELECT * FROM " + tableName) val r = result.collect assert(r.length == 5) - println("Successful") + logInfo("Successful") } test("Test the creation of table using CREATE TABLE AS STATEMENT ") { @@ -393,7 +393,7 @@ class RowTableTest assert(r.length == 10) snc.dropTable(tableName2) - println("Successful") + logInfo("Successful") } test("Test alter table SQL syntax") { @@ -479,7 +479,7 @@ class RowTableTest r = result.collect assert(r.length == 0) - println("Successful") + logInfo("Successful") } test("Test the drop syntax SnappyContext and SQL ") { @@ -501,7 +501,7 @@ class RowTableTest snc.sql("DROP TABLE IF EXISTS " + tableName) - println("Successful") + logInfo("Successful") } test("Test the drop syntax SQL and SnappyContext ") { @@ -523,7 +523,7 @@ class RowTableTest snc.dropTable(tableName, true) - println("Successful") + logInfo("Successful") } test("Test the update table ") { @@ -556,7 +556,7 @@ class RowTableTest snc.dropTable("RowTableUpdate") snc.dropTable("RowTableUpdate2") - println("Successful") + logInfo("Successful") } @@ -684,7 +684,7 @@ class RowTableTest assert(r.length == 10) snc.dropTable(tableName2) - println("Successful") + logInfo("Successful") } test("Test create table from CSV without header- SNAP-1442") { diff --git a/core/src/test/scala/org/apache/spark/sql/store/SnappyCatalogSuite.scala b/core/src/test/scala/org/apache/spark/sql/store/SnappyCatalogSuite.scala index b9fe20d503..1cc1756c98 100644 --- a/core/src/test/scala/org/apache/spark/sql/store/SnappyCatalogSuite.scala +++ b/core/src/test/scala/org/apache/spark/sql/store/SnappyCatalogSuite.scala @@ -421,7 +421,7 @@ abstract class CatalogTestUtils { } def newFunc(name: String, database: Option[String] = None): CatalogFunction = { - CatalogFunction(FunctionIdentifier(name, database), funcClass, Seq.empty[FunctionResource]) + CatalogFunction(FunctionIdentifier(name, database), funcClass, Nil) } /** diff --git a/core/src/test/scala/org/apache/spark/sql/store/TokenizationTest.scala b/core/src/test/scala/org/apache/spark/sql/store/TokenizationTest.scala index d21c840aa1..1e26c5a5dc 100644 --- a/core/src/test/scala/org/apache/spark/sql/store/TokenizationTest.scala +++ b/core/src/test/scala/org/apache/spark/sql/store/TokenizationTest.scala @@ -278,7 +278,6 @@ class TokenizationTest res = snc.sql(s"select quartile, avg(c) as avgC, 
max(c) as maxC" + s" from (select c, ntile(4) over (order by c) as quartile from $table ) x " + s"group by quartile order by quartile").collect() - // res.foreach(println) // Unix timestamp val df = snc.sql(s"select * from $table where UNIX_TIMESTAMP('2015-01-01 12:00:00') > a") @@ -480,20 +479,18 @@ class TokenizationTest s"select * from $table where a = $x" } val start = System.currentTimeMillis() - // scalastyle:off println q.zipWithIndex.foreach { case (x, i) => var result = snc.sql(x).collect() assert(result.length === 1) result.foreach( r => { - println(s"${r.get(0)}, ${r.get(1)}, ${r.get(2)}, ${i}") + logInfo(s"${r.get(0)}, ${r.get(1)}, ${r.get(2)}, $i") assert(r.get(0) == r.get(1) && r.get(2) == i) }) } val end = System.currentTimeMillis() // snc.sql(s"select * from $table where a = 1200").collect() - println("Time taken = " + (end - start)) - // scalastyle:on println + logInfo("Time taken = " + (end - start)) val cacheMap = SnappySession.getPlanCache.asMap() assert( cacheMap.size() == 1) @@ -672,9 +669,8 @@ class TokenizationTest var query = s"select * from $table t1, $table2 t2 where t1.a = t2.a and t1.b = 5 limit 2" // snc.sql("set spark.sql.autoBroadcastJoinThreshold=-1") val result1 = snc.sql(query).collect() - // scalastyle:off println result1.foreach( r => { - println(r.get(0) + ", " + r.get(1) + r.get(2) + ", " + r.get(3) + r.get(4) + + logInfo(r.get(0) + ", " + r.get(1) + r.get(2) + ", " + r.get(3) + r.get(4) + ", " + r.get(5)) }) val cacheMap = SnappySession.getPlanCache.asMap() @@ -684,10 +680,9 @@ class TokenizationTest query = s"select * from $table t1, $table2 t2 where t1.a = t2.a and t1.b = 7 limit 2" val result2 = snc.sql(query).collect() result2.foreach( r => { - println(r.get(0) + ", " + r.get(1) + r.get(2) + ", " + r.get(3) + r.get(4) + + logInfo(r.get(0) + ", " + r.get(1) + r.get(2) + ", " + r.get(3) + r.get(4) + ", " + r.get(5)) }) - // scalastyle:on println assert( cacheMap.size() == 1) assert(!result1.sameElements(result2)) assert(result1.length > 0) @@ -854,7 +849,6 @@ class TokenizationTest val rows1 = rs1.collect() assert(rows0.sameElements(rows1)) - // rows1.foreach(println) val cacheMap = SnappySession.getPlanCache.asMap() assert(cacheMap.size() == 0) diff --git a/core/src/test/scala/org/apache/spark/sql/store/ViewTest.scala b/core/src/test/scala/org/apache/spark/sql/store/ViewTest.scala index 2e4b3f7e07..3dce18c0b2 100644 --- a/core/src/test/scala/org/apache/spark/sql/store/ViewTest.scala +++ b/core/src/test/scala/org/apache/spark/sql/store/ViewTest.scala @@ -17,17 +17,63 @@ package org.apache.spark.sql.store +import java.sql.SQLException + import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem +import io.snappydata.SnappyFunSuite.checkAnswer import io.snappydata.{Property, SnappyFunSuite} +import org.scalatest.Assertions import org.apache.spark.sql.execution.joins.{BroadcastHashJoinExec, HashJoinExec} -import org.apache.spark.sql.{AnalysisException, Row, SnappySession} +import org.apache.spark.sql.{AnalysisException, Dataset, Row, SnappySession} /** * Tests for temporary, global and persistent views. 
*/ class ViewTest extends SnappyFunSuite { + override def beforeAll(): Unit = { + super.beforeAll() + ViewTest.createTables(this.snc.snappySession) + } + + override def afterAll(): Unit = { + ViewTest.dropTables(this.snc.snappySession) + super.afterAll() + } + + test("temporary view") { + ViewTest.testTemporaryView(snc.snappySession.sql, () => new SnappySession(sc).sql) + } + + test("global temporary view") { + ViewTest.testGlobalTemporaryView(snc.snappySession.sql, () => new SnappySession(sc).sql) + } + + test("temporary view using") { + ViewTest.testTemporaryViewUsing(snc.snappySession.sql, () => new SnappySession(sc).sql) + } + + test("global temporary view using") { + ViewTest.testGlobalTemporaryViewUsing(snc.snappySession.sql, () => new SnappySession(sc).sql) + } + + test("persistent view") { + ViewTest.testPersistentView(snc.snappySession.sql, checkPlans = true, + () => new SnappySession(sc).sql, restartSpark) + } + + private def restartSpark(): Unit = { + stopAll() + val sys = InternalDistributedSystem.getConnectedInstance + if (sys ne null) { + sys.disconnect() + } + } +} + +object ViewTest extends Assertions { + private val columnTable = "viewColTable" private val rowTable = "viewRowTable" private val numRows = 10 @@ -35,275 +81,306 @@ class ViewTest extends SnappyFunSuite { private val viewTempMeta = Seq(Row("ID", "int", null), Row("ADDR", "string", null), Row("RANK", "int", null)) - override def beforeAll(): Unit = { - super.beforeAll() - val session = this.snc.snappySession + private def getExpectedResult: Seq[Row] = { + (0 until numRows).map(i => Row(i, "address_" + (i + 1), i + 1)) + } + + private def tableExists(executeSQL: String => Dataset[Row], name: String): Boolean = { + try { + executeSQL(s"select 1 from $name where 1 = 0") + true + } catch { + case _: Exception => false + } + } + + def createTables(session: SnappySession): Unit = { session.sql(s"create table $columnTable (id int, addr varchar(20)) using column " + "options (partition_by 'id')") session.sql(s"create table $rowTable (id int, addr varchar(20)) using row " + s"options (partition_by 'id', colocate_with '$columnTable')") val rows = (0 until numRows).map(i => Row(i, "address_" + (i + 1))) - snc.insert(columnTable, rows: _*) - snc.insert(rowTable, rows: _*) - } - - private def getExpectedResult: Seq[Row] = { - (0 until numRows).map(i => Row(i, "address_" + (i + 1), i + 1)) + session.insert(columnTable, rows: _*) + session.insert(rowTable, rows: _*) } - private def tableExists(session: SnappySession, name: String): Boolean = { - val identifier = session.tableIdentifier(name) - session.sessionCatalog.isTemporaryTable(identifier) || - session.sessionCatalog.tableExists(identifier) + def dropTables(session: SnappySession): Unit = { + session.sql(s"drop table $rowTable") + session.sql(s"drop table $columnTable") } - test("temporary view") { - val session = this.snc.snappySession - + def testTemporaryView(executeSQL: String => Dataset[Row], + newExecution: () => String => Dataset[Row]): Unit = { val tableMeta = Seq(Row("ID", "int", null), Row("ADDR", "varchar(20)", null)) - checkAnswer(session.sql(s"describe $columnTable"), tableMeta) - checkAnswer(session.sql(s"describe $rowTable"), tableMeta) + checkAnswer(executeSQL(s"describe $columnTable"), tableMeta) + checkAnswer(executeSQL(s"describe $rowTable"), tableMeta) val expected = getExpectedResult val showResult = Seq(Row("", "VIEWONTABLE", true, false)) // check temporary view and its meta-data for column table - session.sql(s"create temporary view viewOnTable as 
$viewQuery from $columnTable") + executeSQL(s"create temporary view viewOnTable as $viewQuery from $columnTable") - assert(tableExists(session, "viewOnTable") === true) - checkAnswer(session.sql("describe viewOnTable"), viewTempMeta) - checkAnswer(session.sql("select * from viewOnTable"), expected) - checkAnswer(session.sql("show views"), showResult) - checkAnswer(session.sql("show views in app"), showResult) - checkAnswer(session.sql("show views from app"), showResult) + assert(tableExists(executeSQL, "viewOnTable") === true) + checkAnswer(executeSQL("describe viewOnTable"), viewTempMeta) + checkAnswer(executeSQL("select * from viewOnTable"), expected) + checkAnswer(executeSQL("show views"), showResult) + checkAnswer(executeSQL("show views in app"), showResult) + checkAnswer(executeSQL("show views from app"), showResult) // should not be visible from another session - val session2 = session.newSession() - assert(tableExists(session2, "viewOnTable") === false) + val executeSQL2 = newExecution() + assert(tableExists(executeSQL2, "viewOnTable") === false) // drop and check unavailability - session.sql("drop view viewOnTable") - assert(tableExists(session, "viewOnTable") === false) - assert(tableExists(session2, "viewOnTable") === false) + executeSQL("drop view viewOnTable") + assert(tableExists(executeSQL, "viewOnTable") === false) + assert(tableExists(executeSQL2, "viewOnTable") === false) // check the same for view on row table - session.sql(s"create temporary view viewOnTable as $viewQuery from $rowTable") + executeSQL(s"create temporary view viewOnTable as $viewQuery from $rowTable") - assert(tableExists(session, "viewOnTable") === true) - checkAnswer(session.sql("describe viewOnTable"), viewTempMeta) - checkAnswer(session.sql("select * from viewOnTable"), expected) + assert(tableExists(executeSQL, "viewOnTable") === true) + checkAnswer(executeSQL("describe viewOnTable"), viewTempMeta) + checkAnswer(executeSQL("select * from viewOnTable"), expected) - assert(tableExists(session2, "viewOnTable") === false) - session.sql("drop view viewOnTable") - assert(tableExists(session, "viewOnTable") === false) - assert(tableExists(session2, "viewOnTable") === false) - - session2.close() + assert(tableExists(executeSQL2, "viewOnTable") === false) + executeSQL("drop view viewOnTable") + assert(tableExists(executeSQL, "viewOnTable") === false) + assert(tableExists(executeSQL2, "viewOnTable") === false) } - test("global temporary view") { - val session = this.snc.snappySession - + def testGlobalTemporaryView(executeSQL: String => Dataset[Row], + newExecution: () => String => Dataset[Row]): Unit = { val expected = getExpectedResult val showResult = Seq(Row("GLOBAL_TEMP", "VIEWONTABLE", true, true)) // check temporary view and its meta-data for column table - session.sql(s"create global temporary view viewOnTable as $viewQuery from $columnTable") + executeSQL(s"create global temporary view viewOnTable as $viewQuery from $columnTable") - assert(session.sessionCatalog.getGlobalTempView("viewOnTable").isDefined) - checkAnswer(session.sql("describe global_temp.viewOnTable"), viewTempMeta) - checkAnswer(session.sql("select * from viewOnTable"), expected) - checkAnswer(session.sql("show views"), Nil) - checkAnswer(session.sql("show views in global_temp"), showResult) - checkAnswer(session.sql("show views from global_temp"), showResult) + assert(executeSQL("show views in global_temp").collect() === + Array(Row("GLOBAL_TEMP", "VIEWONTABLE", true, true))) + checkAnswer(executeSQL("describe 
global_temp.viewOnTable"), viewTempMeta) + checkAnswer(executeSQL("select * from viewOnTable"), expected) + checkAnswer(executeSQL("show views"), Nil) + checkAnswer(executeSQL("show views in global_temp"), showResult) + checkAnswer(executeSQL("show views from global_temp"), showResult) // should be visible from another session - val session2 = session.newSession() - assert(session2.sessionCatalog.getGlobalTempView("viewOnTable").isDefined) - checkAnswer(session2.sql("describe global_temp.viewOnTable"), viewTempMeta) - checkAnswer(session2.sql("select * from viewOnTable"), expected) + val executeSQL2 = newExecution() + assert(executeSQL2("show views in global_temp").collect() === + Array(Row("GLOBAL_TEMP", "VIEWONTABLE", true, true))) + checkAnswer(executeSQL2("describe global_temp.viewOnTable"), viewTempMeta) + checkAnswer(executeSQL2("select * from viewOnTable"), expected) // drop and check unavailability - session.sql("drop view viewOnTable") - assert(session.sessionCatalog.getGlobalTempView("viewOnTable").isEmpty) - assert(session2.sessionCatalog.getGlobalTempView("viewOnTable").isEmpty) + executeSQL("drop view viewOnTable") + assert(executeSQL("show views in global_temp").collect().isEmpty) + assert(executeSQL2("show views in global_temp").collect().isEmpty) // check the same for view on row table - session.sql(s"create global temporary view viewOnTable as $viewQuery from $columnTable") - - assert(session.sessionCatalog.getGlobalTempView("viewOnTable").isDefined) - checkAnswer(session.sql("describe global_temp.viewOnTable"), viewTempMeta) - checkAnswer(session.sql("select * from viewOnTable"), expected) + executeSQL(s"create global temporary view viewOnTable as $viewQuery from $columnTable") - assert(session2.sessionCatalog.getGlobalTempView("viewOnTable").isDefined) - checkAnswer(session2.sql("describe global_temp.viewOnTable"), viewTempMeta) - checkAnswer(session2.sql("select * from viewOnTable"), expected) + assert(executeSQL("show views in global_temp").collect() === + Array(Row("GLOBAL_TEMP", "VIEWONTABLE", true, true))) + checkAnswer(executeSQL("describe global_temp.viewOnTable"), viewTempMeta) + checkAnswer(executeSQL("select * from viewOnTable"), expected) - session.sql("drop view viewOnTable") - assert(session.sessionCatalog.getGlobalTempView("viewOnTable").isEmpty) - assert(session2.sessionCatalog.getGlobalTempView("viewOnTable").isEmpty) + assert(executeSQL2("show views in global_temp").collect() === + Array(Row("GLOBAL_TEMP", "VIEWONTABLE", true, true))) + checkAnswer(executeSQL2("describe global_temp.viewOnTable"), viewTempMeta) + checkAnswer(executeSQL2("select * from viewOnTable"), expected) - session2.close() + executeSQL("drop view viewOnTable") + assert(executeSQL("show views in global_temp").collect().isEmpty) + assert(executeSQL2("show views in global_temp").collect().isEmpty) } - test("temporary view using") { - val session = this.snc.snappySession - + def testTemporaryViewUsing(executeSQL: String => Dataset[Row], + newExecution: () => String => Dataset[Row]): Unit = { // check temporary view with USING and its meta-data val hfile: String = getClass.getResource("/2015.parquet").getPath - val airline = session.read.parquet(hfile) - session.sql(s"create temporary view airlineView using parquet options(path '$hfile')") - val airlineView = session.table("airlineView") + executeSQL(s"create external table airlineTemp using parquet options (path '$hfile')") + val airline = executeSQL("select * from airlineTemp limit 1") + executeSQL(s"create temporary view airlineView 
using parquet options(path '$hfile')") + val airlineView = executeSQL("select * from airlineView limit 1") - assert(tableExists(session, "airlineView") === true) + assert(tableExists(executeSQL, "airlineView") === true) assert(airlineView.schema === airline.schema) - checkAnswer(session.sql("select count(*) from airlineView"), Seq(Row(airline.count()))) - assert(airlineView.count() == airline.count()) + checkAnswer(executeSQL("select count(*) from airlineView"), + executeSQL("select count(*) from airlineTemp").collect()) // should not be visible from another session - val session2 = session.newSession() - assert(tableExists(session2, "airlineView") === false) + val executeSQL2 = newExecution() + assert(tableExists(executeSQL2, "airlineView") === false) // drop and check unavailability - session.sql("drop table airlineView") - assert(tableExists(session, "airlineView") === false) - assert(tableExists(session2, "airlineView") === false) - - session2.close() + executeSQL("drop table airlineTemp") + executeSQL("drop table airlineView") + assert(tableExists(executeSQL, "airlineTemp") === false) + assert(tableExists(executeSQL2, "airlineTemp") === false) + assert(tableExists(executeSQL, "airlineView") === false) + assert(tableExists(executeSQL2, "airlineView") === false) } - test("global temporary view using") { - val session = this.snc.snappySession - + def testGlobalTemporaryViewUsing(executeSQL: String => Dataset[Row], + newExecution: () => String => Dataset[Row]): Unit = { // check global temporary view with USING and its meta-data val hfile: String = getClass.getResource("/2015.parquet").getPath - val airline = session.read.parquet(hfile) - session.sql(s"create global temporary view airlineView using parquet options(path '$hfile')") - val airlineView = session.table("airlineView") + executeSQL(s"create external table airlineTemp using parquet options (path '$hfile')") + val airline = executeSQL("select * from airlineTemp limit 1") + executeSQL(s"create global temporary view airlineView using parquet options(path '$hfile')") + val airlineView = executeSQL("select * from airlineView limit 1") - assert(session.sessionCatalog.getGlobalTempView("airlineView").isDefined) + assert(executeSQL("show views in global_temp").collect() === + Array(Row("GLOBAL_TEMP", "AIRLINEVIEW", true, true))) assert(airlineView.schema === airline.schema) - checkAnswer(session.sql("select count(*) from airlineView"), Seq(Row(airline.count()))) - assert(airlineView.count() == airline.count()) + checkAnswer(executeSQL("select count(*) from airlineView"), + executeSQL("select count(*) from airlineTemp").collect()) // should be visible from another session - val session2 = session.newSession() - assert(session2.sessionCatalog.getGlobalTempView("airlineView").isDefined) - checkAnswer(session2.sql("select count(*) from airlineView"), Seq(Row(airline.count()))) + val executeSQL2 = newExecution() + assert(executeSQL2("show views in global_temp").collect() === + Array(Row("GLOBAL_TEMP", "AIRLINEVIEW", true, true))) + checkAnswer(executeSQL2("select count(*) from airlineView"), + executeSQL("select count(*) from airlineTemp").collect()) // drop and check unavailability - session.sql("drop table airlineView") - assert(session.sessionCatalog.getGlobalTempView("airlineView").isEmpty) - assert(session2.sessionCatalog.getGlobalTempView("airlineView").isEmpty) - - session2.close() + executeSQL("drop table airlineTemp") + executeSQL("drop table airlineView") + assert(tableExists(executeSQL, "airlineTemp") === false) + 
assert(tableExists(executeSQL2, "airlineTemp") === false) + assert(executeSQL("show views in global_temp").collect().isEmpty) + assert(executeSQL2("show views in global_temp").collect().isEmpty) } - test("persistent view") { + def testPersistentView(executeSQL: String => Dataset[Row], checkPlans: Boolean, + newExecution: () => String => Dataset[Row], restartSpark: () => Unit): Unit = { val expected = getExpectedResult // check temporary view and its meta-data for column table - checkPersistentView(columnTable, rowTable, snc.snappySession, expected) - // check the same for view on row table - checkPersistentView(rowTable, columnTable, snc.snappySession, expected) + checkPersistentView(columnTable, rowTable, checkPlans, executeSQL, newExecution, + expected, restartSpark) + // check the same for view on row table with new session since old one would not be valid + val newExecuteSQL = newExecution() + checkPersistentView(rowTable, columnTable, checkPlans, newExecuteSQL, newExecution, + expected, restartSpark) } - private def checkPersistentView(table: String, otherTable: String, session: SnappySession, - expectedResult: Seq[Row]): Unit = { - session.sql(s"create view viewOnTable as $viewQuery from $table") + private def checkPersistentView(table: String, otherTable: String, checkPlans: Boolean, + executeSQL: String => Dataset[Row], newExecution: () => String => Dataset[Row], + expectedResult: Seq[Row], restartSpark: () => Unit): Unit = { + executeSQL(s"create view viewOnTable as $viewQuery from $table") val viewMeta = Seq(Row("ID", "int", null), Row("ADDR", "varchar(20)", null), Row("RANK", "int", null)) val showResult = Seq(Row("APP", "VIEWONTABLE", false, false)) - assert(tableExists(session, "viewOnTable") === true) - checkAnswer(session.sql("describe viewOnTable"), viewMeta) - checkAnswer(session.sql("select * from viewOnTable"), expectedResult) - checkAnswer(session.sql("show views"), showResult) - checkAnswer(session.sql("show views in app"), showResult) - checkAnswer(session.sql("show views from app"), showResult) + assert(tableExists(executeSQL, "viewOnTable") === true) + checkAnswer(executeSQL("describe viewOnTable"), viewMeta) + checkAnswer(executeSQL("select * from viewOnTable"), expectedResult) + checkAnswer(executeSQL("show views"), showResult) + checkAnswer(executeSQL("show views in app"), showResult) + checkAnswer(executeSQL("show views from app"), showResult) // should be visible from another session - var session2 = session.newSession() - assert(tableExists(session2, "viewOnTable") === true) - checkAnswer(session2.sql("describe viewOnTable"), viewMeta) - checkAnswer(session2.sql("select * from viewOnTable"), expectedResult) + var executeSQL2 = newExecution() + assert(tableExists(executeSQL2, "viewOnTable") === true) + checkAnswer(executeSQL2("describe viewOnTable"), viewMeta) + checkAnswer(executeSQL2("select * from viewOnTable"), expectedResult) // test for SNAP-2205: see CompressionCodecId.isCompressed for a description of the problem - session.conf.set(Property.ColumnBatchSize.name, "10k") + executeSQL(s"set ${Property.ColumnBatchSize.name}=10k") // 21 columns mean 63 for ColumnStatsSchema so total of 64 fields including the COUNT // in the stats row which will fit in exactly one long for the nulls bitset val cols = (1 to 21).map(i => s"col$i string").mkString(", ") - session.sql(s"CREATE TABLE test2205 ($cols) using column options (buckets '4')") + executeSQL(s"CREATE TABLE test2205 ($cols) using column options (buckets '4')") val numElements = 10000 val projection = (1 to 
21).map(i => s"null as col$i") - session.range(numElements).selectExpr(projection: _*).write.insertInto("test2205") + executeSQL( + s"insert into test2205 select ${projection.mkString(", ")} from range($numElements)") - checkAnswer(session.sql("select count(*), count(col10) from test2205"), + checkAnswer(executeSQL("select count(*), count(col10) from test2205"), Seq(Row(numElements, 0))) - // should be available after a restart - session.close() - session2.close() - stopAll() - val sys = InternalDistributedSystem.getConnectedInstance - if (sys ne null) { - sys.disconnect() - } + // test large view + val longStr = (1 to 1000).mkString("test data ", "", "") + val largeViewStr = (1 to 100).map(i => + s"case when $i % 3 == 0 then cast(null as string) else '$longStr[$i]' end as c$i").mkString( + "select ", ", ", "") + assert(largeViewStr.length > 100000) + var rs = executeSQL2(largeViewStr).collect() + assert(rs.length == 1) + executeSQL2(s"create view largeView as $largeViewStr").collect() + rs = executeSQL("select * from largeView").collect() + assert(rs.length == 1) - session2 = new SnappySession(sc) - assert(tableExists(session2, "viewOnTable") === true) - checkAnswer(session2.sql("describe viewOnTable"), viewMeta) - checkAnswer(session2.sql("select * from viewOnTable"), expectedResult) + // should be available after a restart + restartSpark() + executeSQL2 = newExecution() + assert(tableExists(executeSQL2, "viewOnTable") === true) + checkAnswer(executeSQL2("describe viewOnTable"), viewMeta) + checkAnswer(executeSQL2("select * from viewOnTable"), expectedResult) - checkAnswer(session2.sql("select count(*), count(col10) from test2205"), + checkAnswer(executeSQL2("select count(*), count(col10) from test2205"), Seq(Row(numElements, 0))) try { - session2.sql("drop table viewOnTable") + executeSQL2("drop table viewOnTable") fail("expected drop table to fail for view") } catch { - case _: AnalysisException => // expected + case _: AnalysisException | _: SQLException => // expected } // drop and check unavailability - session2.sql("drop view viewOnTable") - assert(tableExists(session2, "viewOnTable") === false) - session2.sql("drop table test2205") + executeSQL2("drop view viewOnTable") + assert(tableExists(executeSQL2, "viewOnTable") === false) + executeSQL2("drop table test2205") + + // test large view after restart + rs = executeSQL2("select * from largeView").collect() + assert(rs.length == 1) + executeSQL2("drop view largeView") // check colocated joins with VIEWs (SNAP-2204) val query = s"select c.id, r.addr from $columnTable c inner join $rowTable r on (c.id = r.id)" // first check with normal query - var ds = session2.sql(query) + var ds = executeSQL2(query) checkAnswer(ds, expectedResult.map(r => Row(r.get(0), r.get(1)))) - var plan = ds.queryExecution.executedPlan - assert(plan.find(_.isInstanceOf[HashJoinExec]).isDefined) - assert(plan.find(_.isInstanceOf[BroadcastHashJoinExec]).isEmpty) + if (checkPlans) { + val plan = ds.queryExecution.executedPlan + assert(plan.find(_.isInstanceOf[HashJoinExec]).isDefined) + assert(plan.find(_.isInstanceOf[BroadcastHashJoinExec]).isEmpty) + } val expectedResult2 = expectedResult.map(r => Row(r.get(0), r.get(1))) // check for normal view join with table - session2.sql(s"create view viewOnTable as select id, addr, id + 1 from $table") - ds = session2.sql("select t.id, v.addr from viewOnTable v " + + executeSQL2(s"create view viewOnTable as select id, addr, id + 1 from $table") + ds = executeSQL2("select t.id, v.addr from viewOnTable v " + s"inner join 
$otherTable t on (v.id = t.id)") checkAnswer(ds, expectedResult2) - plan = ds.queryExecution.executedPlan - assert(plan.find(_.isInstanceOf[HashJoinExec]).isDefined) - assert(plan.find(_.isInstanceOf[BroadcastHashJoinExec]).isEmpty) + if (checkPlans) { + val plan = ds.queryExecution.executedPlan + assert(plan.find(_.isInstanceOf[HashJoinExec]).isDefined) + assert(plan.find(_.isInstanceOf[BroadcastHashJoinExec]).isEmpty) + } - session2.sql("drop view viewOnTable") - assert(tableExists(session2, "viewOnTable") === false) + executeSQL2("drop view viewOnTable") + assert(tableExists(executeSQL2, "viewOnTable") === false) // next query on a join view - session2.sql(s"create view viewOnJoin as $query") - ds = session2.sql("select * from viewOnJoin") + executeSQL2(s"create view viewOnJoin as $query") + ds = executeSQL2("select * from viewOnJoin") checkAnswer(ds, expectedResult2) - plan = ds.queryExecution.executedPlan - assert(plan.find(_.isInstanceOf[HashJoinExec]).isDefined) - assert(plan.find(_.isInstanceOf[BroadcastHashJoinExec]).isEmpty) + if (checkPlans) { + val plan = ds.queryExecution.executedPlan + assert(plan.find(_.isInstanceOf[HashJoinExec]).isDefined) + assert(plan.find(_.isInstanceOf[BroadcastHashJoinExec]).isEmpty) + } - session2.sql("drop view viewOnJoin") - assert(tableExists(session2, "viewOnJoin") === false) + executeSQL2("drop view viewOnJoin") + assert(tableExists(executeSQL2, "viewOnJoin") === false) } } diff --git a/docs/connectors/jdbc_streaming_connector.md b/docs/connectors/jdbc_streaming_connector.md index fcc95c2255..4641ecfb18 100644 --- a/docs/connectors/jdbc_streaming_connector.md +++ b/docs/connectors/jdbc_streaming_connector.md @@ -95,7 +95,7 @@ return reader.writeStream() .format("snappystore") .option("sink", Mysink.class.getName()) .option("checkpointLocation", - Utils.createTempDir("/data/wrk/w/snappydata/tmg-temp", "tmg-spark") + Utils.createTempDir("/data/wrk/w/snappydata/temp", "snappy-sink") .getCanonicalPath()) .option("tableName", tableName) .start(); diff --git a/docs/security/authentication_connecting_to_a_secure_cluster.md b/docs/security/authentication_connecting_to_a_secure_cluster.md index 8fc11e8a1c..e9c687097c 100644 --- a/docs/security/authentication_connecting_to_a_secure_cluster.md +++ b/docs/security/authentication_connecting_to_a_secure_cluster.md @@ -48,7 +48,7 @@ val conf = new SparkConf() .setMaster(s"spark://$hostName:7077") .set("spark.executor.cores", TestUtils.defaultCores.toString) .set("spark.executor.extraClassPath", - getEnvironmentVariable("SNAPPY_DIST_CLASSPATH")) + getEnvironmentVariable("SNAPPY_HOME") + "/jars/*" ) .set("snappydata.connection", snappydataLocatorURL) .set("spark.snappydata.store.user", username) .set("spark.snappydata.store.password", password) diff --git a/docs/security/row_level_security.md b/docs/security/row_level_security.md index 3040e2dadb..8fb54f01b5 100644 --- a/docs/security/row_level_security.md +++ b/docs/security/row_level_security.md @@ -54,9 +54,9 @@ Initially all the users can view all the records in the table. 
You can restrict $ SELECT * FROM clients; id | account_name | account_manager ----+--------------+----------------- - 1 | jnj | tom - 2 | tmg | harris - 3 | tibco | greg + 1 | ibm | tom + 2 | apple | harris + 3 | msft | greg (3 rows) ``` @@ -114,7 +114,7 @@ Now the users are permitted to view the records of only those rows that are perm $ SELECT * FROM clients; id | account_name | account_manager ----+--------------+----------------- - 2 | tmg | tom + 2 | ibm | tom (1 row) ``` @@ -144,7 +144,7 @@ Here in the following example, multiple policies are created for the table named ``` CREATE POLICY mypolicy1 on mytable using user_col = current_user(); CREATE POLICY mypolicy2 on mytable using id < 4; -CREATE POLICY mypolicy3 on mytable using account_name = ‘tibco’; +CREATE POLICY mypolicy3 on mytable using account_name = ‘msft’; ALTER TABLE mytable ENABLE ROW LEVEL SECURITY; @@ -155,12 +155,12 @@ These policies are combined as shown in this example: SELECT * FROM mytable WHERE user_col = current_user() # current_user is AND id<4 -AND account_name = ‘tibco’; +AND account_name = ‘msft’; $ select * from mytable; id | account_name | account_manager ----+--------------+----------------- - 3 | tibco | tom + 3 | msft | tom (1 row) @@ -216,4 +216,4 @@ For example, DROP POLICY just_own_clients ``` !!!Caution - If you drop a table, all the policies associated with the table will also get dropped. \ No newline at end of file + If you drop a table, all the policies associated with the table will also get dropped. diff --git a/dtests/build.gradle b/dtests/build.gradle index f99cc4fd27..a4ca06e625 100644 --- a/dtests/build.gradle +++ b/dtests/build.gradle @@ -153,7 +153,7 @@ testClasses.doLast { include '**/*.inc' include '**/*.sql' } - into project(':snappy-store:snappydata-store-tests').sourceSets.main.output.classesDir + into project(':snappy-store:snappydata-store-tests').sourceSets.main.java.outputDir } } } diff --git a/dtests/src/resources/regression_docs/HowToRunRowStoreTests.md b/dtests/src/resources/regression_docs/HowToRunRowStoreTests.md index 3306af654e..6cba8901ac 100755 --- a/dtests/src/resources/regression_docs/HowToRunRowStoreTests.md +++ b/dtests/src/resources/regression_docs/HowToRunRowStoreTests.md @@ -16,12 +16,12 @@ ``` export SNAPPYDATA_SOURCE_DIR= -export JTESTS=$SNAPPYDATA_SOURCE_DIR/store/tests/sql/build-artifacts/linux/classes/main +export JTESTS=$SNAPPYDATA_SOURCE_DIR/store/tests/sql/build-artifacts/linux/classes/java/main $SNAPPYDATA_SOURCE_DIR/sample-runbt.sh $SNAPPYDATA_SOURCE_DIR [-l -r -m ] E.g. For running sql.bt export SNAPPYDATA_SOURCE_DIR= -export JTESTS=$SNAPPY_HOME/store/tests/sql/build-artifacts/linux/classes/main +export JTESTS=$SNAPPY_HOME/store/tests/sql/build-artifacts/linux/classes/java/main $SNAPPYDATA_SOURCE_DIR/store/tests/core/src/main/java/bin/sample-runbt.sh $SNAPPYDATA_SOURCE_DIR -l $JTESTS/sql/snappy.local.conf sql/sql.bt ``` diff --git a/dtests/src/resources/scripts/cdcConnector/bulkDelete.sql b/dtests/src/resources/scripts/cdcConnector/bulkDelete.sql index bb752d95ab..43d56486ff 100644 --- a/dtests/src/resources/scripts/cdcConnector/bulkDelete.sql +++ b/dtests/src/resources/scripts/cdcConnector/bulkDelete.sql @@ -115,15 +115,15 @@ DELETE FROM [testdatabase].[dbo].[REFERENCE_DATA ] WHE DELETE FROM [testdatabase].[dbo].[RETURNED_MAIL ] WHERE RTRN_MAIL_ID > ? ; DELETE FROM [testdatabase].[dbo].[REVENUE_CODE ] WHERE REV_CD_ID > ? ; DELETE FROM [testdatabase].[dbo].[SERVICE_CODE ] WHERE SERV_CD_ID > ? 
; -DELETE FROM [testdatabase].[dbo].[TMGSERVICES_ACCOUNTING_CODES ] WHERE ACCOUNTING_CODE_ID > ? ; -DELETE FROM [testdatabase].[dbo].[TMGSERVICES_CLIENT_MASTER ] WHERE CLIENT_REF_ID > ? ; -DELETE FROM [testdatabase].[dbo].[TMGSERVICES_DENIAL_REASON_CONFIG ] WHERE DENIAL_REASON_ID > ? ; -DELETE FROM [testdatabase].[dbo].[TMGSERVICES_DNIS_CONFIG ] WHERE DNIS_ID > ? ; -DELETE FROM [testdatabase].[dbo].[TMGSERVICES_GROUP_CODE_CONFIG ] WHERE GROUP_ID > ? ; -DELETE FROM [testdatabase].[dbo].[TMGSERVICES_KEY_GENERATOR ] WHERE KEY_GEN_ID > ? ; -DELETE FROM [testdatabase].[dbo].[TMGSERVICES_PLAN_CODE_CONFIG ] WHERE PLAN_CODE_ID > ? ; -DELETE FROM [testdatabase].[dbo].[TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS ] WHERE VARIABLE_ID > ? ; -DELETE FROM [testdatabase].[dbo].[TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS] WHERE SUBJ_CAT_ID > ? ; +DELETE FROM [testdatabase].[dbo].[SERVICES_ACCOUNTING_CODES ] WHERE ACCOUNTING_CODE_ID > ? ; +DELETE FROM [testdatabase].[dbo].[SERVICES_CLIENT_MASTER ] WHERE CLIENT_REF_ID > ? ; +DELETE FROM [testdatabase].[dbo].[SERVICES_DENIAL_REASON_CONFIG ] WHERE DENIAL_REASON_ID > ? ; +DELETE FROM [testdatabase].[dbo].[SERVICES_DNIS_CONFIG ] WHERE DNIS_ID > ? ; +DELETE FROM [testdatabase].[dbo].[SERVICES_GROUP_CODE_CONFIG ] WHERE GROUP_ID > ? ; +DELETE FROM [testdatabase].[dbo].[SERVICES_KEY_GENERATOR ] WHERE KEY_GEN_ID > ? ; +DELETE FROM [testdatabase].[dbo].[SERVICES_PLAN_CODE_CONFIG ] WHERE PLAN_CODE_ID > ? ; +DELETE FROM [testdatabase].[dbo].[SERVICES_PTMR_VARIABLE_TRANSLATIONS ] WHERE VARIABLE_ID > ? ; +DELETE FROM [testdatabase].[dbo].[SERVICES_SUBJECT_CATEGORY_TRANSLATIONS ] WHERE SUBJ_CAT_ID > ? ; DELETE FROM [testdatabase].[dbo].[TOPIC ] WHERE TPC_ID > ? ; DELETE FROM [testdatabase].[dbo].[TOPIC_COMMUNICATION ] WHERE TPC_INQ_ID > ? ; DELETE FROM [testdatabase].[dbo].[UM_ACTIVITY ] WHERE UM_ACTY_ID > ? 
; diff --git a/dtests/src/resources/scripts/cdcConnector/createAndLoadCDCColTables.sql b/dtests/src/resources/scripts/cdcConnector/createAndLoadCDCColTables.sql index 07d7278733..528b0084f6 100644 --- a/dtests/src/resources/scripts/cdcConnector/createAndLoadCDCColTables.sql +++ b/dtests/src/resources/scripts/cdcConnector/createAndLoadCDCColTables.sql @@ -1015,75 +1015,75 @@ CREATE EXTERNAL TABLE staging_RECEIPT CREATE TABLE RECEIPT USING column OPTIONS(partition_by 'BILL_ENT_ID',buckets '32',key_columns 'CLIENT_ID,BILL_ENT_ID,RCPT_ID ' ) AS (SELECT * FROM staging_RECEIPT); -DROP TABLE IF EXISTS TMGSERVICES_PLAN_CODE_CONFIG; -DROP TABLE IF EXISTS staging_TMGSERVICES_PLAN_CODE_CONFIG; +DROP TABLE IF EXISTS SERVICES_PLAN_CODE_CONFIG; +DROP TABLE IF EXISTS staging_SERVICES_PLAN_CODE_CONFIG; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL TABLE staging_TMGSERVICES_PLAN_CODE_CONFIG - USING com.databricks.spark.csv OPTIONS (path ':dataLocation/TMGSERVICES_PLAN_CODE_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE TMGSERVICES_PLAN_CODE_CONFIG USING column OPTIONS(partition_by 'PLAN_CODE_ID,CLIENT_ID', buckets '32',key_columns 'PLAN_CODE_ID,CLIENT_ID ' ) AS (SELECT * FROM staging_TMGSERVICES_PLAN_CODE_CONFIG); +CREATE EXTERNAL TABLE staging_SERVICES_PLAN_CODE_CONFIG + USING com.databricks.spark.csv OPTIONS (path ':dataLocation/SERVICES_PLAN_CODE_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE SERVICES_PLAN_CODE_CONFIG USING column OPTIONS(partition_by 'PLAN_CODE_ID,CLIENT_ID', buckets '32',key_columns 'PLAN_CODE_ID,CLIENT_ID ' ) AS (SELECT * FROM staging_SERVICES_PLAN_CODE_CONFIG); -DROP TABLE IF EXISTS TMGSERVICES_KEY_GENERATOR; -DROP TABLE IF EXISTS staging_TMGSERVICES_KEY_GENERATOR; +DROP TABLE IF EXISTS SERVICES_KEY_GENERATOR; +DROP TABLE IF EXISTS staging_SERVICES_KEY_GENERATOR; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL TABLE staging_TMGSERVICES_KEY_GENERATOR - USING com.databricks.spark.csv OPTIONS (path ':dataLocation/TMGSERVICES_KEY_GENERATOR.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE TMGSERVICES_KEY_GENERATOR USING column OPTIONS(partition_by 'KEY_GEN_ID,CLIENT_ID', buckets '32',key_columns 'KEY_GEN_ID,CLIENT_ID ' ) AS (SELECT * FROM staging_TMGSERVICES_KEY_GENERATOR); +CREATE EXTERNAL TABLE staging_SERVICES_KEY_GENERATOR + USING com.databricks.spark.csv OPTIONS (path ':dataLocation/SERVICES_KEY_GENERATOR.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE SERVICES_KEY_GENERATOR USING column OPTIONS(partition_by 'KEY_GEN_ID,CLIENT_ID', buckets '32',key_columns 'KEY_GEN_ID,CLIENT_ID ' ) AS (SELECT * FROM staging_SERVICES_KEY_GENERATOR); -DROP TABLE IF EXISTS TMGSERVICES_GROUP_CODE_CONFIG; -DROP TABLE IF EXISTS staging_TMGSERVICES_GROUP_CODE_CONFIG; +DROP TABLE IF EXISTS SERVICES_GROUP_CODE_CONFIG; +DROP TABLE IF EXISTS staging_SERVICES_GROUP_CODE_CONFIG; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL TABLE staging_TMGSERVICES_GROUP_CODE_CONFIG - USING com.databricks.spark.csv OPTIONS (path ':dataLocation/TMGSERVICES_GROUP_CODE_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE TMGSERVICES_GROUP_CODE_CONFIG USING column OPTIONS(partition_by 'GROUP_ID,CLIENT_ID', buckets '32',key_columns 
'GROUP_ID,CLIENT_ID ' ) AS (SELECT * FROM staging_TMGSERVICES_GROUP_CODE_CONFIG); +CREATE EXTERNAL TABLE staging_SERVICES_GROUP_CODE_CONFIG + USING com.databricks.spark.csv OPTIONS (path ':dataLocation/SERVICES_GROUP_CODE_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE SERVICES_GROUP_CODE_CONFIG USING column OPTIONS(partition_by 'GROUP_ID,CLIENT_ID', buckets '32',key_columns 'GROUP_ID,CLIENT_ID ' ) AS (SELECT * FROM staging_SERVICES_GROUP_CODE_CONFIG); -DROP TABLE IF EXISTS TMGSERVICES_DNIS_CONFIG; -DROP TABLE IF EXISTS staging_TMGSERVICES_DNIS_CONFIG; +DROP TABLE IF EXISTS SERVICES_DNIS_CONFIG; +DROP TABLE IF EXISTS staging_SERVICES_DNIS_CONFIG; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL TABLE staging_TMGSERVICES_DNIS_CONFIG - USING com.databricks.spark.csv OPTIONS (path ':dataLocation/TMGSERVICES_DNIS_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE TMGSERVICES_DNIS_CONFIG USING column OPTIONS(partition_by 'DNIS_ID,CLIENT_ID', buckets '32',key_columns ' DNIS_ID,CLIENT_ID' ) AS (SELECT * FROM staging_TMGSERVICES_DNIS_CONFIG); +CREATE EXTERNAL TABLE staging_SERVICES_DNIS_CONFIG + USING com.databricks.spark.csv OPTIONS (path ':dataLocation/SERVICES_DNIS_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE SERVICES_DNIS_CONFIG USING column OPTIONS(partition_by 'DNIS_ID,CLIENT_ID', buckets '32',key_columns ' DNIS_ID,CLIENT_ID' ) AS (SELECT * FROM staging_SERVICES_DNIS_CONFIG); -DROP TABLE IF EXISTS TMGSERVICES_DENIAL_REASON_CONFIG; -DROP TABLE IF EXISTS staging_TMGSERVICES_DENIAL_REASON_CONFIG; +DROP TABLE IF EXISTS SERVICES_DENIAL_REASON_CONFIG; +DROP TABLE IF EXISTS staging_SERVICES_DENIAL_REASON_CONFIG; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL TABLE staging_TMGSERVICES_DENIAL_REASON_CONFIG - USING com.databricks.spark.csv OPTIONS (path ':dataLocation/TMGSERVICES_DENIAL_REASON_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE TMGSERVICES_DENIAL_REASON_CONFIG USING column OPTIONS(partition_by 'DENIAL_REASON_ID,CLIENT_ID', buckets '32',key_columns 'DENIAL_REASON_ID,CLIENT_ID ' ) AS (SELECT * FROM staging_TMGSERVICES_DENIAL_REASON_CONFIG); +CREATE EXTERNAL TABLE staging_SERVICES_DENIAL_REASON_CONFIG + USING com.databricks.spark.csv OPTIONS (path ':dataLocation/SERVICES_DENIAL_REASON_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE SERVICES_DENIAL_REASON_CONFIG USING column OPTIONS(partition_by 'DENIAL_REASON_ID,CLIENT_ID', buckets '32',key_columns 'DENIAL_REASON_ID,CLIENT_ID ' ) AS (SELECT * FROM staging_SERVICES_DENIAL_REASON_CONFIG); -DROP TABLE IF EXISTS TMGSERVICES_CLIENT_MASTER; -DROP TABLE IF EXISTS staging_TMGSERVICES_CLIENT_MASTER; +DROP TABLE IF EXISTS SERVICES_CLIENT_MASTER; +DROP TABLE IF EXISTS staging_SERVICES_CLIENT_MASTER; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL TABLE staging_TMGSERVICES_CLIENT_MASTER - USING com.databricks.spark.csv OPTIONS (path ':dataLocation/TMGSERVICES_CLIENT_MASTER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE TMGSERVICES_CLIENT_MASTER USING column OPTIONS(partition_by 'CLIENT_REF_ID,CLIENT_ID', buckets '32',key_columns 'CLIENT_REF_ID,CLIENT_ID ' ) AS (SELECT * FROM 
staging_TMGSERVICES_CLIENT_MASTER); +CREATE EXTERNAL TABLE staging_SERVICES_CLIENT_MASTER + USING com.databricks.spark.csv OPTIONS (path ':dataLocation/SERVICES_CLIENT_MASTER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE SERVICES_CLIENT_MASTER USING column OPTIONS(partition_by 'CLIENT_REF_ID,CLIENT_ID', buckets '32',key_columns 'CLIENT_REF_ID,CLIENT_ID ' ) AS (SELECT * FROM staging_SERVICES_CLIENT_MASTER); -DROP TABLE IF EXISTS TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS; -DROP TABLE IF EXISTS staging_TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS; +DROP TABLE IF EXISTS SERVICES_SUBJECT_CATEGORY_TRANSLATIONS; +DROP TABLE IF EXISTS staging_SERVICES_SUBJECT_CATEGORY_TRANSLATIONS; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL TABLE staging_TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS - USING com.databricks.spark.csv OPTIONS (path ':dataLocation/TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS USING column OPTIONS(partition_by 'SUBJ_CAT_ID,CLIENT_ID', buckets '32',key_columns 'SUBJ_CAT_ID,CLIENT_ID ' ) AS (SELECT * FROM staging_TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS); +CREATE EXTERNAL TABLE staging_SERVICES_SUBJECT_CATEGORY_TRANSLATIONS + USING com.databricks.spark.csv OPTIONS (path ':dataLocation/SERVICES_SUBJECT_CATEGORY_TRANSLATIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE SERVICES_SUBJECT_CATEGORY_TRANSLATIONS USING column OPTIONS(partition_by 'SUBJ_CAT_ID,CLIENT_ID', buckets '32',key_columns 'SUBJ_CAT_ID,CLIENT_ID ' ) AS (SELECT * FROM staging_SERVICES_SUBJECT_CATEGORY_TRANSLATIONS); -DROP TABLE IF EXISTS TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS; -DROP TABLE IF EXISTS staging_TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS; +DROP TABLE IF EXISTS SERVICES_PTMR_VARIABLE_TRANSLATIONS; +DROP TABLE IF EXISTS staging_SERVICES_PTMR_VARIABLE_TRANSLATIONS; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL TABLE staging_TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS - USING com.databricks.spark.csv OPTIONS (path ':dataLocation/TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS USING column OPTIONS(partition_by 'VARIABLE_ID', buckets '32',key_columns 'VARIABLE_ID ' ) AS (SELECT * FROM staging_TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS); +CREATE EXTERNAL TABLE staging_SERVICES_PTMR_VARIABLE_TRANSLATIONS + USING com.databricks.spark.csv OPTIONS (path ':dataLocation/SERVICES_PTMR_VARIABLE_TRANSLATIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE SERVICES_PTMR_VARIABLE_TRANSLATIONS USING column OPTIONS(partition_by 'VARIABLE_ID', buckets '32',key_columns 'VARIABLE_ID ' ) AS (SELECT * FROM staging_SERVICES_PTMR_VARIABLE_TRANSLATIONS); -DROP TABLE IF EXISTS TMGSERVICES_ACCOUNTING_CODES; -DROP TABLE IF EXISTS staging_TMGSERVICES_ACCOUNTING_CODES; +DROP TABLE IF EXISTS SERVICES_ACCOUNTING_CODES; +DROP TABLE IF EXISTS staging_SERVICES_ACCOUNTING_CODES; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL TABLE staging_TMGSERVICES_ACCOUNTING_CODES - USING com.databricks.spark.csv OPTIONS (path ':dataLocation/TMGSERVICES_ACCOUNTING_CODES.dat', header 'true', inferSchema 'true', nullValue 'NULL', 
maxCharsPerColumn '4096'); -CREATE TABLE TMGSERVICES_ACCOUNTING_CODES USING column OPTIONS(partition_by 'ACCOUNTING_CODE_ID,CLIENT_ID', buckets '32',key_columns ' ACCOUNTING_CODE_ID,CLIENT_ID' ) AS (SELECT * FROM staging_TMGSERVICES_ACCOUNTING_CODES); +CREATE EXTERNAL TABLE staging_SERVICES_ACCOUNTING_CODES + USING com.databricks.spark.csv OPTIONS (path ':dataLocation/SERVICES_ACCOUNTING_CODES.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE SERVICES_ACCOUNTING_CODES USING column OPTIONS(partition_by 'ACCOUNTING_CODE_ID,CLIENT_ID', buckets '32',key_columns ' ACCOUNTING_CODE_ID,CLIENT_ID' ) AS (SELECT * FROM staging_SERVICES_ACCOUNTING_CODES); DROP TABLE IF EXISTS UNAPPLIED_CASH; diff --git a/dtests/src/resources/scripts/cdcConnector/createAndLoadCDCMixedTables.sql b/dtests/src/resources/scripts/cdcConnector/createAndLoadCDCMixedTables.sql index a895d9d28c..07ef630d9c 100644 --- a/dtests/src/resources/scripts/cdcConnector/createAndLoadCDCMixedTables.sql +++ b/dtests/src/resources/scripts/cdcConnector/createAndLoadCDCMixedTables.sql @@ -2520,12 +2520,12 @@ CREATE TABLE IF NOT EXISTS RECEIPT( ) USING row OPTIONS(partition_by 'BILL_ENT_ID',buckets '32',redundancy '1'); INSERT INTO RECEIPT SELECT * FROM staging_RECEIPT; -DROP TABLE IF EXISTS TMGSERVICES_PLAN_CODE_CONFIG; -DROP TABLE IF EXISTS staging_TMGSERVICES_PLAN_CODE_CONFIG; +DROP TABLE IF EXISTS SERVICES_PLAN_CODE_CONFIG; +DROP TABLE IF EXISTS staging_SERVICES_PLAN_CODE_CONFIG; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_PLAN_CODE_CONFIG - USING com.databricks.spark.csv OPTIONS (path ':dataFilesLocationRow/TMGSERVICES_PLAN_CODE_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS TMGSERVICES_PLAN_CODE_CONFIG +CREATE EXTERNAL TABLE IF NOT EXISTS staging_SERVICES_PLAN_CODE_CONFIG + USING com.databricks.spark.csv OPTIONS (path ':dataFilesLocationRow/SERVICES_PLAN_CODE_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS SERVICES_PLAN_CODE_CONFIG ( PLAN_CODE_ID INT NOT NULL, CLIENT_REF_ID INT NOT NULL, @@ -2555,14 +2555,14 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_PLAN_CODE_CONFIG SRC_SYS_REC_ID VARCHAR(15) NULL, PRIMARY KEY (PLAN_CODE_ID,CLIENT_ID) ) USING row OPTIONS(partition_by 'PLAN_CODE_ID,CLIENT_ID', buckets '32',redundancy '1'); - INSERT INTO TMGSERVICES_PLAN_CODE_CONFIG SELECT * FROM staging_TMGSERVICES_PLAN_CODE_CONFIG; + INSERT INTO SERVICES_PLAN_CODE_CONFIG SELECT * FROM staging_SERVICES_PLAN_CODE_CONFIG; -DROP TABLE IF EXISTS TMGSERVICES_KEY_GENERATOR; -DROP TABLE IF EXISTS staging_TMGSERVICES_KEY_GENERATOR; +DROP TABLE IF EXISTS SERVICES_KEY_GENERATOR; +DROP TABLE IF EXISTS staging_SERVICES_KEY_GENERATOR; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_KEY_GENERATOR - USING com.databricks.spark.csv OPTIONS (path ':dataFilesLocationRow/TMGSERVICES_KEY_GENERATOR.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS TMGSERVICES_KEY_GENERATOR +CREATE EXTERNAL TABLE IF NOT EXISTS staging_SERVICES_KEY_GENERATOR + USING com.databricks.spark.csv OPTIONS (path ':dataFilesLocationRow/SERVICES_KEY_GENERATOR.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS 
SERVICES_KEY_GENERATOR ( KEY_GEN_ID INT NOT NULL, CLIENT_REF_ID INT NOT NULL, @@ -2580,14 +2580,14 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_KEY_GENERATOR SRC_SYS_REC_ID VARCHAR(15) NULL, PRIMARY KEY (KEY_GEN_ID,CLIENT_ID) ) USING row OPTIONS(partition_by 'KEY_GEN_ID,CLIENT_ID', buckets '32',redundancy '1' ); - INSERT INTO TMGSERVICES_KEY_GENERATOR SELECT * FROM staging_TMGSERVICES_KEY_GENERATOR; + INSERT INTO SERVICES_KEY_GENERATOR SELECT * FROM staging_SERVICES_KEY_GENERATOR; -DROP TABLE IF EXISTS TMGSERVICES_GROUP_CODE_CONFIG; -DROP TABLE IF EXISTS staging_TMGSERVICES_GROUP_CODE_CONFIG; +DROP TABLE IF EXISTS SERVICES_GROUP_CODE_CONFIG; +DROP TABLE IF EXISTS staging_SERVICES_GROUP_CODE_CONFIG; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_GROUP_CODE_CONFIG - USING com.databricks.spark.csv OPTIONS (path ':dataFilesLocationRow/TMGSERVICES_GROUP_CODE_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); - CREATE TABLE IF NOT EXISTS TMGSERVICES_GROUP_CODE_CONFIG +CREATE EXTERNAL TABLE IF NOT EXISTS staging_SERVICES_GROUP_CODE_CONFIG + USING com.databricks.spark.csv OPTIONS (path ':dataFilesLocationRow/SERVICES_GROUP_CODE_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + CREATE TABLE IF NOT EXISTS SERVICES_GROUP_CODE_CONFIG ( GROUP_ID INT NOT NULL, CLIENT_REF_ID INT NOT NULL, @@ -2606,14 +2606,14 @@ CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_GROUP_CODE_CONFIG PRIMARY KEY (GROUP_ID,CLIENT_ID) ) USING row OPTIONS(partition_by 'GROUP_ID,CLIENT_ID', buckets '32',redundancy '1'); - INSERT INTO TMGSERVICES_GROUP_CODE_CONFIG SELECT * FROM staging_TMGSERVICES_GROUP_CODE_CONFIG; + INSERT INTO SERVICES_GROUP_CODE_CONFIG SELECT * FROM staging_SERVICES_GROUP_CODE_CONFIG; -DROP TABLE IF EXISTS TMGSERVICES_DNIS_CONFIG; -DROP TABLE IF EXISTS staging_TMGSERVICES_DNIS_CONFIG; +DROP TABLE IF EXISTS SERVICES_DNIS_CONFIG; +DROP TABLE IF EXISTS staging_SERVICES_DNIS_CONFIG; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_DNIS_CONFIG - USING com.databricks.spark.csv OPTIONS (path ':dataFilesLocationRow/TMGSERVICES_DNIS_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS TMGSERVICES_DNIS_CONFIG +CREATE EXTERNAL TABLE IF NOT EXISTS staging_SERVICES_DNIS_CONFIG + USING com.databricks.spark.csv OPTIONS (path ':dataFilesLocationRow/SERVICES_DNIS_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS SERVICES_DNIS_CONFIG ( DNIS_ID INT NOT NULL, CLIENT_REF_ID INT NOT NULL, @@ -2632,14 +2632,14 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_DNIS_CONFIG SRC_SYS_REC_ID VARCHAR(15) NULL, PRIMARY KEY (DNIS_ID,CLIENT_ID) ) USING row OPTIONS(partition_by 'DNIS_ID,CLIENT_ID', buckets '32',redundancy '1'); - INSERT INTO TMGSERVICES_DNIS_CONFIG SELECT * FROM staging_TMGSERVICES_DNIS_CONFIG; + INSERT INTO SERVICES_DNIS_CONFIG SELECT * FROM staging_SERVICES_DNIS_CONFIG; -DROP TABLE IF EXISTS TMGSERVICES_DENIAL_REASON_CONFIG; -DROP TABLE IF EXISTS staging_TMGSERVICES_DENIAL_REASON_CONFIG; +DROP TABLE IF EXISTS SERVICES_DENIAL_REASON_CONFIG; +DROP TABLE IF EXISTS staging_SERVICES_DENIAL_REASON_CONFIG; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_DENIAL_REASON_CONFIG - USING 
com.databricks.spark.csv OPTIONS (path ':dataFilesLocationRow/TMGSERVICES_DENIAL_REASON_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS TMGSERVICES_DENIAL_REASON_CONFIG +CREATE EXTERNAL TABLE IF NOT EXISTS staging_SERVICES_DENIAL_REASON_CONFIG + USING com.databricks.spark.csv OPTIONS (path ':dataFilesLocationRow/SERVICES_DENIAL_REASON_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS SERVICES_DENIAL_REASON_CONFIG ( DENIAL_REASON_ID INT NOT NULL, CLIENT_REF_ID INT NOT NULL, @@ -2656,14 +2656,14 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_DENIAL_REASON_CONFIG SRC_SYS_REC_ID VARCHAR(15) NULL, PRIMARY KEY (DENIAL_REASON_ID,CLIENT_ID) ) USING row OPTIONS(partition_by 'DENIAL_REASON_ID,CLIENT_ID', buckets '32',redundancy '1'); - INSERT INTO TMGSERVICES_DENIAL_REASON_CONFIG SELECT * FROM staging_TMGSERVICES_DENIAL_REASON_CONFIG; + INSERT INTO SERVICES_DENIAL_REASON_CONFIG SELECT * FROM staging_SERVICES_DENIAL_REASON_CONFIG; -DROP TABLE IF EXISTS TMGSERVICES_CLIENT_MASTER; -DROP TABLE IF EXISTS staging_TMGSERVICES_CLIENT_MASTER; +DROP TABLE IF EXISTS SERVICES_CLIENT_MASTER; +DROP TABLE IF EXISTS staging_SERVICES_CLIENT_MASTER; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_CLIENT_MASTER - USING com.databricks.spark.csv OPTIONS (path ':dataFilesLocationRow/TMGSERVICES_CLIENT_MASTER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS TMGSERVICES_CLIENT_MASTER +CREATE EXTERNAL TABLE IF NOT EXISTS staging_SERVICES_CLIENT_MASTER + USING com.databricks.spark.csv OPTIONS (path ':dataFilesLocationRow/SERVICES_CLIENT_MASTER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS SERVICES_CLIENT_MASTER ( CLIENT_REF_ID INT NOT NULL, VER BIGINT NOT NULL, @@ -2671,7 +2671,7 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_CLIENT_MASTER CLIENT_LEGACY_CD VARCHAR(5) NULL, CLIENT_NAME VARCHAR(10) NULL, MEMBER_ID_FORMAT VARCHAR(15) NULL, - TMG_CALL_CLIENT_CODE VARCHAR(10) NULL, + CALL_CLIENT_CODE VARCHAR(10) NULL, CREATE_date date NULL, UPDATED_date date NULL, USER_NAME VARCHAR(15) NULL, @@ -2681,14 +2681,14 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_CLIENT_MASTER SRC_SYS_REC_ID VARCHAR(15) NULL, PRIMARY KEY (CLIENT_REF_ID,CLIENT_ID) ) USING row OPTIONS(partition_by 'CLIENT_REF_ID,CLIENT_ID', buckets '32',redundancy '1'); - INSERT INTO TMGSERVICES_CLIENT_MASTER SELECT * FROM staging_TMGSERVICES_CLIENT_MASTER; + INSERT INTO SERVICES_CLIENT_MASTER SELECT * FROM staging_SERVICES_CLIENT_MASTER; -DROP TABLE IF EXISTS TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS; -DROP TABLE IF EXISTS staging_TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS; +DROP TABLE IF EXISTS SERVICES_SUBJECT_CATEGORY_TRANSLATIONS; +DROP TABLE IF EXISTS staging_SERVICES_SUBJECT_CATEGORY_TRANSLATIONS; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS - USING com.databricks.spark.csv OPTIONS (path ':dataFilesLocationRow/TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS +CREATE EXTERNAL TABLE IF NOT EXISTS staging_SERVICES_SUBJECT_CATEGORY_TRANSLATIONS + USING com.databricks.spark.csv 
OPTIONS (path ':dataFilesLocationRow/SERVICES_SUBJECT_CATEGORY_TRANSLATIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS SERVICES_SUBJECT_CATEGORY_TRANSLATIONS ( SUBJ_CAT_ID INT NOT NULL, CLIENT_REF_ID INT NOT NULL, @@ -2710,14 +2710,14 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS SRC_SYS_REC_ID VARCHAR(15) NULL, PRIMARY KEY (SUBJ_CAT_ID,CLIENT_ID) ) USING row OPTIONS(partition_by 'SUBJ_CAT_ID,CLIENT_ID', buckets '32',redundancy '1'); - INSERT INTO TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS SELECT * FROM staging_TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS; + INSERT INTO SERVICES_SUBJECT_CATEGORY_TRANSLATIONS SELECT * FROM staging_SERVICES_SUBJECT_CATEGORY_TRANSLATIONS; -DROP TABLE IF EXISTS TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS; -DROP TABLE IF EXISTS staging_TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS; +DROP TABLE IF EXISTS SERVICES_PTMR_VARIABLE_TRANSLATIONS; +DROP TABLE IF EXISTS staging_SERVICES_PTMR_VARIABLE_TRANSLATIONS; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS - USING com.databricks.spark.csv OPTIONS (path ':dataFilesLocationRow/TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS +CREATE EXTERNAL TABLE IF NOT EXISTS staging_SERVICES_PTMR_VARIABLE_TRANSLATIONS + USING com.databricks.spark.csv OPTIONS (path ':dataFilesLocationRow/SERVICES_PTMR_VARIABLE_TRANSLATIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS SERVICES_PTMR_VARIABLE_TRANSLATIONS ( VARIABLE_ID INT NOT NULL, VER BIGINT NOT NULL, @@ -2735,14 +2735,14 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS PRIMARY KEY (VARIABLE_ID) ) USING row OPTIONS(partition_by 'VARIABLE_ID', buckets '32',redundancy '1'); - INSERT INTO TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS SELECT * FROM staging_TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS; + INSERT INTO SERVICES_PTMR_VARIABLE_TRANSLATIONS SELECT * FROM staging_SERVICES_PTMR_VARIABLE_TRANSLATIONS; -DROP TABLE IF EXISTS TMGSERVICES_ACCOUNTING_CODES; -DROP TABLE IF EXISTS staging_TMGSERVICES_ACCOUNTING_CODES; +DROP TABLE IF EXISTS SERVICES_ACCOUNTING_CODES; +DROP TABLE IF EXISTS staging_SERVICES_ACCOUNTING_CODES; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_ACCOUNTING_CODES - USING com.databricks.spark.csv OPTIONS (path ':dataFilesLocationRow/TMGSERVICES_ACCOUNTING_CODES.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS TMGSERVICES_ACCOUNTING_CODES +CREATE EXTERNAL TABLE IF NOT EXISTS staging_SERVICES_ACCOUNTING_CODES + USING com.databricks.spark.csv OPTIONS (path ':dataFilesLocationRow/SERVICES_ACCOUNTING_CODES.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS SERVICES_ACCOUNTING_CODES ( ACCOUNTING_CODE_ID INT NOT NULL, CLIENT_REF_ID INT NOT NULL, @@ -2765,7 +2765,7 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_ACCOUNTING_CODES SRC_SYS_REC_ID VARCHAR(15) NULL, PRIMARY KEY (ACCOUNTING_CODE_ID,CLIENT_ID) ) USING row OPTIONS(partition_by 'ACCOUNTING_CODE_ID,CLIENT_ID', buckets '32',redundancy '1'); - INSERT INTO TMGSERVICES_ACCOUNTING_CODES SELECT * FROM 
staging_TMGSERVICES_ACCOUNTING_CODES;
+ INSERT INTO SERVICES_ACCOUNTING_CODES SELECT * FROM staging_SERVICES_ACCOUNTING_CODES;
DROP TABLE IF EXISTS UNAPPLIED_CASH;
DROP TABLE IF EXISTS staging_UNAPPLIED_CASHE;
diff --git a/dtests/src/resources/scripts/cdcConnector/createAndLoadCdcCol50Tables.sql b/dtests/src/resources/scripts/cdcConnector/createAndLoadCdcCol50Tables.sql
index 500ef1fc00..201caa4a77 100644
--- a/dtests/src/resources/scripts/cdcConnector/createAndLoadCdcCol50Tables.sql
+++ b/dtests/src/resources/scripts/cdcConnector/createAndLoadCdcCol50Tables.sql
@@ -1,206 +1,206 @@
CREATE EXTERNAL TABLE staging_PERSON_EVENT
- USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/DataGenerator/PERSON_EVENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
+ USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/DataGenerator/PERSON_EVENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
CREATE TABLE PERSON_EVENT USING column OPTIONS(partition_by 'PRSN_EVNT_ID', buckets '32',key_columns 'CLIENT_ID,PRSN_EVNT_ID ') AS (SELECT * FROM staging_PERSON_EVENT);
CREATE EXTERNAL TABLE staging_PERSON_EVENT_ATTRIBUTE
- USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/DataGenerator/PERSON_EVENT_ATTRIBUTE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
+ USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/DataGenerator/PERSON_EVENT_ATTRIBUTE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
CREATE TABLE PERSON_EVENT_ATTRIBUTE USING column OPTIONS(partition_by 'PRSN_EVNT_ID', buckets '32',key_columns 'CLIENT_ID,PRSN_EVNT_ID,PRSN_EVNT_ATTR_ID ') AS (SELECT * FROM staging_PERSON_EVENT_ATTRIBUTE);
CREATE EXTERNAL TABLE staging_CLAIM_STATUS
- USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/DataGenerator/CLAIM_STATUS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
+ USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/DataGenerator/CLAIM_STATUS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
CREATE TABLE CLAIM_STATUS USING column OPTIONS(partition_by 'PRSN_ID', buckets '32',key_columns 'CLIENT_ID,PRSN_ID,CLM_ID,SEQ_NUM,CLM_STAT_ID ') AS (SELECT * FROM staging_CLAIM_STATUS);
CREATE EXTERNAL TABLE staging_CLAIM_ADDITIONAL_DIAGNOSIS
- USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/DataGenerator/CLAIM_ADDITIONAL_DIAGNOSIS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
+ USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/DataGenerator/CLAIM_ADDITIONAL_DIAGNOSIS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
CREATE TABLE CLAIM_ADDITIONAL_DIAGNOSIS USING column OPTIONS(partition_by 'PRSN_ID', buckets '32',key_columns 'CLIENT_ID,PRSN_ID,CLM_ID,CLM_ADD_DIAG_ID ') AS (SELECT * FROM staging_CLAIM_ADDITIONAL_DIAGNOSIS);
CREATE EXTERNAL TABLE staging_CLAIM_DETAIL
- USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/DataGenerator/CLAIM_DETAIL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
+ USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/DataGenerator/CLAIM_DETAIL.dat', header 'true', inferSchema 'true', nullValue
'NULL', maxCharsPerColumn '4096'); CREATE TABLE CLAIM_DETAIL USING column OPTIONS(partition_by 'PRSN_ID', buckets '32',key_columns 'CLIENT_ID,PRSN_ID,CLM_ID,SEQ_NUM,CLM_DTL_ID ') AS (SELECT * FROM staging_CLAIM_DETAIL); CREATE EXTERNAL TABLE staging_CLAIM_PAYMENT_DETAIL - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/DataGenerator/CLAIM_PAYMENT_DETAIL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/DataGenerator/CLAIM_PAYMENT_DETAIL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE CLAIM_PAYMENT_DETAIL USING column OPTIONS(partition_by 'PRSN_ID',buckets '32',key_columns 'CLIENT_ID,PRSN_ID,CLM_ID,CLM_PAY_ID,CLM_PAY_DTL_ID ') AS (SELECT * FROM staging_CLAIM_PAYMENT_DETAIL); CREATE EXTERNAL TABLE staging_CLAIM_ATTRIBUTE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/DataGenerator/CLAIM_ATTRIBUTE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/DataGenerator/CLAIM_ATTRIBUTE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE CLAIM_ATTRIBUTE USING column OPTIONS(partition_by 'PRSN_ID', buckets '32',key_columns 'CLIENT_ID,PRSN_ID,CLM_ID,CLM_ATTR_ID ') AS (SELECT * FROM staging_CLAIM_ATTRIBUTE); CREATE EXTERNAL TABLE staging_CLAIM - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/DataGenerator/CLAIM.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/DataGenerator/CLAIM.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE CLAIM USING column OPTIONS(partition_by 'PRSN_ID', buckets '32',key_columns 'CLIENT_ID,PRSN_ID,CLM_ID ') AS (SELECT * FROM staging_CLAIM); CREATE EXTERNAL TABLE staging_PERSON_CONTACT - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/DataGenerator/PERSON_CONTACT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/DataGenerator/PERSON_CONTACT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE PERSON_CONTACT USING column OPTIONS(partition_by 'PRSN_ID',buckets '32',key_columns 'CLIENT_ID,PRSN_ID,CNTC_ID,PRSN_CNTC_ID ') AS (SELECT * FROM staging_PERSON_CONTACT); CREATE EXTERNAL TABLE staging_ORGANIZATION_CODE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/DataGenerator/ORGANIZATION_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/DataGenerator/ORGANIZATION_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE ORGANIZATION_CODE USING column OPTIONS(partition_by 'ORG_ID', buckets '32',key_columns 'CLIENT_ID,ORG_ID,CD_VAL_ID,ORG_CD_ID ') AS (SELECT * FROM staging_ORGANIZATION_CODE); CREATE EXTERNAL TABLE staging_COMPLAINT_STATUS - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/DataGenerator/COMPLAINT_STATUS.dat', header 'true', inferSchema 'true', nullValue 'NULL', 
maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/DataGenerator/COMPLAINT_STATUS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE COMPLAINT_STATUS USING column OPTIONS(partition_by 'INQ_ID',buckets '32',key_columns 'CLIENT_ID,INQ_ID,COMPLAINT_ID,COMPLAINT_STAT_ID ' ) AS (SELECT * FROM staging_COMPLAINT_STATUS); CREATE EXTERNAL TABLE staging_CONTACT - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/DataGenerator/CONTACT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/DataGenerator/CONTACT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE CONTACT USING column OPTIONS(partition_by 'CNTC_ID', buckets '32',key_columns 'CLIENT_ID,CNTC_ID' ) AS (SELECT * FROM staging_CONTACT); CREATE EXTERNAL TABLE staging_CLAIM_PAYMENT - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/DataGenerator/CLAIM_PAYMENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/DataGenerator/CLAIM_PAYMENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE CLAIM_PAYMENT USING column OPTIONS(partition_by 'CLM_PAY_ID', buckets '32',key_columns 'CLIENT_ID,CLM_PAY_ID ' ) AS (SELECT * FROM staging_CLAIM_PAYMENT); CREATE EXTERNAL TABLE staging_TOPIC_COMMUNICATION - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/DataGenerator/TOPIC_COMMUNICATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/DataGenerator/TOPIC_COMMUNICATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE TOPIC_COMMUNICATION USING column OPTIONS(partition_by 'CMCN_INQ_ID', buckets '32',key_columns ' CLIENT_ID,CMCN_INQ_ID,TPC_INQ_ID,CMCN_ID,TPC_ID' ) AS (SELECT * FROM staging_TOPIC_COMMUNICATION); CREATE EXTERNAL TABLE staging_CONTACT_TYPE_CONTACT -USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/DataGenerator/CONTACT_TYPE_CONTACT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/DataGenerator/CONTACT_TYPE_CONTACT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE CONTACT_TYPE_CONTACT USING column OPTIONS(partition_by 'CNTC_ID', buckets '32',key_columns 'CLIENT_ID,CNTC_ID,ORG_CNTC_TYP_ID,CNTC_TYP_CNTC_ID ' ) AS (SELECT * FROM staging_CONTACT_TYPE_CONTACT); CREATE EXTERNAL TABLE staging_TOPIC - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/DataGenerator/TOPIC.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/DataGenerator/TOPIC.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE TOPIC USING column OPTIONS(partition_by 'INQ_ID',buckets '32',key_columns 'CLIENT_ID,INQ_ID,TPC_ID ' ) AS (SELECT * FROM staging_TOPIC); ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE 
EXTERNAL TABLE staging_LINE_ADDITIONAL_DIAGNOSIS - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/DataGenerator/LINE_ADDITIONAL_DIAGNOSIS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/DataGenerator/LINE_ADDITIONAL_DIAGNOSIS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE LINE_ADDITIONAL_DIAGNOSIS USING column OPTIONS(partition_by 'PRSN_ID',buckets '32',key_columns 'CLIENT_ID,PRSN_ID,CLM_ID,SEQ_NUM,CLM_ADD_DIAG_ID,LN_ADD_DIAG_ID ' ) AS (SELECT * FROM staging_LINE_ADDITIONAL_DIAGNOSIS); CREATE EXTERNAL TABLE staging_PROCEDURE_CODE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/DataGenerator/PROCEDURE_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/DataGenerator/PROCEDURE_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE PROCEDURE_CODE USING column OPTIONS(partition_by 'PR_CD_ID', buckets '32',key_columns 'CLIENT_ID,PR_CD_ID ' ) AS (SELECT * FROM staging_PROCEDURE_CODE); CREATE EXTERNAL TABLE staging_CODE_VALUE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/DataGenerator/CODE_VALUE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/DataGenerator/CODE_VALUE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE CODE_VALUE USING column OPTIONS(partition_by 'CD_VAL_ID', buckets '32',key_columns 'CLIENT_ID,CD_VAL_ID ' ) AS (SELECT * FROM staging_CODE_VALUE); CREATE EXTERNAL TABLE staging_POSTAL_ADDRESS - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/DataGenerator/POSTAL_ADDRESS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/DataGenerator/POSTAL_ADDRESS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE POSTAL_ADDRESS USING column OPTIONS(partition_by 'CNTC_ID',key_columns 'CLIENT_ID,CNTC_ID,PSTL_ADDR_ID') AS (SELECT * FROM staging_POSTAL_ADDRESS); ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE staging_ADJUSTMENT - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/ADJUSTMENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/ADJUSTMENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE ADJUSTMENT USING column OPTIONS(partition_by 'BILL_ENT_ID',buckets '32',key_columns 'CLIENT_ID,BILL_ENT_ID,ADJ_ID ' ) AS (SELECT * FROM staging_ADJUSTMENT); CREATE EXTERNAL TABLE staging_AGREEMENT - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/AGREEMENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/AGREEMENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE AGREEMENT 
USING column OPTIONS(partition_by 'AGREE_ID', buckets '32',key_columns 'CLIENT_ID,AGREE_ID ' ) AS (SELECT * FROM staging_AGREEMENT); CREATE EXTERNAL TABLE staging_BANK_ACCOUNT - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/BANK_ACCOUNT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/BANK_ACCOUNT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE BANK_ACCOUNT USING column OPTIONS(partition_by 'BNK_ORG_ID',buckets '32',key_columns 'CLIENT_ID,BNK_ORG_ID,BNK_ID,BNK_ACCT_ID ' ) AS (SELECT * FROM staging_BANK_ACCOUNT); CREATE EXTERNAL TABLE staging_BANK - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/BANK.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/BANK.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE BANK USING column OPTIONS(partition_by 'BNK_ORG_ID', buckets '32',key_columns 'CLIENT_ID,BNK_ORG_ID,BNK_ID ' ) AS (SELECT * FROM staging_BANK); CREATE EXTERNAL TABLE staging_BENEFIT_GROUP_NAME - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/BENEFIT_GROUP_NAME.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/BENEFIT_GROUP_NAME.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE BENEFIT_GROUP_NAME USING column OPTIONS(partition_by 'GRP_ID', buckets '32',key_columns 'CLIENT_ID,GRP_ID,BENE_GRP_ID,BENE_GRP_NM_ID ' ) AS (SELECT * FROM staging_BENEFIT_GROUP_NAME); CREATE EXTERNAL TABLE staging_BENEFIT_GROUPS - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/BENEFIT_GROUPS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/BENEFIT_GROUPS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE BENEFIT_GROUPS USING column OPTIONS(partition_by 'GRP_ID', buckets '32',key_columns 'CLIENT_ID,GRP_ID,BENE_PKG_ID,BENE_GRP_ID ' ) AS (SELECT * FROM staging_BENEFIT_GROUPS); CREATE EXTERNAL TABLE staging_BENEFIT_PACKAGE_ATTRIBUTE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/BENEFIT_PACKAGE_ATTRIBUTE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/BENEFIT_PACKAGE_ATTRIBUTE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE BENEFIT_PACKAGE_ATTRIBUTE USING column OPTIONS(partition_by 'BENE_PKG_ID', buckets '32',key_columns 'CLIENT_ID,BENE_PKG_ID,BENE_PKG_ATTR_ID ' ) AS (SELECT * FROM staging_BENEFIT_PACKAGE_ATTRIBUTE); CREATE EXTERNAL TABLE staging_BENEFIT_PACKAGE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/BENEFIT_PACKAGE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/BENEFIT_PACKAGE.dat', header 'true', inferSchema 'true', nullValue 'NULL', 
maxCharsPerColumn '4096'); CREATE TABLE BENEFIT_PACKAGE USING column OPTIONS(partition_by 'BENE_PKG_ID', buckets '32',key_columns 'CLIENT_ID,BENE_PKG_ID' ) AS (SELECT * FROM staging_BENEFIT_PACKAGE); CREATE EXTERNAL TABLE staging_BENEFIT_PACKAGE_RELATION - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/BENEFIT_PACKAGE_RELATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/BENEFIT_PACKAGE_RELATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE BENEFIT_PACKAGE_RELATION USING column OPTIONS(partition_by 'BENE_PKG_ID', buckets '32',key_columns 'CLIENT_ID,BENE_PKG_ID,PKG_RELN_ID ' ) AS (SELECT * FROM staging_BENEFIT_PACKAGE_RELATION); CREATE EXTERNAL TABLE staging_BILLING_ENTITY_CONTACT - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/BILLING_ENTITY_CONTACT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/BILLING_ENTITY_CONTACT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE BILLING_ENTITY_CONTACT USING column OPTIONS(partition_by 'BILL_ENT_ID',buckets '32',key_columns 'CLIENT_ID,BILL_ENT_ID,CNTC_ID,BILL_ENT_CNTC_ID ' ) AS (SELECT * FROM staging_BILLING_ENTITY_CONTACT); CREATE EXTERNAL TABLE staging_BILLING_ENTITY - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/BILLING_ENTITY.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/BILLING_ENTITY.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE BILLING_ENTITY USING column OPTIONS(partition_by 'BILL_ENT_ID', buckets '32',key_columns 'CLIENT_ID,BILL_ENT_ID') AS (SELECT * FROM staging_BILLING_ENTITY); CREATE EXTERNAL TABLE staging_BILLING_ENTITY_DETAIL - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/BILLING_ENTITY_DETAIL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/BILLING_ENTITY_DETAIL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE BILLING_ENTITY_DETAIL USING column OPTIONS(partition_by 'BILL_ENT_ID', buckets '32',key_columns 'CLIENT_ID,BILL_ENT_ID ' ) AS (SELECT * FROM staging_BILLING_ENTITY_DETAIL); CREATE EXTERNAL TABLE staging_BILLING_ENTITY_SCHEDULE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/BILLING_ENTITY_SCHEDULE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/BILLING_ENTITY_SCHEDULE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE BILLING_ENTITY_SCHEDULE USING column OPTIONS(partition_by 'BILL_ENT_ID', buckets '32',key_columns 'CLIENT_ID,BILL_ENT_ID,BILL_SCHD_ID,BILL_ENT_SCHD_ID ' ) AS (SELECT * FROM staging_BILLING_ENTITY_SCHEDULE); CREATE EXTERNAL TABLE staging_BILLING_RECONCILIATION - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/BILLING_RECONCILIATION.dat', header 
'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
+ USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/BILLING_RECONCILIATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
CREATE TABLE BILLING_RECONCILIATION USING column OPTIONS(partition_by 'BILL_ENT_ID', buckets '32',key_columns 'CLIENT_ID,BILL_ENT_ID,BILL_RECON_ID ' ) AS (SELECT * FROM staging_BILLING_RECONCILIATION);
CREATE EXTERNAL TABLE staging_BILLING_SCHEDULE
- USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/BILLING_SCHEDULE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
+ USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/BILLING_SCHEDULE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
CREATE TABLE BILLING_SCHEDULE USING column OPTIONS(partition_by 'BILL_SCHD_ID', buckets '32',key_columns 'CLIENT_ID,BILL_SCHD_ID ' ) AS (SELECT * FROM staging_BILLING_SCHEDULE);
CREATE EXTERNAL TABLE staging_BILLING_SOURCE
- USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/BILLING_SOURCE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
+ USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/BILLING_SOURCE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
CREATE TABLE BILLING_SOURCE USING column OPTIONS(partition_by 'BILL_ENT_ID', buckets '32',key_columns 'CLIENT_ID,BILL_ENT_ID,SRC_TYP_REF_ID,BILL_SRC_ID ' ) AS (SELECT * FROM staging_BILLING_SOURCE);
CREATE EXTERNAL TABLE staging_CHARGE_ITEM
- USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/CHARGE_ITEM.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
+ USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/CHARGE_ITEM.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
CREATE TABLE CHARGE_ITEM USING column OPTIONS(partition_by 'BILL_ENT_ID', buckets '32',key_columns 'CLIENT_ID,BILL_ENT_ID,BILL_ENT_SCHD_ID,CHRG_ITM_ID ' ) AS (SELECT * FROM staging_CHARGE_ITEM);
CREATE EXTERNAL TABLE staging_CHECKS
- USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/CHECKS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
+ USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/CHECKS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
CREATE TABLE CHECKS USING column OPTIONS(partition_by 'CLM_PAY_ID', buckets '32',key_columns 'CLIENT_ID,CLM_PAY_ID,CHK_ID ' ) AS (SELECT * FROM staging_CHECKS);
CREATE EXTERNAL TABLE staging_CHECK_STATUS
- USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/CHECK_STATUS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
+ USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/CHECK_STATUS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
CREATE TABLE CHECK_STATUS USING column OPTIONS(partition_by 'CLM_PAY_ID',buckets '32',key_columns 'CLIENT_ID,CLM_PAY_ID,CHK_ID,CHK_STAT_ID ' ) AS (SELECT * FROM staging_CHECK_STATUS);
CREATE EXTERNAL TABLE staging_CLAIM_COB
- USING com.databricks.spark.csv OPTIONS (path
'/export/shared/QA_DATA/TMG_Data_20G/CLAIM_COB.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/CLAIM_COB.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE CLAIM_COB USING column OPTIONS(partition_by 'PRSN_ID',buckets '32',key_columns 'CLIENT_ID,PRSN_ID,CLM_ID,CLM_COB_ID ' ) AS (SELECT * FROM staging_CLAIM_COB); CREATE EXTERNAL TABLE staging_CLAIM_COSHARE_TRACKING - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/CLAIM_COSHARE_TRACKING.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/CLAIM_COSHARE_TRACKING.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE CLAIM_COSHARE_TRACKING USING column OPTIONS(partition_by 'PRSN_ID',buckets '32',key_columns 'CLIENT_ID,PRSN_ID,CLAIM_ID,LINE_NO ' ) AS (SELECT * FROM staging_CLAIM_COSHARE_TRACKING); CREATE EXTERNAL TABLE staging_CLAIM_HOSPITAL - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/CLAIM_HOSPITAL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/CLAIM_HOSPITAL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE CLAIM_HOSPITAL USING column OPTIONS(partition_by 'PRSN_ID',buckets '32',key_columns 'CLIENT_ID,PRSN_ID,CLM_ID,CLM_HOSP_ID ' ) AS (SELECT * FROM staging_CLAIM_HOSPITAL); CREATE EXTERNAL TABLE staging_CLAIM_LINE_ATTRIBUTE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/CLAIM_LINE_ATTRIBUTE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/CLAIM_LINE_ATTRIBUTE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE CLAIM_LINE_ATTRIBUTE USING column OPTIONS(partition_by 'PRSN_ID', buckets '32',key_columns 'CLIENT_ID,PRSN_ID,CLM_DTL_ID,CLM_LN_ATTR_ID ' ) AS (SELECT * FROM staging_CLAIM_LINE_ATTRIBUTE); CREATE EXTERNAL TABLE staging_CLAIM_PAYMENT_REDUCTION - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/CLAIM_PAYMENT_REDUCTION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/CLAIM_PAYMENT_REDUCTION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE CLAIM_PAYMENT_REDUCTION USING column OPTIONS(partition_by 'CLM_PAY_RDCTN_ID', buckets '32',key_columns 'CLIENT_ID,CLM_PAY_RDCTN_ID ' ) AS (SELECT * FROM staging_CLAIM_PAYMENT_REDUCTION); CREATE EXTERNAL TABLE staging_CLAIM_REDUCTION_DETAIL - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/CLAIM_REDUCTION_DETAIL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/CLAIM_REDUCTION_DETAIL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE CLAIM_REDUCTION_DETAIL USING column OPTIONS(partition_by 'PRSN_ID',buckets '32',key_columns 
'CLIENT_ID,PRSN_ID,CLM_ID,CLM_PAY_RDCTN_ID,CLM_RDCTN_DTL_ID ' ) AS (SELECT * FROM staging_CLAIM_REDUCTION_DETAIL); CREATE EXTERNAL TABLE staging_CLAIM_REDUCTION_HISTORY - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/CLAIM_REDUCTION_HISTORY.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/CLAIM_REDUCTION_HISTORY.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE CLAIM_REDUCTION_HISTORY USING column OPTIONS(partition_by 'CLM_PAY_RDCTN_ID', buckets '32',key_columns 'CLIENT_ID,CLM_PAY_RDCTN_ID,CLM_RDCTN_HIST_ID ' ) AS (SELECT * FROM staging_CLAIM_REDUCTION_HISTORY); CREATE EXTERNAL TABLE staging_CLIENT_REFERENCE_DATA - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/CLIENT_REFERENCE_DATA.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/CLIENT_REFERENCE_DATA.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE CLIENT_REFERENCE_DATA USING column OPTIONS(partition_by 'CLIENT_REF_DTA_ID', buckets '32',key_columns 'CLIENT_ID,CLIENT_REF_DTA_ID ' ) AS (SELECT * FROM staging_CLIENT_REFERENCE_DATA); CREATE EXTERNAL TABLE staging_CLIENTS - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/CLIENTS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/CLIENTS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE CLIENTS USING column OPTIONS(partition_by 'CLIENT_ID', buckets '32',key_columns 'CLIENT_ID ' ) AS (SELECT * FROM staging_CLIENTS); CREATE EXTERNAL TABLE staging_COB_CLAIM_DIAGNOSIS - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/COB_CLAIM_DIAGNOSIS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/COB_CLAIM_DIAGNOSIS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE COB_CLAIM_DIAGNOSIS USING column OPTIONS(partition_by 'PRSN_ID',buckets '32',key_columns 'CLIENT_ID,PRSN_ID,PRSN_COB_ID,REC_ORD ' ) AS (SELECT * FROM staging_COB_CLAIM_DIAGNOSIS); CREATE EXTERNAL TABLE staging_COB_ORGANIZATION_PERSON - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/COB_ORGANIZATION_PERSON.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/COB_ORGANIZATION_PERSON.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE COB_ORGANIZATION_PERSON USING column OPTIONS(partition_by 'PRSN_ID',buckets '32',key_columns 'CLIENT_ID,PRSN_ID,PRSN_COB_ID,ORG_PRSN_TYP_REF_ID ' ) AS (SELECT * FROM staging_COB_ORGANIZATION_PERSON); diff --git a/dtests/src/resources/scripts/cdcConnector/createCDCRowTables.sql b/dtests/src/resources/scripts/cdcConnector/createCDCRowTables.sql index 584d1f2381..189bec01f9 100644 --- a/dtests/src/resources/scripts/cdcConnector/createCDCRowTables.sql +++ 
b/dtests/src/resources/scripts/cdcConnector/createCDCRowTables.sql @@ -1,5 +1,5 @@ CREATE EXTERNAL TABLE IF NOT EXISTS staging_INQUIRY - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/INQUIRY.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/INQUIRY.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS INQUIRY( INQ_ID BIGINT NOT NULL, VER BIGINT NOT NULL, CLIENT_ID BIGINT NOT NULL, @@ -15,7 +15,7 @@ USING row OPTIONS(partition_by 'INQ_ID', buckets '32',redundancy '1'); INSERT INTO INQUIRY SELECT * FROM staging_INQUIRY; CREATE EXTERNAL TABLE IF NOT EXISTS staging_ORGANIZATIONS - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/ORGANIZATIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/ORGANIZATIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS ORGANIZATIONS(ORG_ID BIGINT NOT NULL, VER BIGINT NOT NULL, CLIENT_ID BIGINT NOT NULL, @@ -35,7 +35,7 @@ USING row OPTIONS(partition_by 'ORG_ID', buckets '32',redundancy '1'); INSERT INTO ORGANIZATIONS SELECT * FROM staging_ORGANIZATIONS; CREATE EXTERNAL TABLE IF NOT EXISTS staging_UM_REVIEW - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/UM_REVIEW.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/UM_REVIEW.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS UM_REVIEW(UM_RVW_ID BIGINT NOT NULL, VER BIGINT NOT NULL, CLIENT_ID BIGINT NOT NULL, @@ -82,7 +82,7 @@ INSERT INTO UM_REVIEW SELECT * FROM staging_UM_REVIEW; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_INVOICE_DETAIL - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/INVOICE_DETAIL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/INVOICE_DETAIL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS INVOICE_DETAIL( INV_ID BIGINT NOT NULL, CLIENT_ID BIGINT NOT NULL, @@ -109,7 +109,7 @@ INSERT INTO INVOICE_DETAIL SELECT * FROM staging_INVOICE_DETAIL; ---- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_BENEFIT_GROUP_NAME - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/BENEFIT_GROUP_NAME.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/BENEFIT_GROUP_NAME.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS BENEFIT_GROUP_NAME( GRP_ID BIGINT NOT NULL, BENE_GRP_ID BIGINT NOT NULL, @@ -131,7 +131,7 @@ CREATE TABLE IF NOT EXISTS BENEFIT_GROUP_NAME( ---- CREATE TEMPORARY STAGING TABLE TO 
LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_BENEFIT_GROUPS - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/BENEFIT_GROUPS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/BENEFIT_GROUPS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS BENEFIT_GROUPS( GRP_ID BIGINT NOT NULL, BENE_PKG_ID BIGINT NOT NULL, @@ -152,7 +152,7 @@ CREATE TABLE IF NOT EXISTS BENEFIT_GROUPS( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_BILLING_RECONCILIATION - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/BILLING_RECONCILIATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/BILLING_RECONCILIATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS BILLING_RECONCILIATION( BILL_ENT_ID BIGINT NOT NULL, BILL_RECON_ID BIGINT NOT NULL, @@ -175,7 +175,7 @@ CREATE TABLE IF NOT EXISTS BILLING_RECONCILIATION( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_CHECKS - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/CHECKS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/CHECKS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS CHECKS( CLM_PAY_ID BIGINT NOT NULL, CHK_ID BIGINT NOT NULL, @@ -199,7 +199,7 @@ CREATE TABLE IF NOT EXISTS CHECKS( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_CLIENT_REFERENCE_DATA - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/CLIENT_REFERENCE_DATA.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/CLIENT_REFERENCE_DATA.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS CLIENT_REFERENCE_DATA( CLIENT_REF_DTA_ID BIGINT NOT NULL, VER BIGINT NOT NULL, @@ -220,7 +220,7 @@ CREATE TABLE IF NOT EXISTS CLIENT_REFERENCE_DATA( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_COB_CLAIM_DIAGNOSIS - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/COB_CLAIM_DIAGNOSIS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/COB_CLAIM_DIAGNOSIS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS COB_CLAIM_DIAGNOSIS( PRSN_ID BIGINT NOT NULL, PRSN_COB_ID BIGINT NOT NULL, @@ -240,7 +240,7 @@ CREATE TABLE IF NOT EXISTS COB_CLAIM_DIAGNOSIS( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT 
EXISTS staging_COB_ORGANIZATION_PERSON - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/COB_ORGANIZATION_PERSON.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/COB_ORGANIZATION_PERSON.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS COB_ORGANIZATION_PERSON( PRSN_ID BIGINT NOT NULL, PRSN_COB_ID BIGINT NOT NULL, @@ -271,7 +271,7 @@ INSERT INTO COB_ORGANIZATION_PERSON SELECT * FROM staging_COB_ORGANIZATION_PERSO ---- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_DIAGNOSIS_CODE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/DIAGNOSIS_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/DIAGNOSIS_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS DIAGNOSIS_CODE( DIAG_CD_ID BIGINT NOT NULL, VER BIGINT NOT NULL, @@ -295,7 +295,7 @@ CREATE TABLE IF NOT EXISTS DIAGNOSIS_CODE( ---- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_ELECTRONIC_ADDRESS - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/ELECTRONIC_ADDRESS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/ELECTRONIC_ADDRESS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS ELECTRONIC_ADDRESS( CNTC_ID BIGINT NOT NULL, ELEC_ADDR_ID BIGINT NOT NULL, @@ -315,7 +315,7 @@ CREATE TABLE IF NOT EXISTS ELECTRONIC_ADDRESS( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_ENTITY_BANK_ACCOUNT - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/ENTITY_BANK_ACCOUNT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/ENTITY_BANK_ACCOUNT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS ENTITY_BANK_ACCOUNT( bill_ent_id bigint NOT NULL, bnk_acct_id bigint NOT NULL, @@ -334,7 +334,7 @@ CREATE TABLE IF NOT EXISTS ENTITY_BANK_ACCOUNT( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_ETL_METADATA - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/ETL_METADATA.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/ETL_METADATA.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS ETL_METADATA( MAIN_TBL VARCHAR(15) NOT NULL, TGT_TBL VARCHAR(15) NOT NULL, @@ -357,7 +357,7 @@ CREATE TABLE IF NOT EXISTS ETL_METADATA( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS 
staging_EXHIBIT - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/EXHIBIT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/EXHIBIT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS EXHIBIT( EXHIBIT_ID BIGINT NOT NULL, VER BIGINT NOT NULL, @@ -373,7 +373,7 @@ CREATE TABLE IF NOT EXISTS EXHIBIT( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_EXPLANATION_CODE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/EXPLANATION_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/EXPLANATION_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS EXPLANATION_CODE( EXPLAIN_CD_ID BIGINT NOT NULL, VER BIGINT NOT NULL, @@ -400,7 +400,7 @@ CREATE TABLE IF NOT EXISTS EXPLANATION_CODE( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_FILE_TRANSFER_RUN - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/FILE_TRANSFER_RUN.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/FILE_TRANSFER_RUN.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS FILE_TRANSFER_RUN( FL_TRANS_RUN_ID BIGINT NOT NULL, VER BIGINT NOT NULL, @@ -432,7 +432,7 @@ CREATE TABLE IF NOT EXISTS FILE_TRANSFER_RUN( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_FILE_TRANSFER_STAT - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/FILE_TRANSFER_STAT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/FILE_TRANSFER_STAT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS FILE_TRANSFER_STAT( FL_TRANS_STAT_ID BIGINT NOT NULL, VER BIGINT NOT NULL, @@ -469,7 +469,7 @@ CREATE TABLE IF NOT EXISTS FILE_TRANSFER_STAT( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_GENERAL_LEDGER - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/GENERAL_LEDGER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/GENERAL_LEDGER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS GENERAL_LEDGER( BILL_ENT_ID BIGINT NOT NULL, GL_ID BIGINT NOT NULL, @@ -494,7 +494,7 @@ CREATE TABLE IF NOT EXISTS GENERAL_LEDGER( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_GROUP_RELATION - USING com.databricks.spark.csv OPTIONS (path 
'/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/GROUP_RELATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/GROUP_RELATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS GROUP_RELATION( GRP_ID BIGINT NOT NULL, GRP_RELN_ID BIGINT NOT NULL, @@ -512,7 +512,7 @@ CREATE TABLE IF NOT EXISTS GROUP_RELATION( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_LEP_APPEAL - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/LEP_APPEAL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/LEP_APPEAL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS LEP_APPEAL( PRSN_ID BIGINT NOT NULL, LEP_APL_ID BIGINT NOT NULL, @@ -532,7 +532,7 @@ CREATE TABLE IF NOT EXISTS LEP_APPEAL( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_LETTER - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/LETTER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/LETTER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS LETTER( PRSN_ID BIGINT NOT NULL, LTR_ID BIGINT NOT NULL, @@ -548,7 +548,7 @@ CREATE TABLE IF NOT EXISTS LETTER( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_LINE_DISALLOW_EXPLANATION - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/LINE_DISALLOW_EXPLANATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/LINE_DISALLOW_EXPLANATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS LINE_DISALLOW_EXPLANATION( PRSN_ID BIGINT NOT NULL, CLM_ID BIGINT NOT NULL, @@ -569,7 +569,7 @@ CREATE TABLE IF NOT EXISTS LINE_DISALLOW_EXPLANATION( ---- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_LINE_PROCEDURE_MODIFIER - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/LINE_PROCEDURE_MODIFIER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/LINE_PROCEDURE_MODIFIER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS LINE_PROCEDURE_MODIFIER( PRSN_ID BIGINT NOT NULL, CLM_ID BIGINT NOT NULL, @@ -589,7 +589,7 @@ CREATE TABLE IF NOT EXISTS LINE_PROCEDURE_MODIFIER( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_MARX_CALENDAR - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/MARX_CALENDAR.dat', header 'true', inferSchema 'true', 
nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/MARX_CALENDAR.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS MARX_CALENDAR( MARX_CAL_ID BIGINT NOT NULL, VER BIGINT NOT NULL, @@ -609,7 +609,7 @@ CREATE TABLE IF NOT EXISTS MARX_CALENDAR( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_NOTE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/NOTE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/NOTE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS NOTE( INQ_ID BIGINT NOT NULL, SRC_ID BIGINT NOT NULL, @@ -632,7 +632,7 @@ CREATE TABLE IF NOT EXISTS NOTE( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_ORGANIZATION_CONTACT_TYPE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/ORGANIZATION_CONTACT_TYPE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/ORGANIZATION_CONTACT_TYPE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS ORGANIZATION_CONTACT_TYPE( ORG_ID BIGINT NOT NULL, CNTC_TYP_REF_ID BIGINT NOT NULL, @@ -649,7 +649,7 @@ CREATE TABLE IF NOT EXISTS ORGANIZATION_CONTACT_TYPE( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PAYMENT - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PAYMENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/PAYMENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PAYMENT( BILL_ENT_ID BIGINT NOT NULL, PAY_ID BIGINT NOT NULL, @@ -675,7 +675,7 @@ CREATE TABLE IF NOT EXISTS PAYMENT( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_COB - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_COB.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/PERSON_COB.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_COB( PRSN_ID BIGINT NOT NULL, PRSN_COB_ID BIGINT NOT NULL, @@ -725,7 +725,7 @@ CREATE TABLE IF NOT EXISTS PERSON_COB( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_CODE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/PERSON_CODE.dat', header 'true', inferSchema 'true', 
nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_CODE( PRSN_ID BIGINT NOT NULL, CD_VAL_ID BIGINT NOT NULL, @@ -745,7 +745,7 @@ CREATE TABLE IF NOT EXISTS PERSON_CODE( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_LEP_EVENT - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_LEP_EVENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/PERSON_LEP_EVENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_LEP_EVENT( PRSN_ID BIGINT NOT NULL, PRSN_LEP_EVNT_ID BIGINT NOT NULL, @@ -766,7 +766,7 @@ CREATE TABLE IF NOT EXISTS PERSON_LEP_EVENT( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_LEP_PROFILE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_LEP_PROFILE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/PERSON_LEP_PROFILE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_LEP_PROFILE( PRSN_ID BIGINT NOT NULL, PRSN_LEP_PRFL_ID BIGINT NOT NULL, @@ -785,7 +785,7 @@ CREATE TABLE IF NOT EXISTS PERSON_LEP_PROFILE( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_ORGANIZATION_RELATION - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_ORGANIZATION_RELATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/PERSON_ORGANIZATION_RELATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_ORGANIZATION_RELATION( PRSN_ID BIGINT NOT NULL, ORG_ID BIGINT NOT NULL, @@ -806,7 +806,7 @@ CREATE TABLE IF NOT EXISTS PERSON_ORGANIZATION_RELATION( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_PAYMENT_OPTIONS - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_PAYMENT_OPTIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/PERSON_PAYMENT_OPTIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_PAYMENT_OPTIONS( PRSN_ID BIGINT NOT NULL, PRSN_PAY_OPTN_ID BIGINT NOT NULL, @@ -827,7 +827,7 @@ CREATE TABLE IF NOT EXISTS PERSON_PAYMENT_OPTIONS( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_RELATION - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_RELATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/PERSON_RELATION.dat', header 'true', 
inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_RELATION( PRSN_ID BIGINT NOT NULL, RLTD_PRSN_ID BIGINT NOT NULL, @@ -848,7 +848,7 @@ CREATE TABLE IF NOT EXISTS PERSON_RELATION( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_ROLE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_ROLE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/PERSON_ROLE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_ROLE( PRSN_ID BIGINT NOT NULL, PRSN_TYP_ID BIGINT NOT NULL, @@ -868,7 +868,7 @@ CREATE TABLE IF NOT EXISTS PERSON_ROLE( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_SUBSIDY_PROFILE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_SUBSIDY_PROFILE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/PERSON_SUBSIDY_PROFILE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_SUBSIDY_PROFILE( PRSN_ID BIGINT NOT NULL, PRSN_SBSDY_PRFL_ID BIGINT NOT NULL, @@ -890,7 +890,7 @@ CREATE TABLE IF NOT EXISTS PERSON_SUBSIDY_PROFILE( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_WORK_ITEM - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_WORK_ITEM.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/PERSON_WORK_ITEM.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_WORK_ITEM( PRSN_ID BIGINT NOT NULL, PRSN_WRK_ITM_ID BIGINT NOT NULL, @@ -912,7 +912,7 @@ CREATE TABLE IF NOT EXISTS PERSON_WORK_ITEM( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PHONE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PHONE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/PHONE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PHONE( CNTC_ID BIGINT NOT NULL, PHN_ID BIGINT NOT NULL, @@ -933,7 +933,7 @@ CREATE TABLE IF NOT EXISTS PHONE( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PLAN_MEMBERSHIPS - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PLAN_MEMBERSHIPS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/PLAN_MEMBERSHIPS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PLAN_MEMBERSHIPS( PRSN_ID BIGINT 
NOT NULL, BENE_GRP_ID BIGINT NOT NULL, @@ -959,7 +959,7 @@ CREATE TABLE IF NOT EXISTS PLAN_MEMBERSHIPS( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_POS_CODE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/POS_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/POS_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS POS_CODE( POS_CD_ID BIGINT NOT NULL, VER BIGINT NOT NULL, @@ -980,7 +980,7 @@ CREATE TABLE IF NOT EXISTS POS_CODE( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PREMIUM - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PREMIUM.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/PREMIUM.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PREMIUM( GRP_ID BIGINT NOT NULL, PREM_CAT_ID BIGINT NOT NULL, @@ -1003,7 +1003,7 @@ CREATE TABLE IF NOT EXISTS PREMIUM( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_REFERENCE_DATA - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/REFERENCE_DATA.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/REFERENCE_DATA.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS REFERENCE_DATA( REF_DTA_ID BIGINT NOT NULL, VER BIGINT NOT NULL, @@ -1019,7 +1019,7 @@ CREATE TABLE IF NOT EXISTS REFERENCE_DATA( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_RETURNED_MAIL - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/RETURNED_MAIL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/RETURNED_MAIL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS RETURNED_MAIL( RTRN_MAIL_ID BIGINT NOT NULL, VER BIGINT NOT NULL, @@ -1040,7 +1040,7 @@ CREATE TABLE IF NOT EXISTS RETURNED_MAIL( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_UM_ACTIVITY - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/UM_ACTIVITY.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/UM_ACTIVITY.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS UM_ACTIVITY( UM_RVW_ID BIGINT NOT NULL, UM_ACTY_ID BIGINT NOT NULL, @@ -1068,7 +1068,7 @@ CREATE TABLE IF NOT EXISTS UM_ACTIVITY( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS 
staging_UM_DIAGNOSIS_LINE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/UM_DIAGNOSIS_LINE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/UM_DIAGNOSIS_LINE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS UM_DIAGNOSIS_LINE( UM_RVW_ID BIGINT NOT NULL, UM_DIAG_SET_ID BIGINT NOT NULL, @@ -1090,7 +1090,7 @@ CREATE TABLE IF NOT EXISTS UM_DIAGNOSIS_LINE( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_UM_DIAGNOSIS_SET - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/UM_DIAGNOSIS_SET.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/UM_DIAGNOSIS_SET.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS UM_DIAGNOSIS_SET( UM_RVW_ID BIGINT NOT NULL, UM_DIAG_SET_ID BIGINT NOT NULL, @@ -1110,7 +1110,7 @@ CREATE TABLE IF NOT EXISTS UM_DIAGNOSIS_SET( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_BENEFIT_PACKAGE_RELATION - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/BENEFIT_PACKAGE_RELATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/BENEFIT_PACKAGE_RELATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS BENEFIT_PACKAGE_RELATION( BENE_PKG_ID BIGINT NOT NULL, PKG_RELN_ID BIGINT NOT NULL, @@ -1128,7 +1128,7 @@ CREATE TABLE IF NOT EXISTS BENEFIT_PACKAGE_RELATION( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_BILLING_ENTITY_CONTACT - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/BILLING_ENTITY_CONTACT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/BILLING_ENTITY_CONTACT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS BILLING_ENTITY_CONTACT( BILL_ENT_ID BIGINT NOT NULL, CNTC_ID BIGINT NOT NULL, @@ -1149,7 +1149,7 @@ CREATE TABLE IF NOT EXISTS BILLING_ENTITY_CONTACT( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_BILLING_ENTITY_DETAIL - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/BILLING_ENTITY_DETAIL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/BILLING_ENTITY_DETAIL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS BILLING_ENTITY_DETAIL( BILL_ENT_ID BIGINT NOT NULL, CLIENT_ID BIGINT NOT NULL, @@ -1167,7 +1167,7 @@ CREATE TABLE IF NOT EXISTS BILLING_ENTITY_DETAIL( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED 
DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_CHECK_STATUS - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/CHECK_STATUS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/CHECK_STATUS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS CHECK_STATUS( CLM_PAY_ID BIGINT NOT NULL, CHK_ID BIGINT NOT NULL, @@ -1189,7 +1189,7 @@ CREATE TABLE IF NOT EXISTS CHECK_STATUS( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_EXHIBIT_GROUP - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/EXHIBIT_GROUP.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/EXHIBIT_GROUP.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS EXHIBIT_GROUP( EXHIBIT_GRP_ID BIGINT NOT NULL, VER BIGINT NOT NULL, @@ -1211,7 +1211,7 @@ CREATE TABLE IF NOT EXISTS EXHIBIT_GROUP( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_EXHIBIT_GROUP_EXHIBIT - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/EXHIBIT_GROUP_EXHIBIT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/EXHIBIT_GROUP_EXHIBIT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS EXHIBIT_GROUP_EXHIBIT( EXHIBIT_ID BIGINT NOT NULL, EXHIBIT_GRP_ID BIGINT NOT NULL, @@ -1231,7 +1231,7 @@ CREATE TABLE IF NOT EXISTS EXHIBIT_GROUP_EXHIBIT( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_GENERAL_LEDGER_MAP - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/GENERAL_LEDGER_MAP.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/GENERAL_LEDGER_MAP.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS GENERAL_LEDGER_MAP( GL_MAP_ID BIGINT NOT NULL, VER BIGINT NOT NULL, @@ -1251,7 +1251,7 @@ CREATE TABLE IF NOT EXISTS GENERAL_LEDGER_MAP( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_LEP_APPEAL_DECISION - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/LEP_APPEAL_DECISION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/LEP_APPEAL_DECISION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS LEP_APPEAL_DECISION( PRSN_ID BIGINT NOT NULL, LEP_APL_ID BIGINT NOT NULL, @@ -1273,7 +1273,7 @@ CREATE TABLE IF NOT EXISTS LEP_APPEAL_DECISION( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS 
staging_LETTER_DETAIL - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/LETTER_DETAIL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/LETTER_DETAIL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS LETTER_DETAIL( PRSN_ID BIGINT NOT NULL, LTR_ID BIGINT NOT NULL, @@ -1290,7 +1290,7 @@ CREATE TABLE IF NOT EXISTS LETTER_DETAIL( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_LETTER_JOB - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/LETTER_JOB.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/LETTER_JOB.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS LETTER_JOB( PRSN_ID BIGINT NOT NULL, LTR_DTL_ID BIGINT NOT NULL, @@ -1314,7 +1314,7 @@ CREATE TABLE IF NOT EXISTS LETTER_JOB( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_LETTER_REGISTER - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/LETTER_REGISTER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/LETTER_REGISTER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS LETTER_REGISTER( PRSN_ID BIGINT NOT NULL, LTR_ID BIGINT NOT NULL, @@ -1337,7 +1337,7 @@ CREATE TABLE IF NOT EXISTS LETTER_REGISTER( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_LETTER_WORK_ITEM - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/LETTER_WORK_ITEM.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/LETTER_WORK_ITEM.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS LETTER_WORK_ITEM( PRSN_ID BIGINT NOT NULL, LTR_RGSTR_ID BIGINT NOT NULL, @@ -1356,7 +1356,7 @@ CREATE TABLE IF NOT EXISTS LETTER_WORK_ITEM( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_EVENT_STATUS - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_EVENT_STATUS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/PERSON_EVENT_STATUS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_EVENT_STATUS( PRSN_EVNT_ID BIGINT NOT NULL, PRSN_EVNT_STAT_ID BIGINT NOT NULL, @@ -1375,7 +1375,7 @@ CREATE TABLE IF NOT EXISTS PERSON_EVENT_STATUS( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_EVENT_STATUS_REASON - USING com.databricks.spark.csv OPTIONS (path 
'/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_EVENT_STATUS_REASON.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/PERSON_EVENT_STATUS_REASON.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_EVENT_STATUS_REASON( PRSN_EVNT_STAT_ID BIGINT NOT NULL, PRSN_EVNT_STAT_RSN_ID BIGINT NOT NULL, @@ -1393,7 +1393,7 @@ CREATE TABLE IF NOT EXISTS PERSON_EVENT_STATUS_REASON( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_LEP_PROFILE_RECORD - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_LEP_PROFILE_RECORD.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/PERSON_LEP_PROFILE_RECORD.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_LEP_PROFILE_RECORD( PRSN_ID BIGINT NOT NULL, PRSN_LEP_PRFL_ID BIGINT NOT NULL, @@ -1422,7 +1422,7 @@ CREATE TABLE IF NOT EXISTS PERSON_LEP_PROFILE_RECORD( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PREMIUM_CATEGORY - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PREMIUM_CATEGORY.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/PREMIUM_CATEGORY.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PREMIUM_CATEGORY( GRP_ID BIGINT NOT NULL, PREM_CAT_ID BIGINT NOT NULL, @@ -1442,7 +1442,7 @@ CREATE TABLE IF NOT EXISTS PREMIUM_CATEGORY( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PREMIUM_PART - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PREMIUM_PART.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/PREMIUM_PART.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PREMIUM_PART( GRP_ID BIGINT NOT NULL, PREM_PART_ID BIGINT NOT NULL, @@ -1464,7 +1464,7 @@ CREATE TABLE IF NOT EXISTS PREMIUM_PART( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PREMIUM_TABLE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PREMIUM_TABLE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/PREMIUM_TABLE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PREMIUM_TABLE( GRP_ID BIGINT NOT NULL, BENE_GRP_ID BIGINT NOT NULL, @@ -1485,7 +1485,7 @@ CREATE TABLE IF NOT EXISTS PREMIUM_TABLE( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_UM_INPATIENT_REVIEW - USING com.databricks.spark.csv OPTIONS 
(path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/UM_INPATIENT_REVIEW.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/UM_INPATIENT_REVIEW.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS UM_INPATIENT_REVIEW( UM_RVW_ID BIGINT NOT NULL, UM_INPT_RVW_ID BIGINT NOT NULL, @@ -1513,7 +1513,7 @@ CREATE TABLE IF NOT EXISTS UM_INPATIENT_REVIEW( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_UM_INPATIENT_STATUS - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/UM_INPATIENT_STATUS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/UM_INPATIENT_STATUS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS UM_INPATIENT_STATUS( UM_RVW_ID BIGINT NOT NULL, UM_INPT_STAT BIGINT NOT NULL, @@ -1536,7 +1536,7 @@ CREATE TABLE IF NOT EXISTS UM_INPATIENT_STATUS( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_UM_SERVICE_OVERRIDE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/UM_SERVICE_OVERRIDE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/UM_SERVICE_OVERRIDE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS UM_SERVICE_OVERRIDE( UM_RVW_ID BIGINT NOT NULL, UM_SERV_ID BIGINT NOT NULL, @@ -1562,7 +1562,7 @@ INSERT INTO UM_SERVICE_OVERRIDE SELECT * FROM staging_UM_SERVICE_OVERRIDE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_BANK_ACCOUNT - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/BANK_ACCOUNT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/BANK_ACCOUNT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS BANK_ACCOUNT( BNK_ORG_ID BIGINT NOT NULL, BNK_ID BIGINT NOT NULL, @@ -1583,7 +1583,7 @@ CREATE TABLE IF NOT EXISTS BANK_ACCOUNT( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_UM_INPATIENT_STAY_LENGTH - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/UM_INPATIENT_STAY_LENGTH.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/UM_INPATIENT_STAY_LENGTH.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS UM_INPATIENT_STAY_LENGTH( UM_RVW_ID BIGINT NOT NULL, UM_INPT_RVW_ID BIGINT NOT NULL, @@ -1617,7 +1617,7 @@ CREATE TABLE IF NOT EXISTS UM_INPATIENT_STAY_LENGTH( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_REVENUE_CODE - USING 
com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/REVENUE_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/REVENUE_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS REVENUE_CODE( REV_CD_ID BIGINT NOT NULL, VER BIGINT NOT NULL, @@ -1639,7 +1639,7 @@ CREATE TABLE IF NOT EXISTS REVENUE_CODE( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_SERVICE_CODE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/SERVICE_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/SERVICE_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS SERVICE_CODE( SERV_CD_ID BIGINT NOT NULL, VER BIGINT NOT NULL, @@ -1658,7 +1658,7 @@ CREATE TABLE IF NOT EXISTS SERVICE_CODE( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_AGREEMENT - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/AGREEMENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/AGREEMENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS AGREEMENT( AGREE_ID BIGINT NOT NULL, VER BIGINT NOT NULL, @@ -1677,7 +1677,7 @@ CREATE TABLE IF NOT EXISTS AGREEMENT( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_ORGANIZATION_EVENT - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/ORGANIZATION_EVENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/ORGANIZATION_EVENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS ORGANIZATION_EVENT( ORG_ID BIGINT NOT NULL, ORG_EVNT_ID BIGINT NOT NULL, @@ -1698,7 +1698,7 @@ CREATE TABLE IF NOT EXISTS ORGANIZATION_EVENT( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_FDI_TX_IDCARD - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/FDI_TX_IDCARD.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/FDI_TX_IDCARD.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS FDI_TX_IDCARD ( ACN_ID BIGINT NOT NULL, @@ -1736,7 +1736,7 @@ CREATE TABLE IF NOT EXISTS FDI_TX_IDCARD ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_FDI_TX_LETTER - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/FDI_TX_LETTER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING 
com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/FDI_TX_LETTER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS FDI_TX_LETTER ( ACN_ID BIGINT NOT NULL, @@ -1770,7 +1770,7 @@ CREATE TABLE IF NOT EXISTS FDI_TX_LETTER ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_BENEFIT_PACKAGE_ATTRIBUTE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/BENEFIT_PACKAGE_ATTRIBUTE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/BENEFIT_PACKAGE_ATTRIBUTE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS BENEFIT_PACKAGE_ATTRIBUTE( BENE_PKG_ID BIGINT NOT NULL, BENE_PKG_ATTR_ID BIGINT NOT NULL, @@ -1789,7 +1789,7 @@ CREATE TABLE IF NOT EXISTS BENEFIT_PACKAGE_ATTRIBUTE( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_BILLING_ENTITY_SCHEDULE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/BILLING_ENTITY_SCHEDULE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/BILLING_ENTITY_SCHEDULE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS BILLING_ENTITY_SCHEDULE( BILL_ENT_ID BIGINT NOT NULL, CLIENT_ID BIGINT NOT NULL, @@ -1811,7 +1811,7 @@ CREATE TABLE IF NOT EXISTS BILLING_ENTITY_SCHEDULE( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_BILLING_SCHEDULE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/BILLING_SCHEDULE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/BILLING_SCHEDULE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS BILLING_SCHEDULE( BILL_SCHD_ID BIGINT NOT NULL, CLIENT_ID BIGINT NOT NULL, @@ -1831,7 +1831,7 @@ CREATE TABLE IF NOT EXISTS BILLING_SCHEDULE( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_BILLING_SOURCE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/BILLING_SOURCE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/BILLING_SOURCE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS BILLING_SOURCE( BILL_ENT_ID BIGINT NOT NULL, CLIENT_ID BIGINT NOT NULL, @@ -1849,7 +1849,7 @@ CREATE TABLE IF NOT EXISTS BILLING_SOURCE( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_CHARGE_ITEM - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/CHARGE_ITEM.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS 
(path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/CHARGE_ITEM.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS CHARGE_ITEM( BILL_ENT_ID BIGINT NOT NULL, CLIENT_ID BIGINT NOT NULL, @@ -1873,7 +1873,7 @@ CREATE TABLE IF NOT EXISTS CHARGE_ITEM( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_CLAIM_COSHARE_TRACKING - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/CLAIM_COSHARE_TRACKING.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/CLAIM_COSHARE_TRACKING.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS CLAIM_COSHARE_TRACKING( PRSN_ID BIGINT NOT NULL, VER BIGINT, @@ -1901,7 +1901,7 @@ CREATE TABLE IF NOT EXISTS CLAIM_COSHARE_TRACKING( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_CLAIM_LINE_ATTRIBUTE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/CLAIM_LINE_ATTRIBUTE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/CLAIM_LINE_ATTRIBUTE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS CLAIM_LINE_ATTRIBUTE( PRSN_ID BIGINT NOT NULL, CLM_DTL_ID BIGINT NOT NULL, @@ -1925,7 +1925,7 @@ CREATE TABLE IF NOT EXISTS CLAIM_LINE_ATTRIBUTE( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_FDI_CORRESPONDENCE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/FDI_CORRESPONDENCE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/FDI_CORRESPONDENCE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS FDI_CORRESPONDENCE ( CLIENT_ID BIGINT NOT NULL, @@ -1946,7 +1946,7 @@ CREATE TABLE IF NOT EXISTS FDI_CORRESPONDENCE ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_INVOICE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/INVOICE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/INVOICE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS INVOICE( BILL_ENT_ID BIGINT NOT NULL, CLIENT_ID BIGINT NOT NULL, @@ -1981,7 +1981,7 @@ CREATE TABLE IF NOT EXISTS INVOICE( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_INVOICE_STATUS - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/INVOICE_STATUS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/INVOICE_STATUS.dat', header 'true', inferSchema 'true', 
nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS INVOICE_STATUS( INV_ID BIGINT NOT NULL, CLIENT_ID BIGINT NOT NULL, @@ -2001,7 +2001,7 @@ CREATE TABLE IF NOT EXISTS INVOICE_STATUS( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_MOOP_BALANCE_EXCEPTIONS - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/MOOP_BALANCE_EXCEPTIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/MOOP_BALANCE_EXCEPTIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS MOOP_BALANCE_EXCEPTIONS( CLIENT_ID INTEGER NOT NULL, VLD_FRM_DT DATE NOT NULL, @@ -2023,7 +2023,7 @@ CREATE TABLE IF NOT EXISTS MOOP_BALANCE_EXCEPTIONS( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_MOOP_BALANCE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/MOOP_BALANCE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/MOOP_BALANCE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS MOOP_BALANCE( PRSN_ID BIGINT NOT NULL, CLIENT_ID INTEGER NOT NULL, @@ -2047,7 +2047,7 @@ CREATE TABLE IF NOT EXISTS MOOP_BALANCE( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_MOOP_ACCUMULATOR - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/MOOP_ACCUMULATOR.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/MOOP_ACCUMULATOR.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS MOOP_ACCUMULATOR ( SUPPL_ID VARCHAR(15) NOT NULL, CLIENT_ID INTEGER NOT NULL, @@ -2068,7 +2068,7 @@ SUPPL_ID VARCHAR(15) NOT NULL, ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_ACCUMULATOR - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_ACCUMULATOR.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/PERSON_ACCUMULATOR.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_ACCUMULATOR( PRSN_ID BIGINT NOT NULL, PRSN_ACCUM_ID BIGINT NOT NULL, @@ -2098,7 +2098,7 @@ CREATE TABLE IF NOT EXISTS PERSON_ACCUMULATOR( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PROCEDURE_PRICE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PROCEDURE_PRICE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/PROCEDURE_PRICE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF 
NOT EXISTS PROCEDURE_PRICE( PR_CD_ID BIGINT NOT NULL, PR_PRC_ID BIGINT NOT NULL, @@ -2120,7 +2120,7 @@ CREATE TABLE IF NOT EXISTS PROCEDURE_PRICE( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_RECEIPT - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/RECEIPT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/RECEIPT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS RECEIPT( BILL_ENT_ID BIGINT NOT NULL, CLIENT_ID BIGINT NOT NULL, @@ -2163,9 +2163,9 @@ CREATE TABLE IF NOT EXISTS RECEIPT( ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_PLAN_CODE_CONFIG - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/TMGSERVICES_PLAN_CODE_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS TMGSERVICES_PLAN_CODE_CONFIG +CREATE EXTERNAL TABLE IF NOT EXISTS staging_SERVICES_PLAN_CODE_CONFIG + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/SERVICES_PLAN_CODE_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS SERVICES_PLAN_CODE_CONFIG ( PLAN_CODE_ID INT NOT NULL, CLIENT_REF_ID INT NOT NULL, @@ -2195,13 +2195,13 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_PLAN_CODE_CONFIG SRC_SYS_REC_ID VARCHAR(15) NULL, PRIMARY KEY (PLAN_CODE_ID,CLIENT_ID) ) USING row OPTIONS(partition_by 'PLAN_CODE_ID,CLIENT_ID', buckets '32',redundancy '1'); - INSERT INTO TMGSERVICES_PLAN_CODE_CONFIG SELECT * FROM staging_TMGSERVICES_PLAN_CODE_CONFIG; + INSERT INTO SERVICES_PLAN_CODE_CONFIG SELECT * FROM staging_SERVICES_PLAN_CODE_CONFIG; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_KEY_GENERATOR - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/TMGSERVICES_KEY_GENERATOR.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS TMGSERVICES_KEY_GENERATOR +CREATE EXTERNAL TABLE IF NOT EXISTS staging_SERVICES_KEY_GENERATOR + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/SERVICES_KEY_GENERATOR.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS SERVICES_KEY_GENERATOR ( KEY_GEN_ID INT NOT NULL, CLIENT_REF_ID INT NOT NULL, @@ -2219,13 +2219,13 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_KEY_GENERATOR SRC_SYS_REC_ID VARCHAR(15) NULL, PRIMARY KEY (KEY_GEN_ID,CLIENT_ID) ) USING row OPTIONS(partition_by 'KEY_GEN_ID,CLIENT_ID', buckets '32',redundancy '1' ); - INSERT INTO TMGSERVICES_KEY_GENERATOR SELECT * FROM staging_TMGSERVICES_KEY_GENERATOR; + INSERT INTO SERVICES_KEY_GENERATOR SELECT * FROM staging_SERVICES_KEY_GENERATOR; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_GROUP_CODE_CONFIG - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/TMGSERVICES_GROUP_CODE_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', 
maxCharsPerColumn '4096'); - CREATE TABLE IF NOT EXISTS TMGSERVICES_GROUP_CODE_CONFIG +CREATE EXTERNAL TABLE IF NOT EXISTS staging_SERVICES_GROUP_CODE_CONFIG + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/SERVICES_GROUP_CODE_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + CREATE TABLE IF NOT EXISTS SERVICES_GROUP_CODE_CONFIG ( GROUP_ID INT NOT NULL, CLIENT_REF_ID INT NOT NULL, @@ -2244,12 +2244,12 @@ CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_GROUP_CODE_CONFIG PRIMARY KEY (GROUP_ID,CLIENT_ID) ) USING row OPTIONS(partition_by 'GROUP_ID,CLIENT_ID', buckets '32',redundancy '1'); - INSERT INTO TMGSERVICES_GROUP_CODE_CONFIG SELECT * FROM staging_TMGSERVICES_GROUP_CODE_CONFIG; + INSERT INTO SERVICES_GROUP_CODE_CONFIG SELECT * FROM staging_SERVICES_GROUP_CODE_CONFIG; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_DNIS_CONFIG - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/TMGSERVICES_DNIS_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS TMGSERVICES_DNIS_CONFIG +CREATE EXTERNAL TABLE IF NOT EXISTS staging_SERVICES_DNIS_CONFIG + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/SERVICES_DNIS_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS SERVICES_DNIS_CONFIG ( DNIS_ID INT NOT NULL, CLIENT_REF_ID INT NOT NULL, @@ -2268,12 +2268,12 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_DNIS_CONFIG SRC_SYS_REC_ID VARCHAR(15) NULL, PRIMARY KEY (DNIS_ID,CLIENT_ID) ) USING row OPTIONS(partition_by 'DNIS_ID,CLIENT_ID', buckets '32',redundancy '1'); - INSERT INTO TMGSERVICES_DNIS_CONFIG SELECT * FROM staging_TMGSERVICES_DNIS_CONFIG; + INSERT INTO SERVICES_DNIS_CONFIG SELECT * FROM staging_SERVICES_DNIS_CONFIG; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_DENIAL_REASON_CONFIG - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/TMGSERVICES_DENIAL_REASON_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS TMGSERVICES_DENIAL_REASON_CONFIG +CREATE EXTERNAL TABLE IF NOT EXISTS staging_SERVICES_DENIAL_REASON_CONFIG + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/SERVICES_DENIAL_REASON_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS SERVICES_DENIAL_REASON_CONFIG ( DENIAL_REASON_ID INT NOT NULL, CLIENT_REF_ID INT NOT NULL, @@ -2290,13 +2290,13 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_DENIAL_REASON_CONFIG SRC_SYS_REC_ID VARCHAR(15) NULL, PRIMARY KEY (DENIAL_REASON_ID,CLIENT_ID) ) USING row OPTIONS(partition_by 'DENIAL_REASON_ID,CLIENT_ID', buckets '32',redundancy '1'); - INSERT INTO TMGSERVICES_DENIAL_REASON_CONFIG SELECT * FROM staging_TMGSERVICES_DENIAL_REASON_CONFIG; + INSERT INTO SERVICES_DENIAL_REASON_CONFIG SELECT * FROM staging_SERVICES_DENIAL_REASON_CONFIG; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_CLIENT_MASTER - USING com.databricks.spark.csv OPTIONS (path 
'/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/TMGSERVICES_CLIENT_MASTER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS TMGSERVICES_CLIENT_MASTER +CREATE EXTERNAL TABLE IF NOT EXISTS staging_SERVICES_CLIENT_MASTER + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/SERVICES_CLIENT_MASTER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS SERVICES_CLIENT_MASTER ( CLIENT_REF_ID INT NOT NULL, VER BIGINT NOT NULL, @@ -2304,7 +2304,7 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_CLIENT_MASTER CLIENT_LEGACY_CD VARCHAR(5) NULL, CLIENT_NAME VARCHAR(10) NULL, MEMBER_ID_FORMAT VARCHAR(15) NULL, - TMG_CALL_CLIENT_CODE VARCHAR(10) NULL, + CALL_CLIENT_CODE VARCHAR(10) NULL, CREATE_date date NULL, UPDATED_date date NULL, USER_NAME VARCHAR(15) NULL, @@ -2314,12 +2314,12 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_CLIENT_MASTER SRC_SYS_REC_ID VARCHAR(15) NULL, PRIMARY KEY (CLIENT_REF_ID,CLIENT_ID) ) USING row OPTIONS(partition_by 'CLIENT_REF_ID,CLIENT_ID', buckets '32',redundancy '1'); - INSERT INTO TMGSERVICES_CLIENT_MASTER SELECT * FROM staging_TMGSERVICES_CLIENT_MASTER; + INSERT INTO SERVICES_CLIENT_MASTER SELECT * FROM staging_SERVICES_CLIENT_MASTER; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS +CREATE EXTERNAL TABLE IF NOT EXISTS staging_SERVICES_SUBJECT_CATEGORY_TRANSLATIONS + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/SERVICES_SUBJECT_CATEGORY_TRANSLATIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS SERVICES_SUBJECT_CATEGORY_TRANSLATIONS ( SUBJ_CAT_ID INT NOT NULL, CLIENT_REF_ID INT NOT NULL, @@ -2341,13 +2341,13 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS SRC_SYS_REC_ID VARCHAR(15) NULL, PRIMARY KEY (SUBJ_CAT_ID,CLIENT_ID) ) USING row OPTIONS(partition_by 'SUBJ_CAT_ID,CLIENT_ID', buckets '32',redundancy '1'); - INSERT INTO TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS SELECT * FROM staging_TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS; + INSERT INTO SERVICES_SUBJECT_CATEGORY_TRANSLATIONS SELECT * FROM staging_SERVICES_SUBJECT_CATEGORY_TRANSLATIONS; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS +CREATE EXTERNAL TABLE IF NOT EXISTS staging_SERVICES_PTMR_VARIABLE_TRANSLATIONS + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/SERVICES_PTMR_VARIABLE_TRANSLATIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS SERVICES_PTMR_VARIABLE_TRANSLATIONS ( VARIABLE_ID INT NOT NULL, VER BIGINT NOT NULL, @@ 
-2365,13 +2365,13 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS
 PRIMARY KEY (VARIABLE_ID)
 ) USING row OPTIONS(partition_by 'VARIABLE_ID', buckets '32',redundancy '1');
- INSERT INTO TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS SELECT * FROM staging_TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS;
+ INSERT INTO SERVICES_PTMR_VARIABLE_TRANSLATIONS SELECT * FROM staging_SERVICES_PTMR_VARIABLE_TRANSLATIONS;
 ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA -----
-CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_ACCOUNTING_CODES
- USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/TMGSERVICES_ACCOUNTING_CODES.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
-CREATE TABLE IF NOT EXISTS TMGSERVICES_ACCOUNTING_CODES
+CREATE EXTERNAL TABLE IF NOT EXISTS staging_SERVICES_ACCOUNTING_CODES
+ USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/SERVICES_ACCOUNTING_CODES.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
+CREATE TABLE IF NOT EXISTS SERVICES_ACCOUNTING_CODES
 (
 ACCOUNTING_CODE_ID INT NOT NULL,
 CLIENT_REF_ID INT NOT NULL,
@@ -2394,12 +2394,12 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_ACCOUNTING_CODES
 SRC_SYS_REC_ID VARCHAR(15) NULL,
 PRIMARY KEY (ACCOUNTING_CODE_ID,CLIENT_ID)
 ) USING row OPTIONS(partition_by 'ACCOUNTING_CODE_ID,CLIENT_ID', buckets '32',redundancy '1');
- INSERT INTO TMGSERVICES_ACCOUNTING_CODES SELECT * FROM staging_TMGSERVICES_ACCOUNTING_CODES;
+ INSERT INTO SERVICES_ACCOUNTING_CODES SELECT * FROM staging_SERVICES_ACCOUNTING_CODES;
 ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA -----
 CREATE EXTERNAL TABLE IF NOT EXISTS staging_UNAPPLIED_CASH
- USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/UNAPPLIED_CASH.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
+ USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/UNAPPLIED_CASH.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
 CREATE TABLE IF NOT EXISTS UNAPPLIED_CASH(
 UNAPP_CSH_ID BIGINT NOT NULL,
 VER BIGINT NOT NULL,
@@ -2420,7 +2420,7 @@ CREATE TABLE IF NOT EXISTS UNAPPLIED_CASH(
 ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA -----
 CREATE EXTERNAL TABLE IF NOT EXISTS staging_WORK_GENERATED_KEYS
- USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/WORK_GENERATED_KEYS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
+ USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data/RowTable_Data/WORK_GENERATED_KEYS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
 CREATE TABLE IF NOT EXISTS WORK_GENERATED_KEYS(
 CLIENT_ID BIGINT NOT NULL,
 GEN_KEY_ID BIGINT NOT NULL,
diff --git a/dtests/src/resources/scripts/cdcConnector/pointLookUpQueries.sql b/dtests/src/resources/scripts/cdcConnector/pointLookUpQueries.sql
index 34aab327d2..07f9c851d1 100644
--- a/dtests/src/resources/scripts/cdcConnector/pointLookUpQueries.sql
+++ b/dtests/src/resources/scripts/cdcConnector/pointLookUpQueries.sql
@@ -115,15 +115,15 @@ SELECT * FROM REFERENCE_DATA WHERE REF_DTA_ID = 42
 SELECT * FROM RETURNED_MAIL WHERE RTRN_MAIL_ID = 10000 AND PRSN_ID = 5790;
 SELECT * FROM REVENUE_CODE WHERE REV_CD_ID = 52011;
 SELECT * FROM SERVICE_CODE WHERE SERV_CD_ID = 654292;
-SELECT * FROM TMGSERVICES_ACCOUNTING_CODES WHERE ACCOUNTING_CODE_ID = 726283 AND CLIENT_ID = 84375;
-SELECT * FROM TMGSERVICES_CLIENT_MASTER WHERE CLIENT_REF_ID = 82501 AND CLIENT_ID = 27982;
-SELECT * FROM TMGSERVICES_DENIAL_REASON_CONFIG WHERE DENIAL_REASON_ID = 60072 AND CLIENT_REF_ID = 2575092;
-SELECT * FROM TMGSERVICES_DNIS_CONFIG WHERE DNIS_ID = 3263 AND CLIENT_REF_ID = 1703380;
-SELECT * FROM TMGSERVICES_GROUP_CODE_CONFIG WHERE GROUP_ID = 520494 AND CLIENT_REF_ID = 2020245;
-SELECT * FROM TMGSERVICES_KEY_GENERATOR WHERE KEY_GEN_ID = 9218 AND CLIENT_ID = 1464;
-SELECT * FROM TMGSERVICES_PLAN_CODE_CONFIG WHERE PLAN_CODE_ID = 13234 AND CLIENT_REF_ID = 3585543;
-SELECT * FROM TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS WHERE VARIABLE_ID = 84362 AND CLIENT_ID = 92141;
-SELECT * FROM TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS WHERE SUBJ_CAT_ID = 33 AND CLIENT_REF_ID = 3764443;
+SELECT * FROM SERVICES_ACCOUNTING_CODES WHERE ACCOUNTING_CODE_ID = 726283 AND CLIENT_ID = 84375;
+SELECT * FROM SERVICES_CLIENT_MASTER WHERE CLIENT_REF_ID = 82501 AND CLIENT_ID = 27982;
+SELECT * FROM SERVICES_DENIAL_REASON_CONFIG WHERE DENIAL_REASON_ID = 60072 AND CLIENT_REF_ID = 2575092;
+SELECT * FROM SERVICES_DNIS_CONFIG WHERE DNIS_ID = 3263 AND CLIENT_REF_ID = 1703380;
+SELECT * FROM SERVICES_GROUP_CODE_CONFIG WHERE GROUP_ID = 520494 AND CLIENT_REF_ID = 2020245;
+SELECT * FROM SERVICES_KEY_GENERATOR WHERE KEY_GEN_ID = 9218 AND CLIENT_ID = 1464;
+SELECT * FROM SERVICES_PLAN_CODE_CONFIG WHERE PLAN_CODE_ID = 13234 AND CLIENT_REF_ID = 3585543;
+SELECT * FROM SERVICES_PTMR_VARIABLE_TRANSLATIONS WHERE VARIABLE_ID = 84362 AND CLIENT_ID = 92141;
+SELECT * FROM SERVICES_SUBJECT_CATEGORY_TRANSLATIONS WHERE SUBJ_CAT_ID = 33 AND CLIENT_REF_ID = 3764443;
 SELECT * FROM TOPIC WHERE TPC_ID = 2916442 AND INQ_ID = 2916442;
 SELECT * FROM TOPIC_COMMUNICATION WHERE TPC_INQ_ID = 10 AND CMCN_INQ_ID = 10;
 SELECT * FROM UM_ACTIVITY WHERE UM_RVW_ID = 7277 AND UM_ACTY_ID = 24221;
diff --git a/dtests/src/resources/scripts/cdcConnector/scriptsForApp1/insert4.sql b/dtests/src/resources/scripts/cdcConnector/scriptsForApp1/insert4.sql
index 55a7900abe..17c877707e 100644
--- a/dtests/src/resources/scripts/cdcConnector/scriptsForApp1/insert4.sql
+++ b/dtests/src/resources/scripts/cdcConnector/scriptsForApp1/insert4.sql
@@ -14,10 +14,10 @@ INSERT INTO [testdatabase].[dbo].[FILE_TRANSFER_STAT] VALUES (?, 4187111, 1, N'I
 INSERT INTO [testdatabase].[dbo].[RETURNED_MAIL] VALUES (?, 2534, 1, 53370, N'2016-06-13', N'd', N'DnYuOz', N'DVYS', N'2016-07-23', N'2016-06-23', N'6349', N'2342');
 INSERT INTO [testdatabase].[dbo].[REVENUE_CODE] VALUES (?, 9197, 1, N'JwTGI', N'2016-07-28', N'2016-06-18', N'vg', N'NSoCMDhm', 1759734, N'2016-06-05', N'2016-05-28', N'5805', N'5614');
 INSERT INTO [testdatabase].[dbo].[SERVICE_CODE] VALUES (?, 461, 1, N'xUscj', N'soVG', N'ChaKw', N'2016-07-19', N'2016-05-01', N'1886', N'6019');
-INSERT INTO [testdatabase].[dbo].[TMGSERVICES_ACCOUNTING_CODES] VALUES (?, 3893554, 8433, 1, N'uGn', N'k', N'CTXhWJ', 'G', N'WCaOs', N'jjU', N'Di', N'dxCmNYg', N'2016-06-25', N'2016-06-08', N'qhqpcexY', N'2016-05-31', N'2016-08-07', N'5603', N'3404');
-INSERT INTO [testdatabase].[dbo].[TMGSERVICES_CLIENT_MASTER] VALUES (?, 3364, 1, N'jC', N'yTFExSF', N'JiTvgi', N'uYdAL', N'2016-06-16', N'2016-07-25', N'ayXBKO', N'2016-05-24', N'2016-07-14', N'8505', N'594');
-INSERT INTO [testdatabase].[dbo].[TMGSERVICES_DNIS_CONFIG] VALUES (?, 3552757, 8946, 1, N'ABEqEQlm', N'PuIlxh', N'PiPzxFpxhA', N'yH', N'2016-07-12', N'2016-07-22', N'kCPJlyGVW', N'2016-07-08', N'2016-05-29', N'5652', N'5592');
-INSERT INTO [testdatabase].[dbo].[TMGSERVICES_GROUP_CODE_CONFIG] VALUES (?, 3521714, 7543, 1, N'QZVT', N'uD', 'U', N'2016-05-15', N'2016-06-25', N'Uilk', N'2016-05-26', N'2016-07-27', N'9676', N'8465');
-INSERT INTO [testdatabase].[dbo].[TMGSERVICES_KEY_GENERATOR] VALUES (?, 4424588, 1449, 1, N'oKkh', N'Q', N'zHx', N'2016-06-19', N'2016-05-07', N'cpgklMW', N'2016-05-11', N'2016-06-29', N'8219', N'5628');
-INSERT INTO [testdatabase].[dbo].[TMGSERVICES_PLAN_CODE_CONFIG] VALUES (?, 1218154, 4308, 1, N'eX', N'QY', N'Bgeh', 'E', 'l', 'f', 'K', 'L', 1905702, 2153849, 'S', 'R', 'H', 'O', 'm', N'2016-06-22', N'2016-05-12', N'QIuRtZWFL', N'2016-06-15', N'2016-05-15', N'3589', N'1312');
-INSERT INTO [testdatabase].[dbo].[TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS] VALUES (?, 7044, 1, N'Z', N'M', N'kCYzL', N'2016-05-13', N'2016-06-16', N'mKhu', N'2016-05-12', N'2016-06-07', N'3396', N'2794');
+INSERT INTO [testdatabase].[dbo].[SERVICES_ACCOUNTING_CODES] VALUES (?, 3893554, 8433, 1, N'uGn', N'k', N'CTXhWJ', 'G', N'WCaOs', N'jjU', N'Di', N'dxCmNYg', N'2016-06-25', N'2016-06-08', N'qhqpcexY', N'2016-05-31', N'2016-08-07', N'5603', N'3404');
+INSERT INTO [testdatabase].[dbo].[SERVICES_CLIENT_MASTER] VALUES (?, 3364, 1, N'jC', N'yTFExSF', N'JiTvgi', N'uYdAL', N'2016-06-16', N'2016-07-25', N'ayXBKO', N'2016-05-24', N'2016-07-14', N'8505', N'594');
+INSERT INTO [testdatabase].[dbo].[SERVICES_DNIS_CONFIG] VALUES (?, 3552757, 8946, 1, N'ABEqEQlm', N'PuIlxh', N'PiPzxFpxhA', N'yH', N'2016-07-12', N'2016-07-22', N'kCPJlyGVW', N'2016-07-08', N'2016-05-29', N'5652', N'5592');
+INSERT INTO [testdatabase].[dbo].[SERVICES_GROUP_CODE_CONFIG] VALUES (?, 3521714, 7543, 1, N'QZVT', N'uD', 'U', N'2016-05-15', N'2016-06-25', N'Uilk', N'2016-05-26', N'2016-07-27', N'9676', N'8465');
+INSERT INTO [testdatabase].[dbo].[SERVICES_KEY_GENERATOR] VALUES (?, 4424588, 1449, 1, N'oKkh', N'Q', N'zHx', N'2016-06-19', N'2016-05-07', N'cpgklMW', N'2016-05-11', N'2016-06-29', N'8219', N'5628');
+INSERT INTO [testdatabase].[dbo].[SERVICES_PLAN_CODE_CONFIG] VALUES (?, 1218154, 4308, 1, N'eX', N'QY', N'Bgeh', 'E', 'l', 'f', 'K', 'L', 1905702, 2153849, 'S', 'R', 'H', 'O', 'm', N'2016-06-22', N'2016-05-12', N'QIuRtZWFL', N'2016-06-15', N'2016-05-15', N'3589', N'1312');
+INSERT INTO [testdatabase].[dbo].[SERVICES_PTMR_VARIABLE_TRANSLATIONS] VALUES (?, 7044, 1, N'Z', N'M', N'kCYzL', N'2016-05-13', N'2016-06-16', N'mKhu', N'2016-05-12', N'2016-06-07', N'3396', N'2794');
diff --git a/dtests/src/resources/scripts/cdcConnector/scriptsForApp2/insert4.sql b/dtests/src/resources/scripts/cdcConnector/scriptsForApp2/insert4.sql
index 74d2342497..16119594a9 100644
--- a/dtests/src/resources/scripts/cdcConnector/scriptsForApp2/insert4.sql
+++ b/dtests/src/resources/scripts/cdcConnector/scriptsForApp2/insert4.sql
@@ -14,10 +14,10 @@ INSERT INTO [testdatabase].[dbo].[FILE_TRANSFER_STAT] VALUES (?, 4187112, 2, N'I
 INSERT INTO [testdatabase].[dbo].[RETURNED_MAIL] VALUES (?, 2534, 2, 53370, N'2016-06-13', N'd', N'DnYuOz', N'DVYS', N'2016-07-23', N'2016-06-23', N'6349', N'2342');
 INSERT INTO [testdatabase].[dbo].[REVENUE_CODE] VALUES (?, 9197, 2, N'JwTGI', N'2016-07-28', N'2016-06-18', N'vg', N'NSoCMDhm', 1759734, N'2016-06-05', N'2016-05-28', N'5805', N'5614');
 INSERT INTO [testdatabase].[dbo].[SERVICE_CODE] VALUES (?, 462, 2, N'xUscj', N'soVG', N'ChaKw', N'2016-07-19', N'2016-05-01', N'1886', N'6019');
-INSERT INTO [testdatabase].[dbo].[TMGSERVICES_ACCOUNTING_CODES] VALUES (?, 3893554, 8433, 2, N'uGn', N'k', N'CTXhWJ', 'G', N'WCaOs', N'jjU', N'Di', N'dxCmNYg', N'2016-06-25', N'2016-06-08', N'qhqpcexY', N'2016-05-31', N'2016-08-07', N'5603', N'3404');
-INSERT INTO [testdatabase].[dbo].[TMGSERVICES_CLIENT_MASTER] VALUES (?, 3364, 2, N'jC', N'yTFExSF', N'JiTvgi', N'uYdAL', N'2016-06-16', N'2016-07-25', N'ayXBKO', N'2016-05-24', N'2016-07-14', N'8505', N'594');
-INSERT INTO [testdatabase].[dbo].[TMGSERVICES_DNIS_CONFIG] VALUES (?, 3552757, 8946, 2, N'ABEqEQlm', N'PuIlxh', N'PiPzxFpxhA', N'yH', N'2016-07-12', N'2016-07-22', N'kCPJlyGVW', N'2016-07-08', N'2016-05-29', N'5652', N'5592');
-INSERT INTO [testdatabase].[dbo].[TMGSERVICES_GROUP_CODE_CONFIG] VALUES (?, 3521714, 7543, 2, N'QZVT', N'uD', 'U', N'2016-05-15', N'2016-06-25', N'Uilk', N'2016-05-26', N'2016-07-27', N'9676', N'8465');
-INSERT INTO [testdatabase].[dbo].[TMGSERVICES_KEY_GENERATOR] VALUES (?, 4424588, 1449, 2, N'oKkh', N'Q', N'zHx', N'2016-06-19', N'2016-05-07', N'cpgklMW', N'2016-05-11', N'2016-06-29', N'8219', N'5628');
-INSERT INTO [testdatabase].[dbo].[TMGSERVICES_PLAN_CODE_CONFIG] VALUES (?, 1218154, 4308, 2, N'eX', N'QY', N'Bgeh', 'E', 'l', 'f', 'K', 'L', 1905702, 2153849, 'S', 'R', 'H', 'O', 'm', N'2016-06-22', N'2016-05-12', N'QIuRtZWFL', N'2016-06-15', N'2016-05-15', N'3589', N'1312');
-INSERT INTO [testdatabase].[dbo].[TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS] VALUES (?, 7044, 2, N'Z', N'M', N'kCYzL', N'2016-05-13', N'2016-06-16', N'mKhu', N'2016-05-12', N'2016-06-07', N'3396', N'2794');
+INSERT INTO [testdatabase].[dbo].[SERVICES_ACCOUNTING_CODES] VALUES (?, 3893554, 8433, 2, N'uGn', N'k', N'CTXhWJ', 'G', N'WCaOs', N'jjU', N'Di', N'dxCmNYg', N'2016-06-25', N'2016-06-08', N'qhqpcexY', N'2016-05-31', N'2016-08-07', N'5603', N'3404');
+INSERT INTO [testdatabase].[dbo].[SERVICES_CLIENT_MASTER] VALUES (?, 3364, 2, N'jC', N'yTFExSF', N'JiTvgi', N'uYdAL', N'2016-06-16', N'2016-07-25', N'ayXBKO', N'2016-05-24', N'2016-07-14', N'8505', N'594');
+INSERT INTO [testdatabase].[dbo].[SERVICES_DNIS_CONFIG] VALUES (?, 3552757, 8946, 2, N'ABEqEQlm', N'PuIlxh', N'PiPzxFpxhA', N'yH', N'2016-07-12', N'2016-07-22', N'kCPJlyGVW', N'2016-07-08', N'2016-05-29', N'5652', N'5592');
+INSERT INTO [testdatabase].[dbo].[SERVICES_GROUP_CODE_CONFIG] VALUES (?, 3521714, 7543, 2, N'QZVT', N'uD', 'U', N'2016-05-15', N'2016-06-25', N'Uilk', N'2016-05-26', N'2016-07-27', N'9676', N'8465');
+INSERT INTO [testdatabase].[dbo].[SERVICES_KEY_GENERATOR] VALUES (?, 4424588, 1449, 2, N'oKkh', N'Q', N'zHx', N'2016-06-19', N'2016-05-07', N'cpgklMW', N'2016-05-11', N'2016-06-29', N'8219', N'5628');
+INSERT INTO [testdatabase].[dbo].[SERVICES_PLAN_CODE_CONFIG] VALUES (?, 1218154, 4308, 2, N'eX', N'QY', N'Bgeh', 'E', 'l', 'f', 'K', 'L', 1905702, 2153849, 'S', 'R', 'H', 'O', 'm', N'2016-06-22', N'2016-05-12', N'QIuRtZWFL', N'2016-06-15', N'2016-05-15', N'3589', N'1312');
+INSERT INTO [testdatabase].[dbo].[SERVICES_PTMR_VARIABLE_TRANSLATIONS] VALUES (?, 7044, 2, N'Z', N'M', N'kCYzL', N'2016-05-13', N'2016-06-16', N'mKhu', N'2016-05-12', N'2016-06-07', N'3396', N'2794');
diff --git a/dtests/src/resources/scripts/cdcConnector/source_destination_tables.properties b/dtests/src/resources/scripts/cdcConnector/source_destination_tables.properties
index 44a075c493..83f6870acf 100644
--- a/dtests/src/resources/scripts/cdcConnector/source_destination_tables.properties
+++ b/dtests/src/resources/scripts/cdcConnector/source_destination_tables.properties
@@ -115,15 +115,15 @@ testdatabase.cdc.dbo_REFERENCE_DATA_CT=REFERENCE_DATA
 testdatabase.cdc.dbo_RETURNED_MAIL_CT=RETURNED_MAIL
 testdatabase.cdc.dbo_REVENUE_CODE_CT=REVENUE_CODE
 testdatabase.cdc.dbo_SERVICE_CODE_CT=SERVICE_CODE
-testdatabase.cdc.dbo_TMGSERVICES_ACCOUNTING_CODES_CT=TMGSERVICES_ACCOUNTING_CODES
-testdatabase.cdc.dbo_TMGSERVICES_CLIENT_MASTER_CT=TMGSERVICES_CLIENT_MASTER
-testdatabase.cdc.dbo_TMGSERVICES_DENIAL_REASON_CONFIG_CT=TMGSERVICES_DENIAL_REASON_CONFIG
-testdatabase.cdc.dbo_TMGSERVICES_DNIS_CONFIG_CT=TMGSERVICES_DNIS_CONFIG
-testdatabase.cdc.dbo_TMGSERVICES_GROUP_CODE_CONFIG_CT=TMGSERVICES_GROUP_CODE_CONFIG
-testdatabase.cdc.dbo_TMGSERVICES_KEY_GENERATOR_CT=TMGSERVICES_KEY_GENERATOR
-testdatabase.cdc.dbo_TMGSERVICES_PLAN_CODE_CONFIG_CT=TMGSERVICES_PLAN_CODE_CONFIG
-testdatabase.cdc.dbo_TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS_CT=TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS
-testdatabase.cdc.dbo_TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS_CT=TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS
+testdatabase.cdc.dbo_SERVICES_ACCOUNTING_CODES_CT=SERVICES_ACCOUNTING_CODES
+testdatabase.cdc.dbo_SERVICES_CLIENT_MASTER_CT=SERVICES_CLIENT_MASTER
+testdatabase.cdc.dbo_SERVICES_DENIAL_REASON_CONFIG_CT=SERVICES_DENIAL_REASON_CONFIG
+testdatabase.cdc.dbo_SERVICES_DNIS_CONFIG_CT=SERVICES_DNIS_CONFIG
+testdatabase.cdc.dbo_SERVICES_GROUP_CODE_CONFIG_CT=SERVICES_GROUP_CODE_CONFIG
+testdatabase.cdc.dbo_SERVICES_KEY_GENERATOR_CT=SERVICES_KEY_GENERATOR
+testdatabase.cdc.dbo_SERVICES_PLAN_CODE_CONFIG_CT=SERVICES_PLAN_CODE_CONFIG
+testdatabase.cdc.dbo_SERVICES_PTMR_VARIABLE_TRANSLATIONS_CT=SERVICES_PTMR_VARIABLE_TRANSLATIONS
+testdatabase.cdc.dbo_SERVICES_SUBJECT_CATEGORY_TRANSLATIONS_CT=SERVICES_SUBJECT_CATEGORY_TRANSLATIONS
 testdatabase.cdc.dbo_TOPIC_CT=TOPIC
 testdatabase.cdc.dbo_TOPIC_COMMUNICATION_CT=TOPIC_COMMUNICATION
 testdatabase.cdc.dbo_UM_ACTIVITY_CT=UM_ACTIVITY
diff --git a/dtests/src/resources/scripts/cdcConnector/tableNameList.txt b/dtests/src/resources/scripts/cdcConnector/tableNameList.txt
index 381c329c7b..5d71faa9f6 100644
--- a/dtests/src/resources/scripts/cdcConnector/tableNameList.txt
+++ b/dtests/src/resources/scripts/cdcConnector/tableNameList.txt
@@ -114,14 +114,14 @@ REFERENCE_DATA=CLIENT_REF_DTA_ID;
 RETURNED_MAIL=RTRN_MAIL_ID;
 REVENUE_CODE=REV_CD_ID;
 SERVICE_CODE=SERV_CD_ID;
-TMGSERVICES_ACCOUNTING_CODES=ACCOUNTING_CODE_ID;
-TMGSERVICES_CLIENT_MASTER=CLIENT_REF_ID;
-TMGSERVICES_DENIAL_REASON_CONFIG=DENIAL_REASON_ID;
-TMGSERVICES_DNIS_CONFIG=DNIS_ID;
-TMGSERVICES_GROUP_CODE_CONFIG=GROUP_ID;
-TMGSERVICES_KEY_GENERATOR=KEY_GEN_ID;
-TMGSERVICES_PLAN_CODE_CONFIG=PLAN_CODE_ID;
-TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS=VARIABLE_ID;
+SERVICES_ACCOUNTING_CODES=ACCOUNTING_CODE_ID;
+SERVICES_CLIENT_MASTER=CLIENT_REF_ID;
+SERVICES_DENIAL_REASON_CONFIG=DENIAL_REASON_ID;
+SERVICES_DNIS_CONFIG=DNIS_ID;
+SERVICES_GROUP_CODE_CONFIG=GROUP_ID;
+SERVICES_KEY_GENERATOR=KEY_GEN_ID;
+SERVICES_PLAN_CODE_CONFIG=PLAN_CODE_ID;
+SERVICES_PTMR_VARIABLE_TRANSLATIONS=VARIABLE_ID;
 TOPIC=TPC_ID;
 TOPIC_COMMUNICATION=TPC_INQ_ID;
 UM_ACTIVITY=UM_ACTY_ID;
diff --git a/dtests/src/resources/scripts/clusterRecovery/createAndLoadColocatedTables.sql b/dtests/src/resources/scripts/clusterRecovery/createAndLoadColocatedTables.sql
index 4ce887d7fe..ec7bb01304 100644
--- a/dtests/src/resources/scripts/clusterRecovery/createAndLoadColocatedTables.sql
+++ b/dtests/src/resources/scripts/clusterRecovery/createAndLoadColocatedTables.sql
@@ -2526,12 +2526,12 @@ CREATE TABLE IF NOT EXISTS RECEIPT(
 ) USING row OPTIONS(partition_by 'BILL_ENT_ID',buckets '32',redundancy '1',colocate_with 'BILLING_ENTITY',EVICTION_BY 'LRUHEAPPERCENT', overflow 'true');
 INSERT INTO RECEIPT SELECT * FROM staging_RECEIPT;
-DROP TABLE IF EXISTS TMGSERVICES_PLAN_CODE_CONFIG;
-DROP TABLE IF EXISTS staging_TMGSERVICES_PLAN_CODE_CONFIG;
+DROP TABLE IF EXISTS SERVICES_PLAN_CODE_CONFIG;
+DROP TABLE IF EXISTS staging_SERVICES_PLAN_CODE_CONFIG;
 ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA -----
-CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_PLAN_CODE_CONFIG
- USING com.databricks.spark.csv OPTIONS (path ':dataLocation/TMGSERVICES_PLAN_CODE_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
-CREATE TABLE IF NOT EXISTS TMGSERVICES_PLAN_CODE_CONFIG
+CREATE EXTERNAL TABLE IF NOT EXISTS staging_SERVICES_PLAN_CODE_CONFIG
+ USING com.databricks.spark.csv OPTIONS (path ':dataLocation/SERVICES_PLAN_CODE_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
+CREATE TABLE IF NOT EXISTS SERVICES_PLAN_CODE_CONFIG
 (
 PLAN_CODE_ID INT NOT NULL,
 CLIENT_REF_ID INT NOT NULL,
@@ -2561,14 +2561,14 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_PLAN_CODE_CONFIG
 SRC_SYS_REC_ID VARCHAR(15) NULL,
 PRIMARY KEY (PLAN_CODE_ID,CLIENT_ID)
 ) USING row OPTIONS(partition_by 'PLAN_CODE_ID,CLIENT_ID', buckets '32',redundancy '1',EVICTION_BY 'LRUHEAPPERCENT', overflow 'true');
- INSERT INTO TMGSERVICES_PLAN_CODE_CONFIG SELECT * FROM staging_TMGSERVICES_PLAN_CODE_CONFIG;
+ INSERT INTO SERVICES_PLAN_CODE_CONFIG SELECT * FROM staging_SERVICES_PLAN_CODE_CONFIG;
-DROP TABLE IF EXISTS TMGSERVICES_KEY_GENERATOR;
-DROP TABLE IF EXISTS staging_TMGSERVICES_KEY_GENERATOR;
+DROP TABLE IF EXISTS SERVICES_KEY_GENERATOR;
+DROP TABLE IF EXISTS staging_SERVICES_KEY_GENERATOR;
 ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA -----
-CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_KEY_GENERATOR
- USING com.databricks.spark.csv OPTIONS (path ':dataLocation/TMGSERVICES_KEY_GENERATOR.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
-CREATE TABLE IF NOT EXISTS TMGSERVICES_KEY_GENERATOR
+CREATE EXTERNAL TABLE IF NOT EXISTS staging_SERVICES_KEY_GENERATOR
+ USING com.databricks.spark.csv OPTIONS (path ':dataLocation/SERVICES_KEY_GENERATOR.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
+CREATE TABLE IF NOT EXISTS SERVICES_KEY_GENERATOR
 (
 KEY_GEN_ID INT NOT NULL,
 CLIENT_REF_ID INT NOT NULL,
@@ -2586,14 +2586,14 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_KEY_GENERATOR
 SRC_SYS_REC_ID VARCHAR(15) NULL,
 PRIMARY KEY (KEY_GEN_ID,CLIENT_ID)
 ) USING row OPTIONS(partition_by 'KEY_GEN_ID,CLIENT_ID', buckets '32',redundancy '1',EVICTION_BY 'LRUHEAPPERCENT', overflow 'true' );
- INSERT INTO TMGSERVICES_KEY_GENERATOR SELECT * FROM staging_TMGSERVICES_KEY_GENERATOR;
+ INSERT INTO SERVICES_KEY_GENERATOR SELECT * FROM staging_SERVICES_KEY_GENERATOR;
-DROP TABLE IF EXISTS TMGSERVICES_GROUP_CODE_CONFIG;
-DROP TABLE IF EXISTS staging_TMGSERVICES_GROUP_CODE_CONFIG;
+DROP TABLE IF EXISTS SERVICES_GROUP_CODE_CONFIG;
+DROP TABLE IF EXISTS staging_SERVICES_GROUP_CODE_CONFIG;
 ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA -----
-CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_GROUP_CODE_CONFIG
- USING com.databricks.spark.csv OPTIONS (path ':dataLocation/TMGSERVICES_GROUP_CODE_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
- CREATE TABLE IF NOT EXISTS TMGSERVICES_GROUP_CODE_CONFIG
+CREATE EXTERNAL TABLE IF NOT EXISTS staging_SERVICES_GROUP_CODE_CONFIG
+ USING com.databricks.spark.csv OPTIONS (path 
':dataLocation/SERVICES_GROUP_CODE_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + CREATE TABLE IF NOT EXISTS SERVICES_GROUP_CODE_CONFIG ( GROUP_ID INT NOT NULL, CLIENT_REF_ID INT NOT NULL, @@ -2612,14 +2612,14 @@ CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_GROUP_CODE_CONFIG PRIMARY KEY (GROUP_ID,CLIENT_ID) ) USING row OPTIONS(partition_by 'GROUP_ID,CLIENT_ID', buckets '32',redundancy '1',EVICTION_BY 'LRUHEAPPERCENT', overflow 'true'); - INSERT INTO TMGSERVICES_GROUP_CODE_CONFIG SELECT * FROM staging_TMGSERVICES_GROUP_CODE_CONFIG; + INSERT INTO SERVICES_GROUP_CODE_CONFIG SELECT * FROM staging_SERVICES_GROUP_CODE_CONFIG; -DROP TABLE IF EXISTS TMGSERVICES_DNIS_CONFIG; -DROP TABLE IF EXISTS staging_TMGSERVICES_DNIS_CONFIG; +DROP TABLE IF EXISTS SERVICES_DNIS_CONFIG; +DROP TABLE IF EXISTS staging_SERVICES_DNIS_CONFIG; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_DNIS_CONFIG - USING com.databricks.spark.csv OPTIONS (path ':dataLocation/TMGSERVICES_DNIS_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS TMGSERVICES_DNIS_CONFIG +CREATE EXTERNAL TABLE IF NOT EXISTS staging_SERVICES_DNIS_CONFIG + USING com.databricks.spark.csv OPTIONS (path ':dataLocation/SERVICES_DNIS_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS SERVICES_DNIS_CONFIG ( DNIS_ID INT NOT NULL, CLIENT_REF_ID INT NOT NULL, @@ -2638,14 +2638,14 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_DNIS_CONFIG SRC_SYS_REC_ID VARCHAR(15) NULL, PRIMARY KEY (DNIS_ID,CLIENT_ID) ) USING row OPTIONS(partition_by 'DNIS_ID,CLIENT_ID', buckets '32',redundancy '1',EVICTION_BY 'LRUHEAPPERCENT', overflow 'true'); - INSERT INTO TMGSERVICES_DNIS_CONFIG SELECT * FROM staging_TMGSERVICES_DNIS_CONFIG; + INSERT INTO SERVICES_DNIS_CONFIG SELECT * FROM staging_SERVICES_DNIS_CONFIG; -DROP TABLE IF EXISTS TMGSERVICES_DENIAL_REASON_CONFIG; -DROP TABLE IF EXISTS staging_TMGSERVICES_DENIAL_REASON_CONFIG; +DROP TABLE IF EXISTS SERVICES_DENIAL_REASON_CONFIG; +DROP TABLE IF EXISTS staging_SERVICES_DENIAL_REASON_CONFIG; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_DENIAL_REASON_CONFIG - USING com.databricks.spark.csv OPTIONS (path ':dataLocation/TMGSERVICES_DENIAL_REASON_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS TMGSERVICES_DENIAL_REASON_CONFIG +CREATE EXTERNAL TABLE IF NOT EXISTS staging_SERVICES_DENIAL_REASON_CONFIG + USING com.databricks.spark.csv OPTIONS (path ':dataLocation/SERVICES_DENIAL_REASON_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS SERVICES_DENIAL_REASON_CONFIG ( DENIAL_REASON_ID INT NOT NULL, CLIENT_REF_ID INT NOT NULL, @@ -2662,14 +2662,14 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_DENIAL_REASON_CONFIG SRC_SYS_REC_ID VARCHAR(15) NULL, PRIMARY KEY (DENIAL_REASON_ID,CLIENT_ID) ) USING row OPTIONS(partition_by 'DENIAL_REASON_ID,CLIENT_ID', buckets '32',redundancy '1',EVICTION_BY 'LRUHEAPPERCENT', overflow 'true'); - INSERT INTO TMGSERVICES_DENIAL_REASON_CONFIG SELECT * FROM staging_TMGSERVICES_DENIAL_REASON_CONFIG; + INSERT INTO SERVICES_DENIAL_REASON_CONFIG SELECT * FROM staging_SERVICES_DENIAL_REASON_CONFIG; -DROP TABLE IF EXISTS 
TMGSERVICES_CLIENT_MASTER; -DROP TABLE IF EXISTS staging_TMGSERVICES_CLIENT_MASTER; +DROP TABLE IF EXISTS SERVICES_CLIENT_MASTER; +DROP TABLE IF EXISTS staging_SERVICES_CLIENT_MASTER; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_CLIENT_MASTER - USING com.databricks.spark.csv OPTIONS (path ':dataLocation/TMGSERVICES_CLIENT_MASTER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS TMGSERVICES_CLIENT_MASTER +CREATE EXTERNAL TABLE IF NOT EXISTS staging_SERVICES_CLIENT_MASTER + USING com.databricks.spark.csv OPTIONS (path ':dataLocation/SERVICES_CLIENT_MASTER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS SERVICES_CLIENT_MASTER ( CLIENT_REF_ID INT NOT NULL, VER BIGINT NOT NULL, @@ -2677,7 +2677,7 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_CLIENT_MASTER CLIENT_LEGACY_CD VARCHAR(50) NULL, CLIENT_NAME VARCHAR(10) NULL, MEMBER_ID_FORMAT VARCHAR(15) NULL, - TMG_CALL_CLIENT_CODE VARCHAR(10) NULL, + CALL_CLIENT_CODE VARCHAR(10) NULL, CREATE_date date NULL, UPDATED_date date NULL, USER_NAME VARCHAR(15) NULL, @@ -2687,14 +2687,14 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_CLIENT_MASTER SRC_SYS_REC_ID VARCHAR(15) NULL, PRIMARY KEY (CLIENT_REF_ID,CLIENT_ID) ) USING row OPTIONS(partition_by 'CLIENT_REF_ID,CLIENT_ID', buckets '32',redundancy '1',EVICTION_BY 'LRUHEAPPERCENT', overflow 'true'); - INSERT INTO TMGSERVICES_CLIENT_MASTER SELECT * FROM staging_TMGSERVICES_CLIENT_MASTER; + INSERT INTO SERVICES_CLIENT_MASTER SELECT * FROM staging_SERVICES_CLIENT_MASTER; -DROP TABLE IF EXISTS TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS; -DROP TABLE IF EXISTS staging_TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS; +DROP TABLE IF EXISTS SERVICES_SUBJECT_CATEGORY_TRANSLATIONS; +DROP TABLE IF EXISTS staging_SERVICES_SUBJECT_CATEGORY_TRANSLATIONS; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS - USING com.databricks.spark.csv OPTIONS (path ':dataLocation/TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS +CREATE EXTERNAL TABLE IF NOT EXISTS staging_SERVICES_SUBJECT_CATEGORY_TRANSLATIONS + USING com.databricks.spark.csv OPTIONS (path ':dataLocation/SERVICES_SUBJECT_CATEGORY_TRANSLATIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS SERVICES_SUBJECT_CATEGORY_TRANSLATIONS ( SUBJ_CAT_ID INT NOT NULL, CLIENT_REF_ID INT NOT NULL, @@ -2716,14 +2716,14 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS SRC_SYS_REC_ID VARCHAR(15) NULL, PRIMARY KEY (SUBJ_CAT_ID,CLIENT_ID) ) USING row OPTIONS(partition_by 'SUBJ_CAT_ID,CLIENT_ID', buckets '32',redundancy '1',EVICTION_BY 'LRUHEAPPERCENT', overflow 'true'); - INSERT INTO TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS SELECT * FROM staging_TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS; + INSERT INTO SERVICES_SUBJECT_CATEGORY_TRANSLATIONS SELECT * FROM staging_SERVICES_SUBJECT_CATEGORY_TRANSLATIONS; -DROP TABLE IF EXISTS TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS; -DROP TABLE IF EXISTS staging_TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS; +DROP TABLE IF EXISTS SERVICES_PTMR_VARIABLE_TRANSLATIONS; +DROP TABLE IF EXISTS staging_SERVICES_PTMR_VARIABLE_TRANSLATIONS; ----- 
CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS - USING com.databricks.spark.csv OPTIONS (path ':dataLocation/TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS +CREATE EXTERNAL TABLE IF NOT EXISTS staging_SERVICES_PTMR_VARIABLE_TRANSLATIONS + USING com.databricks.spark.csv OPTIONS (path ':dataLocation/SERVICES_PTMR_VARIABLE_TRANSLATIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS SERVICES_PTMR_VARIABLE_TRANSLATIONS ( VARIABLE_ID INT NOT NULL, VER BIGINT NOT NULL, @@ -2741,14 +2741,14 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS PRIMARY KEY (VARIABLE_ID) ) USING row OPTIONS(partition_by 'VARIABLE_ID', buckets '32',redundancy '1',EVICTION_BY 'LRUHEAPPERCENT', overflow 'true'); - INSERT INTO TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS SELECT * FROM staging_TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS; + INSERT INTO SERVICES_PTMR_VARIABLE_TRANSLATIONS SELECT * FROM staging_SERVICES_PTMR_VARIABLE_TRANSLATIONS; -DROP TABLE IF EXISTS TMGSERVICES_ACCOUNTING_CODES; -DROP TABLE IF EXISTS staging_TMGSERVICES_ACCOUNTING_CODES; +DROP TABLE IF EXISTS SERVICES_ACCOUNTING_CODES; +DROP TABLE IF EXISTS staging_SERVICES_ACCOUNTING_CODES; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_ACCOUNTING_CODES - USING com.databricks.spark.csv OPTIONS (path ':dataLocation/TMGSERVICES_ACCOUNTING_CODES.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS TMGSERVICES_ACCOUNTING_CODES +CREATE EXTERNAL TABLE IF NOT EXISTS staging_SERVICES_ACCOUNTING_CODES + USING com.databricks.spark.csv OPTIONS (path ':dataLocation/SERVICES_ACCOUNTING_CODES.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS SERVICES_ACCOUNTING_CODES ( ACCOUNTING_CODE_ID INT NOT NULL, CLIENT_REF_ID INT NOT NULL, @@ -2771,7 +2771,7 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_ACCOUNTING_CODES SRC_SYS_REC_ID VARCHAR(15) NULL, PRIMARY KEY (ACCOUNTING_CODE_ID,CLIENT_ID) ) USING row OPTIONS(partition_by 'ACCOUNTING_CODE_ID,CLIENT_ID', buckets '32',redundancy '1',EVICTION_BY 'LRUHEAPPERCENT', overflow 'true'); - INSERT INTO TMGSERVICES_ACCOUNTING_CODES SELECT * FROM staging_TMGSERVICES_ACCOUNTING_CODES; + INSERT INTO SERVICES_ACCOUNTING_CODES SELECT * FROM staging_SERVICES_ACCOUNTING_CODES; DROP TABLE IF EXISTS UNAPPLIED_CASH; DROP TABLE IF EXISTS staging_UNAPPLIED_CASHE; diff --git a/dtests/src/resources/scripts/clusterRecovery/createAndLoadMixedTables.sql b/dtests/src/resources/scripts/clusterRecovery/createAndLoadMixedTables.sql index f181c5c5ae..d049364aeb 100644 --- a/dtests/src/resources/scripts/clusterRecovery/createAndLoadMixedTables.sql +++ b/dtests/src/resources/scripts/clusterRecovery/createAndLoadMixedTables.sql @@ -1,226 +1,226 @@ DROP TABLE IF EXISTS PERSON_EVENT; DROP TABLE IF EXISTS staging_PERSON_EVENT; CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_EVENT - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/PERSON_EVENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path 
'/export/shared/QA_DATA/USER2_Data_20G/PERSON_EVENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_EVENT USING column OPTIONS(partition_by 'PRSN_EVNT_ID', buckets '32',redundancy '1',key_columns 'CLIENT_ID,PRSN_EVNT_ID ' ) AS (SELECT * FROM staging_PERSON_EVENT); DROP TABLE IF EXISTS PERSON_EVENT_ATTRIBUTE; DROP TABLE IF EXISTS staging_PERSON_EVENT_ATTRIBUTE; CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_EVENT_ATTRIBUTE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/PERSON_EVENT_ATTRIBUTE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/PERSON_EVENT_ATTRIBUTE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_EVENT_ATTRIBUTE USING column OPTIONS(partition_by 'PRSN_EVNT_ID', buckets '32',redundancy '1',key_columns 'CLIENT_ID,PRSN_EVNT_ID,PRSN_EVNT_ATTR_ID ' ) AS (SELECT * FROM staging_PERSON_EVENT_ATTRIBUTE); DROP TABLE IF EXISTS CLAIM_STATUS; DROP TABLE IF EXISTS staging_CLAIM_STATUS; CREATE EXTERNAL TABLE IF NOT EXISTS staging_CLAIM_STATUS - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/CLAIM_STATUS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/CLAIM_STATUS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS CLAIM_STATUS USING column OPTIONS(partition_by 'PRSN_ID',buckets '32',redundancy '1',key_columns 'CLIENT_ID,PRSN_ID,CLM_ID,SEQ_NUM,CLM_STAT_ID ' ) AS (SELECT * FROM staging_CLAIM_STATUS); DROP TABLE IF EXISTS CLAIM_ADDITIONAL_DIAGNOSIS; DROP TABLE IF EXISTS staging_CLAIM_ADDITIONAL_DIAGNOSIS; CREATE EXTERNAL TABLE IF NOT EXISTS staging_CLAIM_ADDITIONAL_DIAGNOSIS - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/CLAIM_ADDITIONAL_DIAGNOSIS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/CLAIM_ADDITIONAL_DIAGNOSIS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS CLAIM_ADDITIONAL_DIAGNOSIS USING column OPTIONS(partition_by 'PRSN_ID',buckets '32',redundancy '1',key_columns 'CLIENT_ID,PRSN_ID,CLM_ID,CLM_ADD_DIAG_ID ' ) AS (SELECT * FROM staging_CLAIM_ADDITIONAL_DIAGNOSIS); DROP TABLE IF EXISTS CLAIM_DETAIL; DROP TABLE IF EXISTS staging_CLAIM_DETAIL; CREATE EXTERNAL TABLE IF NOT EXISTS staging_CLAIM_DETAIL - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/CLAIM_DETAIL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/CLAIM_DETAIL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS CLAIM_DETAIL USING column OPTIONS(partition_by 'PRSN_ID', buckets '32',redundancy '1',key_columns 'CLIENT_ID,PRSN_ID,CLM_ID,SEQ_NUM,CLM_DTL_ID ' ) AS (SELECT * FROM staging_CLAIM_DETAIL); DROP TABLE IF EXISTS CLAIM_PAYMENT_DETAIL; DROP TABLE IF EXISTS staging_CLAIM_PAYMENT_DETAIL; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL 
TABLE IF NOT EXISTS staging_CLAIM_PAYMENT_DETAIL - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/CLAIM_PAYMENT_DETAIL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/CLAIM_PAYMENT_DETAIL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS CLAIM_PAYMENT_DETAIL USING column OPTIONS(partition_by 'PRSN_ID',buckets '32',redundancy '1',key_columns 'CLIENT_ID,PRSN_ID,CLM_ID,CLM_PAY_ID,CLM_PAY_DTL_ID ' ) AS (SELECT * FROM staging_CLAIM_PAYMENT_DETAIL); DROP TABLE IF EXISTS CLAIM_ATTRIBUTE; DROP TABLE IF EXISTS staging_CLAIM_ATTRIBUTE; CREATE EXTERNAL TABLE IF NOT EXISTS staging_CLAIM_ATTRIBUTE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/CLAIM_ATTRIBUTE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/CLAIM_ATTRIBUTE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS CLAIM_ATTRIBUTE USING column OPTIONS(partition_by 'PRSN_ID', buckets '32',redundancy '1',key_columns 'CLIENT_ID,PRSN_ID,CLM_ID,CLM_ATTR_ID ' ) AS (SELECT * FROM staging_CLAIM_ATTRIBUTE); DROP TABLE IF EXISTS CLAIM; DROP TABLE IF EXISTS staging_CLAIM; CREATE EXTERNAL TABLE IF NOT EXISTS staging_CLAIM - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/CLAIM.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/CLAIM.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS CLAIM USING column OPTIONS(partition_by 'PRSN_ID', buckets '32',redundancy '1',key_columns 'CLIENT_ID,PRSN_ID,CLM_ID ' ) AS (SELECT * FROM staging_CLAIM); DROP TABLE IF EXISTS PERSON_CONTACT; DROP TABLE IF EXISTS staging_PERSON_CONTACT; CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_CONTACT - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/PERSON_CONTACT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/PERSON_CONTACT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_CONTACT USING column OPTIONS(partition_by 'PRSN_ID',buckets '32',redundancy '1',key_columns 'CLIENT_ID,PRSN_ID,CNTC_ID,PRSN_CNTC_ID ' ) AS (SELECT * FROM staging_PERSON_CONTACT); DROP TABLE IF EXISTS ORGANIZATION_CODE; DROP TABLE IF EXISTS staging_ORGANIZATION_CODE; CREATE EXTERNAL TABLE IF NOT EXISTS staging_ORGANIZATION_CODE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/ORGANIZATION_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/ORGANIZATION_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS ORGANIZATION_CODE USING column OPTIONS(partition_by 'ORG_ID', buckets '32',redundancy '1',key_columns 'CLIENT_ID,ORG_ID,CD_VAL_ID,ORG_CD_ID ' ) AS (SELECT * FROM staging_ORGANIZATION_CODE); DROP TABLE IF EXISTS 
COMPLAINT_STATUS; DROP TABLE IF EXISTS staging_COMPLAINT_STATUS; CREATE EXTERNAL TABLE IF NOT EXISTS staging_COMPLAINT_STATUS - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/COMPLAINT_STATUS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/COMPLAINT_STATUS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS COMPLAINT_STATUS USING column OPTIONS(partition_by 'INQ_ID',buckets '32',redundancy '1',key_columns 'CLIENT_ID,INQ_ID,COMPLAINT_ID,COMPLAINT_STAT_ID ' ) AS (SELECT * FROM staging_COMPLAINT_STATUS); DROP TABLE IF EXISTS CONTACT; DROP TABLE IF EXISTS staging_CONTACT; CREATE EXTERNAL TABLE IF NOT EXISTS staging_CONTACT - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/CONTACT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/CONTACT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS CONTACT USING column OPTIONS(partition_by 'CNTC_ID', buckets '32',redundancy '1',key_columns 'CLIENT_ID,CNTC_ID' ) AS (SELECT * FROM staging_CONTACT); DROP TABLE IF EXISTS CLAIM_PAYMENT; DROP TABLE IF EXISTS staging_CLAIM_PAYMENT; CREATE EXTERNAL TABLE IF NOT EXISTS staging_CLAIM_PAYMENT - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/CLAIM_PAYMENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/CLAIM_PAYMENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS CLAIM_PAYMENT USING column OPTIONS(partition_by 'CLM_PAY_ID', buckets '32',redundancy '1',key_columns 'CLIENT_ID,CLM_PAY_ID ' ) AS (SELECT * FROM staging_CLAIM_PAYMENT); DROP TABLE IF EXISTS TOPIC_COMMUNICATION; DROP TABLE IF EXISTS staging_TOPIC_COMMUNICATION; CREATE EXTERNAL TABLE IF NOT EXISTS staging_TOPIC_COMMUNICATION - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/TOPIC_COMMUNICATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/TOPIC_COMMUNICATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS TOPIC_COMMUNICATION USING column OPTIONS(partition_by 'CMCN_INQ_ID', buckets '32',redundancy '1',key_columns ' CLIENT_ID,CMCN_INQ_ID,TPC_INQ_ID,CMCN_ID,TPC_ID' ) AS (SELECT * FROM staging_TOPIC_COMMUNICATION); DROP TABLE IF EXISTS CONTACT_TYPE_CONTACT; DROP TABLE IF EXISTS staging_CONTACT_TYPE_CONTACT; CREATE EXTERNAL TABLE IF NOT EXISTS staging_CONTACT_TYPE_CONTACT - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/CONTACT_TYPE_CONTACT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/CONTACT_TYPE_CONTACT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS CONTACT_TYPE_CONTACT USING column OPTIONS(partition_by 'CNTC_ID', buckets '32',redundancy '1',key_columns 
'CLIENT_ID,CNTC_ID,ORG_CNTC_TYP_ID,CNTC_TYP_CNTC_ID ' ) AS (SELECT * FROM staging_CONTACT_TYPE_CONTACT); DROP TABLE IF EXISTS TOPIC; DROP TABLE IF EXISTS staging_TOPIC; CREATE EXTERNAL TABLE IF NOT EXISTS staging_TOPIC - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/TOPIC.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/TOPIC.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS TOPIC USING column OPTIONS(partition_by 'INQ_ID',buckets '32',redundancy '1',key_columns 'CLIENT_ID,INQ_ID,TPC_ID ' ) AS (SELECT * FROM staging_TOPIC); DROP TABLE IF EXISTS LINE_ADDITIONAL_DIAGNOSIS; DROP TABLE IF EXISTS staging_LINE_ADDITIONAL_DIAGNOSIS; CREATE EXTERNAL TABLE IF NOT EXISTS staging_LINE_ADDITIONAL_DIAGNOSIS - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/LINE_ADDITIONAL_DIAGNOSIS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/LINE_ADDITIONAL_DIAGNOSIS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS LINE_ADDITIONAL_DIAGNOSIS USING column OPTIONS(partition_by 'PRSN_ID',buckets '32',redundancy '1',key_columns 'CLIENT_ID,PRSN_ID,CLM_ID,SEQ_NUM,CLM_ADD_DIAG_ID,LN_ADD_DIAG_ID ' ) AS (SELECT * FROM staging_LINE_ADDITIONAL_DIAGNOSIS); DROP TABLE IF EXISTS PROCEDURE_CODE; DROP TABLE IF EXISTS staging_PROCEDURE_CODE; CREATE EXTERNAL TABLE IF NOT EXISTS staging_PROCEDURE_CODE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/PROCEDURE_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/PROCEDURE_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PROCEDURE_CODE USING column OPTIONS(partition_by 'PR_CD_ID', buckets '32',redundancy '1',key_columns 'CLIENT_ID,PR_CD_ID ' ) AS (SELECT * FROM staging_PROCEDURE_CODE); DROP TABLE IF EXISTS CODE_VALUE; DROP TABLE IF EXISTS staging_CODE_VALUE; CREATE EXTERNAL TABLE IF NOT EXISTS staging_CODE_VALUE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/CODE_VALUE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/CODE_VALUE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS CODE_VALUE USING column OPTIONS(partition_by 'CD_VAL_ID', buckets '32',redundancy '1',key_columns 'CLIENT_ID,CD_VAL_ID') AS (SELECT * FROM staging_CODE_VALUE); DROP TABLE IF EXISTS POSTAL_ADDRESS; DROP TABLE IF EXISTS staging_POSTAL_ADDRESS; CREATE EXTERNAL TABLE IF NOT EXISTS staging_POSTAL_ADDRESS - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/POSTAL_ADDRESS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/POSTAL_ADDRESS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS POSTAL_ADDRESS USING column 
OPTIONS(partition_by 'CNTC_ID',redundancy '1',key_columns 'CLIENT_ID,CNTC_ID,PSTL_ADDR_ID') AS (SELECT * FROM staging_POSTAL_ADDRESS); DROP TABLE IF EXISTS CLIENTS; DROP TABLE IF EXISTS staging_CLIENTS; CREATE EXTERNAL TABLE IF NOT EXISTS staging_CLIENTS - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/CLIENTS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/CLIENTS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS CLIENTS USING column OPTIONS(partition_by 'CLIENT_ID', buckets '32',redundancy '1',key_columns 'CLIENT_ID ' ) AS (SELECT * FROM staging_CLIENTS); DROP TABLE IF EXISTS PERSONS; DROP TABLE IF EXISTS staging_PERSONS; CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSONS - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/PERSONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/PERSONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSONS USING column OPTIONS(partition_by 'PRSN_ID', buckets '32',redundancy '1',key_columns 'CLIENT_ID,PRSN_ID ' ) AS (SELECT * FROM staging_PERSONS); DROP TABLE IF EXISTS BANK; DROP TABLE IF EXISTS staging_BANK; CREATE EXTERNAL TABLE IF NOT EXISTS staging_BANK - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/BANK.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/BANK.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS BANK USING column OPTIONS(partition_by 'BNK_ORG_ID', buckets '32',redundancy '1',key_columns 'CLIENT_ID,BNK_ORG_ID,BNK_ID ' ) AS (SELECT * FROM staging_BANK); DROP TABLE IF EXISTS BILLING_ENTITY; DROP TABLE IF EXISTS staging_BILLING_ENTITY; CREATE EXTERNAL TABLE IF NOT EXISTS staging_BILLING_ENTITY - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/BILLING_ENTITY.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/BILLING_ENTITY.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS BILLING_ENTITY USING column OPTIONS(partition_by 'BILL_ENT_ID', buckets '32',redundancy '1',key_columns 'CLIENT_ID,BILL_ENT_ID') AS (SELECT * FROM staging_BILLING_ENTITY); DROP TABLE IF EXISTS BENEFIT_PACKAGE; DROP TABLE IF EXISTS staging_BENEFIT_PACKAGE; CREATE EXTERNAL TABLE IF NOT EXISTS staging_BENEFIT_PACKAGE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/BENEFIT_PACKAGE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/BENEFIT_PACKAGE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS BENEFIT_PACKAGE USING column OPTIONS(partition_by 'BENE_PKG_ID', buckets '32',redundancy '1',key_columns 'CLIENT_ID,BENE_PKG_ID' ) AS (SELECT * FROM staging_BENEFIT_PACKAGE); DROP TABLE IF EXISTS GROUPS; 
DROP TABLE IF EXISTS staging_GROUPS; CREATE EXTERNAL TABLE IF NOT EXISTS staging_GROUPS - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/GROUPS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/GROUPS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS GROUPS USING column OPTIONS(partition_by 'GRP_ID', buckets '32',redundancy '1',key_columns 'CLIENT_ID,GRP_ID' ) AS (SELECT * FROM staging_GROUPS); DROP TABLE IF EXISTS COMMUNICATION; DROP TABLE IF EXISTS staging_COMMUNICATION; CREATE EXTERNAL TABLE IF NOT EXISTS staging_COMMUNICATION - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/COMMUNICATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/COMMUNICATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS COMMUNICATION USING column OPTIONS(partition_by 'INQ_ID', buckets '32',redundancy '1',key_columns ' CLIENT_ID,INQ_ID,CMCN_ID' ) AS (SELECT * FROM staging_COMMUNICATION); DROP TABLE IF EXISTS COMPLAINT; DROP TABLE IF EXISTS staging_COMPLAINT; CREATE EXTERNAL TABLE IF NOT EXISTS staging_COMPLAINT - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/COMPLAINT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/COMPLAINT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS COMPLAINT USING column OPTIONS(partition_by 'INQ_ID',buckets '32',redundancy '1',key_columns 'CLIENT_ID,INQ_ID,COMPLAINT_ID ' ) AS (SELECT * FROM staging_COMPLAINT); DROP TABLE IF EXISTS ADJUSTMENT; DROP TABLE IF EXISTS staging_ADJUSTMENT; CREATE EXTERNAL TABLE IF NOT EXISTS staging_ADJUSTMENT - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/ADJUSTMENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/ADJUSTMENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS ADJUSTMENT USING column OPTIONS(partition_by 'BILL_ENT_ID',buckets '32',redundancy '1',key_columns 'CLIENT_ID,BILL_ENT_ID,ADJ_ID ' ) AS (SELECT * FROM staging_ADJUSTMENT); DROP TABLE IF EXISTS CLAIM_PAYMENT_REDUCTION; DROP TABLE IF EXISTS staging_CLAIM_PAYMENT_REDUCTION; CREATE EXTERNAL TABLE IF NOT EXISTS staging_CLAIM_PAYMENT_REDUCTION - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/CLAIM_PAYMENT_REDUCTION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/CLAIM_PAYMENT_REDUCTION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS CLAIM_PAYMENT_REDUCTION USING column OPTIONS(partition_by 'CLM_PAY_RDCTN_ID', buckets '32',redundancy '1',key_columns 'CLIENT_ID,CLM_PAY_RDCTN_ID ' ) AS (SELECT * FROM staging_CLAIM_PAYMENT_REDUCTION); DROP TABLE IF EXISTS CLAIM_REDUCTION_DETAIL; DROP TABLE IF EXISTS 
staging_CLAIM_REDUCTION_DETAIL; CREATE EXTERNAL TABLE IF NOT EXISTS staging_CLAIM_REDUCTION_DETAIL - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/CLAIM_REDUCTION_DETAIL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/CLAIM_REDUCTION_DETAIL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS CLAIM_REDUCTION_DETAIL USING column OPTIONS(partition_by 'PRSN_ID',buckets '32',redundancy '1',key_columns 'CLIENT_ID,PRSN_ID,CLM_ID,CLM_PAY_RDCTN_ID,CLM_RDCTN_DTL_ID ' ) AS (SELECT * FROM staging_CLAIM_REDUCTION_DETAIL); DROP TABLE IF EXISTS CLAIM_REDUCTION_HISTORY; DROP TABLE IF EXISTS staging_CLAIM_REDUCTION_HISTORY; CREATE EXTERNAL TABLE IF NOT EXISTS staging_CLAIM_REDUCTION_HISTORY - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/CLAIM_REDUCTION_HISTORY.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/CLAIM_REDUCTION_HISTORY.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS CLAIM_REDUCTION_HISTORY USING column OPTIONS(partition_by 'CLM_PAY_RDCTN_ID', buckets '32',redundancy '1',key_columns 'CLIENT_ID,CLM_PAY_RDCTN_ID,CLM_RDCTN_HIST_ID ' ) AS (SELECT * FROM staging_CLAIM_REDUCTION_HISTORY); DROP TABLE IF EXISTS CLAIM_COB; DROP TABLE IF EXISTS staging_CLAIM_COB; CREATE EXTERNAL TABLE IF NOT EXISTS staging_CLAIM_COB - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/CLAIM_COB.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/CLAIM_COB.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS CLAIM_COB USING column OPTIONS(partition_by 'PRSN_ID',buckets '32',redundancy '1',key_columns 'CLIENT_ID,PRSN_ID,CLM_ID,CLM_COB_ID ' ) AS (SELECT * FROM staging_CLAIM_COB); DROP TABLE IF EXISTS CLAIM_HOSPITAL; DROP TABLE IF EXISTS staging_CLAIM_HOSPITAL; CREATE EXTERNAL TABLE IF NOT EXISTS staging_CLAIM_HOSPITAL - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/CLAIM_HOSPITAL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/CLAIM_HOSPITAL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS CLAIM_HOSPITAL USING column OPTIONS(partition_by 'PRSN_ID',buckets '32',redundancy '1',key_columns 'CLIENT_ID,PRSN_ID,CLM_ID,CLM_HOSP_ID ' ) AS (SELECT * FROM staging_CLAIM_HOSPITAL); DROP TABLE IF EXISTS UM_INPATIENT; DROP TABLE IF EXISTS staging_UM_INPATIENT; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_UM_INPATIENT - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/UM_INPATIENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/UM_INPATIENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS 
UM_INPATIENT USING column OPTIONS(partition_by 'UM_RVW_ID',buckets '32',redundancy '1',key_columns 'CLIENT_ID,UM_RVW_ID,UM_INPT_ID ' ) AS (SELECT * FROM staging_UM_INPATIENT); DROP TABLE IF EXISTS UM_SERVICE; DROP TABLE IF EXISTS staging_UM_SERVICE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_UM_SERVICE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/UM_SERVICE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/UM_SERVICE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS UM_SERVICE USING column OPTIONS(partition_by 'UM_RVW_ID',buckets '32',redundancy '1',key_columns 'CLIENT_ID,UM_RVW_ID,UM_SERV_ID ' ) AS (SELECT * FROM staging_UM_SERVICE); DROP TABLE IF EXISTS INQUIRY; DROP TABLE IF EXISTS staging_INQUIRY; CREATE EXTERNAL TABLE IF NOT EXISTS staging_INQUIRY - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/INQUIRY.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/INQUIRY.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS INQUIRY( INQ_ID BIGINT NOT NULL, VER BIGINT NOT NULL, CLIENT_ID BIGINT NOT NULL, @@ -238,7 +238,7 @@ INSERT INTO INQUIRY SELECT * FROM staging_INQUIRY; DROP TABLE IF EXISTS ORGANIZATIONS; DROP TABLE IF EXISTS staging_ORGANIZATIONS; CREATE EXTERNAL TABLE IF NOT EXISTS staging_ORGANIZATIONS - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/ORGANIZATIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/ORGANIZATIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS ORGANIZATIONS(ORG_ID BIGINT NOT NULL, VER BIGINT NOT NULL, CLIENT_ID BIGINT NOT NULL, @@ -260,7 +260,7 @@ INSERT INTO ORGANIZATIONS SELECT * FROM staging_ORGANIZATIONS; DROP TABLE IF EXISTS UM_REVIEW; DROP TABLE IF EXISTS staging_UM_REVIEW; CREATE EXTERNAL TABLE IF NOT EXISTS staging_UM_REVIEW - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/UM_REVIEW.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/UM_REVIEW.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS UM_REVIEW(UM_RVW_ID BIGINT NOT NULL, VER BIGINT NOT NULL, CLIENT_ID BIGINT NOT NULL, @@ -309,7 +309,7 @@ DROP TABLE IF EXISTS INVOICE_DETAIL; DROP TABLE IF EXISTS staging_INVOICE_DETAIL; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_INVOICE_DETAIL - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/INVOICE_DETAIL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/INVOICE_DETAIL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS INVOICE_DETAIL( INV_ID 
BIGINT NOT NULL, CLIENT_ID BIGINT NOT NULL, @@ -338,7 +338,7 @@ DROP TABLE IF EXISTS BENEFIT_GROUP_NAME; DROP TABLE IF EXISTS staging_BENEFIT_GROUP_NAME; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_BENEFIT_GROUP_NAME - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/BENEFIT_GROUP_NAME.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/BENEFIT_GROUP_NAME.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS BENEFIT_GROUP_NAME( GRP_ID BIGINT NOT NULL, BENE_GRP_ID BIGINT NOT NULL, @@ -362,7 +362,7 @@ DROP TABLE IF EXISTS BENEFIT_GROUPS; DROP TABLE IF EXISTS staging_BENEFIT_GROUPS; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_BENEFIT_GROUPS - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/BENEFIT_GROUPS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/BENEFIT_GROUPS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS BENEFIT_GROUPS( GRP_ID BIGINT NOT NULL, BENE_PKG_ID BIGINT NOT NULL, @@ -385,7 +385,7 @@ DROP TABLE IF EXISTS BILLING_RECONCILIATION; DROP TABLE IF EXISTS staging_BILLING_RECONCILIATION; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_BILLING_RECONCILIATION - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/BILLING_RECONCILIATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/BILLING_RECONCILIATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS BILLING_RECONCILIATION( BILL_ENT_ID BIGINT NOT NULL, BILL_RECON_ID BIGINT NOT NULL, @@ -410,7 +410,7 @@ DROP TABLE IF EXISTS CHECKS; DROP TABLE IF EXISTS staging_CHECKS; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_CHECKS - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/CHECKS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/CHECKS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS CHECKS( CLM_PAY_ID BIGINT NOT NULL, CHK_ID BIGINT NOT NULL, @@ -436,7 +436,7 @@ DROP TABLE IF EXISTS CLIENT_REFERENCE_DATA; DROP TABLE IF EXISTS staging_CLIENT_REFERENCE_DATA; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_CLIENT_REFERENCE_DATA - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/CLIENT_REFERENCE_DATA.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/CLIENT_REFERENCE_DATA.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS 
CLIENT_REFERENCE_DATA( CLIENT_REF_DTA_ID BIGINT NOT NULL, VER BIGINT NOT NULL, @@ -459,7 +459,7 @@ DROP TABLE IF EXISTS COB_CLAIM_DIAGNOSIS; DROP TABLE IF EXISTS staging_COB_CLAIM_DIAGNOSIS; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_COB_CLAIM_DIAGNOSIS - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/COB_CLAIM_DIAGNOSIS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/COB_CLAIM_DIAGNOSIS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS COB_CLAIM_DIAGNOSIS( PRSN_ID BIGINT NOT NULL, PRSN_COB_ID BIGINT NOT NULL, @@ -481,7 +481,7 @@ DROP TABLE IF EXISTS COB_ORGANIZATION_PERSON; DROP TABLE IF EXISTS staging_COB_ORGANIZATION_PERSON; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_COB_ORGANIZATION_PERSON - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/COB_ORGANIZATION_PERSON.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/COB_ORGANIZATION_PERSON.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS COB_ORGANIZATION_PERSON( PRSN_ID BIGINT NOT NULL, PRSN_COB_ID BIGINT NOT NULL, @@ -514,7 +514,7 @@ DROP TABLE IF EXISTS DIAGNOSIS_CODE; DROP TABLE IF EXISTS staging_DIAGNOSIS_CODE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_DIAGNOSIS_CODE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/DIAGNOSIS_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/DIAGNOSIS_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS DIAGNOSIS_CODE( DIAG_CD_ID BIGINT NOT NULL, VER BIGINT NOT NULL, @@ -540,7 +540,7 @@ DROP TABLE IF EXISTS ELECTRONIC_ADDRESS; DROP TABLE IF EXISTS staging_ELECTRONIC_ADDRESS; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_ELECTRONIC_ADDRESS - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/ELECTRONIC_ADDRESS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/ELECTRONIC_ADDRESS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS ELECTRONIC_ADDRESS( CNTC_ID BIGINT NOT NULL, ELEC_ADDR_ID BIGINT NOT NULL, @@ -561,7 +561,7 @@ DROP TABLE IF EXISTS ENTITY_BANK_ACCOUNT; DROP TABLE IF EXISTS staging_ENTITY_BANK_ACCOUNT; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_ENTITY_BANK_ACCOUNT - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/ENTITY_BANK_ACCOUNT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/ENTITY_BANK_ACCOUNT.dat', header 'true', 
inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS ENTITY_BANK_ACCOUNT( bill_ent_id bigint NOT NULL, bnk_acct_id bigint NOT NULL, @@ -582,7 +582,7 @@ DROP TABLE IF EXISTS ETL_METADATA; DROP TABLE IF EXISTS staging_ETL_METADATA; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_ETL_METADATA - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/ETL_METADATA.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/ETL_METADATA.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS ETL_METADATA( MAIN_TBL VARCHAR(15) NOT NULL, TGT_TBL VARCHAR(15) NOT NULL, @@ -607,7 +607,7 @@ DROP TABLE IF EXISTS EXHIBIT; DROP TABLE IF EXISTS staging_EXHIBIT; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_EXHIBIT - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/EXHIBIT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/EXHIBIT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS EXHIBIT( EXHIBIT_ID BIGINT NOT NULL, VER BIGINT NOT NULL, @@ -625,7 +625,7 @@ DROP TABLE IF EXISTS EXPLANATION_CODE; DROP TABLE IF EXISTS staging_EXPLANATION_CODE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_EXPLANATION_CODE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/EXPLANATION_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/EXPLANATION_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS EXPLANATION_CODE( EXPLAIN_CD_ID BIGINT NOT NULL, VER BIGINT NOT NULL, @@ -654,7 +654,7 @@ DROP TABLE IF EXISTS FILE_TRANSFER_RUN; DROP TABLE IF EXISTS staging_FILE_TRANSFER_RUN; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_FILE_TRANSFER_RUN - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/FILE_TRANSFER_RUN.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/FILE_TRANSFER_RUN.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS FILE_TRANSFER_RUN( FL_TRANS_RUN_ID BIGINT NOT NULL, VER BIGINT NOT NULL, @@ -688,7 +688,7 @@ DROP TABLE IF EXISTS FILE_TRANSFER_STAT; DROP TABLE IF EXISTS staging_FILE_TRANSFER_STAT; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_FILE_TRANSFER_STAT - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/FILE_TRANSFER_STAT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/FILE_TRANSFER_STAT.dat', header 'true', inferSchema 'true', nullValue 'NULL', 
maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS FILE_TRANSFER_STAT( FL_TRANS_STAT_ID BIGINT NOT NULL, VER BIGINT NOT NULL, @@ -727,7 +727,7 @@ DROP TABLE IF EXISTS GENERAL_LEDGER; DROP TABLE IF EXISTS staging_GENERAL_LEDGER; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_GENERAL_LEDGER - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/GENERAL_LEDGER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/GENERAL_LEDGER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS GENERAL_LEDGER( BILL_ENT_ID BIGINT NOT NULL, GL_ID BIGINT NOT NULL, @@ -754,7 +754,7 @@ DROP TABLE IF EXISTS GROUP_RELATION; DROP TABLE IF EXISTS staging_GROUP_RELATION; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_GROUP_RELATION - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/GROUP_RELATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/GROUP_RELATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS GROUP_RELATION( GRP_ID BIGINT NOT NULL, GRP_RELN_ID BIGINT NOT NULL, @@ -774,7 +774,7 @@ DROP TABLE IF EXISTS LEP_APPEAL; DROP TABLE IF EXISTS staging_LEP_APPEAL; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_LEP_APPEAL - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/LEP_APPEAL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/LEP_APPEAL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS LEP_APPEAL( PRSN_ID BIGINT NOT NULL, LEP_APL_ID BIGINT NOT NULL, @@ -796,7 +796,7 @@ DROP TABLE IF EXISTS LETTER; DROP TABLE IF EXISTS staging_LETTER; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_LETTER - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/LETTER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/LETTER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS LETTER( PRSN_ID BIGINT NOT NULL, LTR_ID BIGINT NOT NULL, @@ -814,7 +814,7 @@ DROP TABLE IF EXISTS LINE_DISALLOW_EXPLANATION; DROP TABLE IF EXISTS staging_BLINE_DISALLOW_EXPLANATION; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_LINE_DISALLOW_EXPLANATION - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/LINE_DISALLOW_EXPLANATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/LINE_DISALLOW_EXPLANATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS 
LINE_DISALLOW_EXPLANATION( PRSN_ID BIGINT NOT NULL, CLM_ID BIGINT NOT NULL, @@ -837,7 +837,7 @@ DROP TABLE IF EXISTS LINE_PROCEDURE_MODIFIER; DROP TABLE IF EXISTS staging_LINE_PROCEDURE_MODIFIER; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_LINE_PROCEDURE_MODIFIER - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/LINE_PROCEDURE_MODIFIER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/LINE_PROCEDURE_MODIFIER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS LINE_PROCEDURE_MODIFIER( PRSN_ID BIGINT NOT NULL, CLM_ID BIGINT NOT NULL, @@ -859,7 +859,7 @@ DROP TABLE IF EXISTS MARX_CALENDAR; DROP TABLE IF EXISTS staging_MARX_CALENDAR; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_MARX_CALENDAR - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/MARX_CALENDAR.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/MARX_CALENDAR.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS MARX_CALENDAR( MARX_CAL_ID BIGINT NOT NULL, VER BIGINT NOT NULL, @@ -881,7 +881,7 @@ DROP TABLE IF EXISTS NOTE; DROP TABLE IF EXISTS staging_NOTE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_NOTE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/NOTE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/NOTE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS NOTE( INQ_ID BIGINT NOT NULL, SRC_ID BIGINT NOT NULL, @@ -906,7 +906,7 @@ DROP TABLE IF EXISTS ORGANIZATION_CONTACT_TYPE; DROP TABLE IF EXISTS staging_ORGANIZATION_CONTACT_TYPE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_ORGANIZATION_CONTACT_TYPE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/ORGANIZATION_CONTACT_TYPE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/ORGANIZATION_CONTACT_TYPE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS ORGANIZATION_CONTACT_TYPE( ORG_ID BIGINT NOT NULL, CNTC_TYP_REF_ID BIGINT NOT NULL, @@ -925,7 +925,7 @@ DROP TABLE IF EXISTS PAYMENT; DROP TABLE IF EXISTS staging_PAYMENT; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PAYMENT - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/PAYMENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/PAYMENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PAYMENT( BILL_ENT_ID BIGINT NOT NULL, 
PAY_ID BIGINT NOT NULL, @@ -953,7 +953,7 @@ DROP TABLE IF EXISTS PERSON_COB; DROP TABLE IF EXISTS staging_PERSON_COB; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_COB - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/PERSON_COB.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/PERSON_COB.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_COB( PRSN_ID BIGINT NOT NULL, PRSN_COB_ID BIGINT NOT NULL, @@ -1004,7 +1004,7 @@ DROP TABLE IF EXISTS PERSON_CODE; DROP TABLE IF EXISTS staging_PERSON_CODE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_CODE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/PERSON_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/PERSON_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_CODE( PRSN_ID BIGINT NOT NULL, CD_VAL_ID BIGINT NOT NULL, @@ -1025,7 +1025,7 @@ DROP TABLE IF EXISTS PERSON_LEP_EVENT; DROP TABLE IF EXISTS staging_PERSON_LEP_EVENT; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_LEP_EVENT - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/PERSON_LEP_EVENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/PERSON_LEP_EVENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_LEP_EVENT( PRSN_ID BIGINT NOT NULL, PRSN_LEP_EVNT_ID BIGINT NOT NULL, @@ -1047,7 +1047,7 @@ DROP TABLE IF EXISTS PERSON_LEP_PROFILE; DROP TABLE IF EXISTS staging_PERSON_LEP_PROFILE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_LEP_PROFILE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/PERSON_LEP_PROFILE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/PERSON_LEP_PROFILE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_LEP_PROFILE( PRSN_ID BIGINT NOT NULL, PRSN_LEP_PRFL_ID BIGINT NOT NULL, @@ -1067,7 +1067,7 @@ DROP TABLE IF EXISTS PERSON_ORGANIZATION_RELATION; DROP TABLE IF EXISTS staging_PERSON_ORGANIZATION_RELATION; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_ORGANIZATION_RELATION - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/PERSON_ORGANIZATION_RELATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/PERSON_ORGANIZATION_RELATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS 
PERSON_ORGANIZATION_RELATION( PRSN_ID BIGINT NOT NULL, ORG_ID BIGINT NOT NULL, @@ -1089,7 +1089,7 @@ DROP TABLE IF EXISTS PERSON_PAYMENT_OPTIONS; DROP TABLE IF EXISTS staging_PERSON_PAYMENT_OPTIONS; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_PAYMENT_OPTIONS - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/PERSON_PAYMENT_OPTIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/PERSON_PAYMENT_OPTIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_PAYMENT_OPTIONS( PRSN_ID BIGINT NOT NULL, PRSN_PAY_OPTN_ID BIGINT NOT NULL, @@ -1111,7 +1111,7 @@ DROP TABLE IF EXISTS PERSON_RELATION; DROP TABLE IF EXISTS staging_PERSON_RELATION; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_RELATION - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/PERSON_RELATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/PERSON_RELATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_RELATION( PRSN_ID BIGINT NOT NULL, RLTD_PRSN_ID BIGINT NOT NULL, @@ -1133,7 +1133,7 @@ DROP TABLE IF EXISTS PERSON_ROLE; DROP TABLE IF EXISTS staging_PERSON_ROLE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_ROLE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/PERSON_ROLE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/PERSON_ROLE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_ROLE( PRSN_ID BIGINT NOT NULL, PRSN_TYP_ID BIGINT NOT NULL, @@ -1154,7 +1154,7 @@ DROP TABLE IF EXISTS PERSON_SUBSIDY_PROFILE; DROP TABLE IF EXISTS staging_PERSON_SUBSIDY_PROFILE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_SUBSIDY_PROFILE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/PERSON_SUBSIDY_PROFILE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/PERSON_SUBSIDY_PROFILE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_SUBSIDY_PROFILE( PRSN_ID BIGINT NOT NULL, PRSN_SBSDY_PRFL_ID BIGINT NOT NULL, @@ -1177,7 +1177,7 @@ DROP TABLE IF EXISTS PERSON_WORK_ITEM; DROP TABLE IF EXISTS staging_PERSON_WORK_ITEM; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_WORK_ITEM - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/PERSON_WORK_ITEM.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/PERSON_WORK_ITEM.dat', header 'true', inferSchema 'true', 
nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_WORK_ITEM( PRSN_ID BIGINT NOT NULL, PRSN_WRK_ITM_ID BIGINT NOT NULL, @@ -1200,7 +1200,7 @@ DROP TABLE IF EXISTS PHONE; DROP TABLE IF EXISTS staging_PHONE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PHONE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/PHONE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/PHONE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PHONE( CNTC_ID BIGINT NOT NULL, PHN_ID BIGINT NOT NULL, @@ -1222,7 +1222,7 @@ DROP TABLE IF EXISTS PLAN_MEMBERSHIPS; DROP TABLE IF EXISTS staging_PLAN_MEMBERSHIPS; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PLAN_MEMBERSHIPS - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/PLAN_MEMBERSHIPS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/PLAN_MEMBERSHIPS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PLAN_MEMBERSHIPS( PRSN_ID BIGINT NOT NULL, BENE_GRP_ID BIGINT NOT NULL, @@ -1249,7 +1249,7 @@ DROP TABLE IF EXISTS POS_CODE; DROP TABLE IF EXISTS staging_POS_CODE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_POS_CODE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/POS_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/POS_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS POS_CODE( POS_CD_ID BIGINT NOT NULL, VER BIGINT NOT NULL, @@ -1271,7 +1271,7 @@ DROP TABLE IF EXISTS PREMIUM; DROP TABLE IF EXISTS staging_PREMIUM; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PREMIUM - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/PREMIUM.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/PREMIUM.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PREMIUM( GRP_ID BIGINT NOT NULL, PREM_CAT_ID BIGINT NOT NULL, @@ -1295,7 +1295,7 @@ DROP TABLE IF EXISTS REFERENCE_DATA; DROP TABLE IF EXISTS staging_REFERENCE_DATA; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_REFERENCE_DATA - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/REFERENCE_DATA.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/REFERENCE_DATA.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS REFERENCE_DATA( REF_DTA_ID BIGINT NOT NULL, VER BIGINT NOT NULL, @@ -1312,7 +1312,7 @@ DROP TABLE
IF EXISTS RETURNED_MAIL; DROP TABLE IF EXISTS staging_RETURNED_MAIL; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_RETURNED_MAIL - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/RETURNED_MAIL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/RETURNED_MAIL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS RETURNED_MAIL( RTRN_MAIL_ID BIGINT NOT NULL, VER BIGINT NOT NULL, @@ -1334,7 +1334,7 @@ DROP TABLE IF EXISTS UM_ACTIVITY; DROP TABLE IF EXISTS staging_UM_ACTIVITY; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_UM_ACTIVITY - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/UM_ACTIVITY.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/UM_ACTIVITY.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS UM_ACTIVITY( UM_RVW_ID BIGINT NOT NULL, UM_ACTY_ID BIGINT NOT NULL, @@ -1363,7 +1363,7 @@ DROP TABLE IF EXISTS UM_DIAGNOSIS_LINE; DROP TABLE IF EXISTS staging_UM_DIAGNOSIS_LINE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_UM_DIAGNOSIS_LINE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/UM_DIAGNOSIS_LINE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/UM_DIAGNOSIS_LINE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS UM_DIAGNOSIS_LINE( UM_RVW_ID BIGINT NOT NULL, UM_DIAG_SET_ID BIGINT NOT NULL, @@ -1386,7 +1386,7 @@ DROP TABLE IF EXISTS UM_DIAGNOSIS_SET; DROP TABLE IF EXISTS staging_UM_DIAGNOSIS_SET; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_UM_DIAGNOSIS_SET - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/UM_DIAGNOSIS_SET.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/UM_DIAGNOSIS_SET.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS UM_DIAGNOSIS_SET( UM_RVW_ID BIGINT NOT NULL, UM_DIAG_SET_ID BIGINT NOT NULL, @@ -1407,7 +1407,7 @@ DROP TABLE IF EXISTS BENEFIT_PACKAGE_RELATION; DROP TABLE IF EXISTS staging_BENEFIT_PACKAGE_RELATION; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_BENEFIT_PACKAGE_RELATION - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/BENEFIT_PACKAGE_RELATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/BENEFIT_PACKAGE_RELATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS BENEFIT_PACKAGE_RELATION( BENE_PKG_ID BIGINT NOT NULL, PKG_RELN_ID BIGINT NOT NULL, 
@@ -1427,7 +1427,7 @@ DROP TABLE IF EXISTS BILLING_ENTITY_CONTACT; DROP TABLE IF EXISTS staging_BILLING_ENTITY_CONTACT; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_BILLING_ENTITY_CONTACT - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/BILLING_ENTITY_CONTACT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/BILLING_ENTITY_CONTACT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS BILLING_ENTITY_CONTACT( BILL_ENT_ID BIGINT NOT NULL, CNTC_ID BIGINT NOT NULL, @@ -1449,7 +1449,7 @@ DROP TABLE IF EXISTS BILLING_ENTITY_DETAIL; DROP TABLE IF EXISTS staging_BILLING_ENTITY_DETAIL; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_BILLING_ENTITY_DETAIL - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/BILLING_ENTITY_DETAIL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/BILLING_ENTITY_DETAIL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS BILLING_ENTITY_DETAIL( BILL_ENT_ID BIGINT NOT NULL, CLIENT_ID BIGINT NOT NULL, @@ -1469,7 +1469,7 @@ DROP TABLE IF EXISTS CHECK_STATUS; DROP TABLE IF EXISTS staging_CHECK_STATUS; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_CHECK_STATUS - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/CHECK_STATUS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/CHECK_STATUS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS CHECK_STATUS( CLM_PAY_ID BIGINT NOT NULL, CHK_ID BIGINT NOT NULL, @@ -1494,7 +1494,7 @@ DROP TABLE IF EXISTS EXHIBIT_GROUP; DROP TABLE IF EXISTS staging_EXHIBIT_GROUP; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_EXHIBIT_GROUP - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/EXHIBIT_GROUP.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/EXHIBIT_GROUP.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS EXHIBIT_GROUP( EXHIBIT_GRP_ID BIGINT NOT NULL, VER BIGINT NOT NULL, @@ -1517,7 +1517,7 @@ DROP TABLE IF EXISTS EXHIBIT_GROUP_EXHIBIT; DROP TABLE IF EXISTS staging_EXHIBIT_GROUP_EXHIBIT; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_EXHIBIT_GROUP_EXHIBIT - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/EXHIBIT_GROUP_EXHIBIT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/EXHIBIT_GROUP_EXHIBIT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS 
EXHIBIT_GROUP_EXHIBIT( EXHIBIT_ID BIGINT NOT NULL, EXHIBIT_GRP_ID BIGINT NOT NULL, @@ -1539,7 +1539,7 @@ DROP TABLE IF EXISTS GENERAL_LEDGER_MAP; DROP TABLE IF EXISTS staging_GENERAL_LEDGER_MAP; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_GENERAL_LEDGER_MAP - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/GENERAL_LEDGER_MAP.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/GENERAL_LEDGER_MAP.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS GENERAL_LEDGER_MAP( GL_MAP_ID BIGINT NOT NULL, VER BIGINT NOT NULL, @@ -1560,7 +1560,7 @@ DROP TABLE IF EXISTS LEP_APPEAL_DECISION; DROP TABLE IF EXISTS staging_LEP_APPEAL_DECISION; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_LEP_APPEAL_DECISION - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/LEP_APPEAL_DECISION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/LEP_APPEAL_DECISION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS LEP_APPEAL_DECISION( PRSN_ID BIGINT NOT NULL, LEP_APL_ID BIGINT NOT NULL, @@ -1583,7 +1583,7 @@ DROP TABLE IF EXISTS LETTER_DETAIL; DROP TABLE IF EXISTS staging_LETTER_DETAIL; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_LETTER_DETAIL - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/LETTER_DETAIL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/LETTER_DETAIL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS LETTER_DETAIL( PRSN_ID BIGINT NOT NULL, LTR_ID BIGINT NOT NULL, @@ -1602,7 +1602,7 @@ DROP TABLE IF EXISTS LETTER_JOB; DROP TABLE IF EXISTS staging_LETTER_JOB; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_LETTER_JOB - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/LETTER_JOB.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/LETTER_JOB.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS LETTER_JOB( PRSN_ID BIGINT NOT NULL, LTR_DTL_ID BIGINT NOT NULL, @@ -1627,7 +1627,7 @@ DROP TABLE IF EXISTS LETTER_REGISTER; DROP TABLE IF EXISTS staging_LETTER_REGISTER; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_LETTER_REGISTER - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/LETTER_REGISTER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/LETTER_REGISTER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS 
LETTER_REGISTER( PRSN_ID BIGINT NOT NULL, LTR_ID BIGINT NOT NULL, @@ -1651,7 +1651,7 @@ DROP TABLE IF EXISTS LETTER_WORK_ITEM; DROP TABLE IF EXISTS staging_LETTER_WORK_ITEM; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_LETTER_WORK_ITEM - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/LETTER_WORK_ITEM.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/LETTER_WORK_ITEM.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS LETTER_WORK_ITEM( PRSN_ID BIGINT NOT NULL, LTR_RGSTR_ID BIGINT NOT NULL, @@ -1672,7 +1672,7 @@ DROP TABLE IF EXISTS PERSON_EVENT_STATUS; DROP TABLE IF EXISTS staging_PERSON_EVENT_STATUS; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_EVENT_STATUS - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/PERSON_EVENT_STATUS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/PERSON_EVENT_STATUS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_EVENT_STATUS( PRSN_EVNT_ID BIGINT NOT NULL, PRSN_EVNT_STAT_ID BIGINT NOT NULL, @@ -1692,7 +1692,7 @@ DROP TABLE IF EXISTS PERSON_EVENT_STATUS_REASON; DROP TABLE IF EXISTS staging_PERSON_EVENT_STATUS_REASON; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_EVENT_STATUS_REASON - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/PERSON_EVENT_STATUS_REASON.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/PERSON_EVENT_STATUS_REASON.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_EVENT_STATUS_REASON( PRSN_EVNT_STAT_ID BIGINT NOT NULL, PRSN_EVNT_STAT_RSN_ID BIGINT NOT NULL, @@ -1711,7 +1711,7 @@ DROP TABLE IF EXISTS PERSON_LEP_PROFILE_RECORD; DROP TABLE IF EXISTS staging_PERSON_LEP_PROFILE_RECORD; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_LEP_PROFILE_RECORD - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/PERSON_LEP_PROFILE_RECORD.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/PERSON_LEP_PROFILE_RECORD.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_LEP_PROFILE_RECORD( PRSN_ID BIGINT NOT NULL, PRSN_LEP_PRFL_ID BIGINT NOT NULL, @@ -1742,7 +1742,7 @@ DROP TABLE IF EXISTS PREMIUM_CATEGORY; DROP TABLE IF EXISTS staging_PREMIUM_CATEGORY; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PREMIUM_CATEGORY - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/PREMIUM_CATEGORY.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv 
OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/PREMIUM_CATEGORY.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PREMIUM_CATEGORY( GRP_ID BIGINT NOT NULL, PREM_CAT_ID BIGINT NOT NULL, @@ -1763,7 +1763,7 @@ DROP TABLE IF EXISTS PREMIUM_PART; DROP TABLE IF EXISTS staging_PREMIUM_PART; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PREMIUM_PART - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/PREMIUM_PART.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/PREMIUM_PART.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PREMIUM_PART( GRP_ID BIGINT NOT NULL, PREM_PART_ID BIGINT NOT NULL, @@ -1786,7 +1786,7 @@ DROP TABLE IF EXISTS PREMIUM_TABLE; DROP TABLE IF EXISTS staging_PREMIUM_TABLE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PREMIUM_TABLE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/PREMIUM_TABLE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/PREMIUM_TABLE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PREMIUM_TABLE( GRP_ID BIGINT NOT NULL, BENE_GRP_ID BIGINT NOT NULL, @@ -1808,7 +1808,7 @@ DROP TABLE IF EXISTS UM_INPATIENT_REVIEW; DROP TABLE IF EXISTS staging_UM_INPATIENT_REVIEW; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_UM_INPATIENT_REVIEW - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/UM_INPATIENT_REVIEW.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/UM_INPATIENT_REVIEW.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS UM_INPATIENT_REVIEW( UM_RVW_ID BIGINT NOT NULL, UM_INPT_RVW_ID BIGINT NOT NULL, @@ -1837,7 +1837,7 @@ DROP TABLE IF EXISTS UM_INPATIENT_STATUS; DROP TABLE IF EXISTS staging_UM_INPATIENT_STATUS; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_UM_INPATIENT_STATUS - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/UM_INPATIENT_STATUS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/UM_INPATIENT_STATUS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS UM_INPATIENT_STATUS( UM_RVW_ID BIGINT NOT NULL, UM_INPT_STAT BIGINT NOT NULL, @@ -1861,7 +1861,7 @@ DROP TABLE IF EXISTS UM_SERVICE_OVERRIDE; DROP TABLE IF EXISTS staging_UM_SERVICE_OVERRIDE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_UM_SERVICE_OVERRIDE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/UM_SERVICE_OVERRIDE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + 
USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/UM_SERVICE_OVERRIDE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS UM_SERVICE_OVERRIDE( UM_RVW_ID BIGINT NOT NULL, UM_SERV_ID BIGINT NOT NULL, @@ -1888,7 +1888,7 @@ DROP TABLE IF EXISTS BANK_ACCOUNT; DROP TABLE IF EXISTS staging_BANK_ACCOUNT; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_BANK_ACCOUNT - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/BANK_ACCOUNT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/BANK_ACCOUNT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS BANK_ACCOUNT( BNK_ORG_ID BIGINT NOT NULL, BNK_ID BIGINT NOT NULL, @@ -1910,7 +1910,7 @@ DROP TABLE IF EXISTS UM_INPATIENT_STAY_LENGTH; DROP TABLE IF EXISTS staging_UM_INPATIENT_STAY_LENGTH; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_UM_INPATIENT_STAY_LENGTH - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/UM_INPATIENT_STAY_LENGTH.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/UM_INPATIENT_STAY_LENGTH.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS UM_INPATIENT_STAY_LENGTH( UM_RVW_ID BIGINT NOT NULL, UM_INPT_RVW_ID BIGINT NOT NULL, @@ -1945,7 +1945,7 @@ DROP TABLE IF EXISTS REVENUE_CODE; DROP TABLE IF EXISTS staging_REVENUE_CODE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_REVENUE_CODE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/REVENUE_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/REVENUE_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS REVENUE_CODE( REV_CD_ID BIGINT NOT NULL, VER BIGINT NOT NULL, @@ -1968,7 +1968,7 @@ DROP TABLE IF EXISTS SERVICE_CODE; DROP TABLE IF EXISTS staging_SERVICE_CODE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_SERVICE_CODE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/SERVICE_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/SERVICE_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS SERVICE_CODE( SERV_CD_ID BIGINT NOT NULL, VER BIGINT NOT NULL, @@ -1989,7 +1989,7 @@ DROP TABLE IF EXISTS AGREEMENT; DROP TABLE IF EXISTS staging_AGREEMENT; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_AGREEMENT - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/AGREEMENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv 
OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/AGREEMENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS AGREEMENT( AGREE_ID BIGINT NOT NULL, VER BIGINT NOT NULL, @@ -2010,7 +2010,7 @@ DROP TABLE IF EXISTS ORGANIZATION_EVENT; DROP TABLE IF EXISTS staging_ORGANIZATION_EVENT; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_ORGANIZATION_EVENT - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/ORGANIZATION_EVENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/ORGANIZATION_EVENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS ORGANIZATION_EVENT( ORG_ID BIGINT NOT NULL, ORG_EVNT_ID BIGINT NOT NULL, @@ -2032,7 +2032,7 @@ DROP TABLE IF EXISTS FDI_TX_IDCARD; DROP TABLE IF EXISTS staging_FDI_TX_IDCARD; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_FDI_TX_IDCARD - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/FDI_TX_IDCARD.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/FDI_TX_IDCARD.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS FDI_TX_IDCARD ( ACN_ID BIGINT NOT NULL, @@ -2071,7 +2071,7 @@ DROP TABLE IF EXISTS FDI_TX_LETTER; DROP TABLE IF EXISTS staging_FDI_TX_LETTER; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_FDI_TX_LETTER - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/FDI_TX_LETTER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/FDI_TX_LETTER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS FDI_TX_LETTER ( ACN_ID BIGINT NOT NULL, @@ -2107,7 +2107,7 @@ DROP TABLE IF EXISTS BENEFIT_PACKAGE_ATTRIBUTE; DROP TABLE IF EXISTS staging_BENEFIT_PACKAGE_ATTRIBUTE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_BENEFIT_PACKAGE_ATTRIBUTE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/BENEFIT_PACKAGE_ATTRIBUTE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/BENEFIT_PACKAGE_ATTRIBUTE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS BENEFIT_PACKAGE_ATTRIBUTE( BENE_PKG_ID BIGINT NOT NULL, BENE_PKG_ATTR_ID BIGINT NOT NULL, @@ -2128,7 +2128,7 @@ DROP TABLE IF EXISTS BILLING_ENTITY_SCHEDULE; DROP TABLE IF EXISTS staging_BILLING_ENTITY_SCHEDULE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_BILLING_ENTITY_SCHEDULE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/BILLING_ENTITY_SCHEDULE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING 
com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/BILLING_ENTITY_SCHEDULE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS BILLING_ENTITY_SCHEDULE( BILL_ENT_ID BIGINT NOT NULL, CLIENT_ID BIGINT NOT NULL, @@ -2151,7 +2151,7 @@ DROP TABLE IF EXISTS BILLING_SCHEDULE; DROP TABLE IF EXISTS staging_BILLING_SCHEDULE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_BILLING_SCHEDULE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/BILLING_SCHEDULE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/BILLING_SCHEDULE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS BILLING_SCHEDULE( BILL_SCHD_ID BIGINT NOT NULL, CLIENT_ID BIGINT NOT NULL, @@ -2172,7 +2172,7 @@ DROP TABLE IF EXISTS BILLING_SOURCE; DROP TABLE IF EXISTS staging_BILLING_SOURCE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_BILLING_SOURCE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/BILLING_SOURCE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/BILLING_SOURCE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS BILLING_SOURCE( BILL_ENT_ID BIGINT NOT NULL, CLIENT_ID BIGINT NOT NULL, @@ -2191,7 +2191,7 @@ DROP TABLE IF EXISTS CHARGE_ITEM; DROP TABLE IF EXISTS staging_CHARGE_ITEM; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_CHARGE_ITEM - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/CHARGE_ITEM.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/CHARGE_ITEM.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS CHARGE_ITEM( BILL_ENT_ID BIGINT NOT NULL, CLIENT_ID BIGINT NOT NULL, @@ -2217,7 +2217,7 @@ DROP TABLE IF EXISTS CLAIM_COSHARE_TRACKING; DROP TABLE IF EXISTS staging_CLAIM_COSHARE_TRACKING; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_CLAIM_COSHARE_TRACKING - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/CLAIM_COSHARE_TRACKING.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/CLAIM_COSHARE_TRACKING.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS CLAIM_COSHARE_TRACKING( PRSN_ID BIGINT NOT NULL, VER BIGINT, @@ -2246,7 +2246,7 @@ DROP TABLE IF EXISTS CLAIM_LINE_ATTRIBUTE; DROP TABLE IF EXISTS staging_CLAIM_LINE_ATTRIBUTE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_CLAIM_LINE_ATTRIBUTE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/CLAIM_LINE_ATTRIBUTE.dat', header 'true', inferSchema 'true', nullValue 'NULL', 
maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/CLAIM_LINE_ATTRIBUTE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS CLAIM_LINE_ATTRIBUTE( PRSN_ID BIGINT NOT NULL, CLM_DTL_ID BIGINT NOT NULL, @@ -2271,7 +2271,7 @@ DROP TABLE IF EXISTS FDI_CORRESPONDENCE; DROP TABLE IF EXISTS staging_FDI_CORRESPONDENCE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_FDI_CORRESPONDENCE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/FDI_CORRESPONDENCE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/FDI_CORRESPONDENCE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS FDI_CORRESPONDENCE ( CLIENT_ID BIGINT NOT NULL, @@ -2294,7 +2294,7 @@ DROP TABLE IF EXISTS INVOICE; DROP TABLE IF EXISTS staging_INVOICE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_INVOICE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/INVOICE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/INVOICE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS INVOICE( BILL_ENT_ID BIGINT NOT NULL, CLIENT_ID BIGINT NOT NULL, @@ -2331,7 +2331,7 @@ DROP TABLE IF EXISTS INVOICE_STATUS; DROP TABLE IF EXISTS staging_INVOICE_STATUS; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_INVOICE_STATUS - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/INVOICE_STATUS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/INVOICE_STATUS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS INVOICE_STATUS( INV_ID BIGINT NOT NULL, CLIENT_ID BIGINT NOT NULL, @@ -2352,7 +2352,7 @@ DROP TABLE IF EXISTS MOOP_BALANCE_EXCEPTIONS; DROP TABLE IF EXISTS staging_MOOP_BALANCE_EXCEPTIONS; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_MOOP_BALANCE_EXCEPTIONS - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/MOOP_BALANCE_EXCEPTIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/MOOP_BALANCE_EXCEPTIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS MOOP_BALANCE_EXCEPTIONS( CLIENT_ID INTEGER NOT NULL, VLD_FRM_DT DATE NOT NULL, @@ -2376,7 +2376,7 @@ DROP TABLE IF EXISTS MOOP_BALANCE; DROP TABLE IF EXISTS staging_MOOP_BALANCE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_MOOP_BALANCE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/MOOP_BALANCE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + 
USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/MOOP_BALANCE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS MOOP_BALANCE( PRSN_ID BIGINT NOT NULL, CLIENT_ID INTEGER NOT NULL, @@ -2402,7 +2402,7 @@ DROP TABLE IF EXISTS MOOP_ACCUMULATOR; DROP TABLE IF EXISTS staging_MOOP_ACCUMULATOR; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_MOOP_ACCUMULATOR - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/MOOP_ACCUMULATOR.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/MOOP_ACCUMULATOR.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS MOOP_ACCUMULATOR ( SUPPL_ID VARCHAR(15) NOT NULL, CLIENT_ID INTEGER NOT NULL, @@ -2424,7 +2424,7 @@ DROP TABLE IF EXISTS PERSON_ACCUMULATOR; DROP TABLE IF EXISTS staging_PERSON_ACCUMULATOR; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_ACCUMULATOR - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/PERSON_ACCUMULATOR.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/PERSON_ACCUMULATOR.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_ACCUMULATOR( PRSN_ID BIGINT NOT NULL, PRSN_ACCUM_ID BIGINT NOT NULL, @@ -2455,7 +2455,7 @@ DROP TABLE IF EXISTS PROCEDURE_PRICE; DROP TABLE IF EXISTS staging_PROCEDURE_PRICE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_PROCEDURE_PRICE - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/PROCEDURE_PRICE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/PROCEDURE_PRICE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PROCEDURE_PRICE( PR_CD_ID BIGINT NOT NULL, PR_PRC_ID BIGINT NOT NULL, @@ -2479,7 +2479,7 @@ DROP TABLE IF EXISTS RECEIPT; DROP TABLE IF EXISTS staging_RECEIPT; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- CREATE EXTERNAL TABLE IF NOT EXISTS staging_RECEIPT - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/RECEIPT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/RECEIPT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS RECEIPT( BILL_ENT_ID BIGINT NOT NULL, CLIENT_ID BIGINT NOT NULL, @@ -2520,12 +2520,12 @@ CREATE TABLE IF NOT EXISTS RECEIPT( ) USING row OPTIONS(partition_by 'BILL_ENT_ID',buckets '32',redundancy '1',EVICTION_BY 'LRUHEAPPERCENT', overflow 'true'); INSERT INTO RECEIPT SELECT * FROM staging_RECEIPT; -DROP TABLE IF EXISTS TMGSERVICES_PLAN_CODE_CONFIG; -DROP TABLE IF EXISTS staging_TMGSERVICES_PLAN_CODE_CONFIG; +DROP TABLE IF EXISTS SERVICES_PLAN_CODE_CONFIG; +DROP TABLE IF EXISTS staging_SERVICES_PLAN_CODE_CONFIG; ----- 
CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_PLAN_CODE_CONFIG - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/TMGSERVICES_PLAN_CODE_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS TMGSERVICES_PLAN_CODE_CONFIG +CREATE EXTERNAL TABLE IF NOT EXISTS staging_SERVICES_PLAN_CODE_CONFIG + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/SERVICES_PLAN_CODE_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS SERVICES_PLAN_CODE_CONFIG ( PLAN_CODE_ID INT NOT NULL, CLIENT_REF_ID INT NOT NULL, @@ -2555,14 +2555,14 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_PLAN_CODE_CONFIG SRC_SYS_REC_ID VARCHAR(15) NULL, PRIMARY KEY (PLAN_CODE_ID,CLIENT_ID) ) USING row OPTIONS(partition_by 'PLAN_CODE_ID,CLIENT_ID', buckets '32',redundancy '1',EVICTION_BY 'LRUHEAPPERCENT', overflow 'true'); - INSERT INTO TMGSERVICES_PLAN_CODE_CONFIG SELECT * FROM staging_TMGSERVICES_PLAN_CODE_CONFIG; + INSERT INTO SERVICES_PLAN_CODE_CONFIG SELECT * FROM staging_SERVICES_PLAN_CODE_CONFIG; -DROP TABLE IF EXISTS TMGSERVICES_KEY_GENERATOR; -DROP TABLE IF EXISTS staging_TMGSERVICES_KEY_GENERATOR; +DROP TABLE IF EXISTS SERVICES_KEY_GENERATOR; +DROP TABLE IF EXISTS staging_SERVICES_KEY_GENERATOR; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_KEY_GENERATOR - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/TMGSERVICES_KEY_GENERATOR.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS TMGSERVICES_KEY_GENERATOR +CREATE EXTERNAL TABLE IF NOT EXISTS staging_SERVICES_KEY_GENERATOR + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/SERVICES_KEY_GENERATOR.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS SERVICES_KEY_GENERATOR ( KEY_GEN_ID INT NOT NULL, CLIENT_REF_ID INT NOT NULL, @@ -2580,14 +2580,14 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_KEY_GENERATOR SRC_SYS_REC_ID VARCHAR(15) NULL, PRIMARY KEY (KEY_GEN_ID,CLIENT_ID) ) USING row OPTIONS(partition_by 'KEY_GEN_ID,CLIENT_ID', buckets '32',redundancy '1',EVICTION_BY 'LRUHEAPPERCENT', overflow 'true' ); - INSERT INTO TMGSERVICES_KEY_GENERATOR SELECT * FROM staging_TMGSERVICES_KEY_GENERATOR; + INSERT INTO SERVICES_KEY_GENERATOR SELECT * FROM staging_SERVICES_KEY_GENERATOR; -DROP TABLE IF EXISTS TMGSERVICES_GROUP_CODE_CONFIG; -DROP TABLE IF EXISTS staging_TMGSERVICES_GROUP_CODE_CONFIG; +DROP TABLE IF EXISTS SERVICES_GROUP_CODE_CONFIG; +DROP TABLE IF EXISTS staging_SERVICES_GROUP_CODE_CONFIG; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_GROUP_CODE_CONFIG - USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/TMGSERVICES_GROUP_CODE_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); - CREATE TABLE IF NOT EXISTS TMGSERVICES_GROUP_CODE_CONFIG +CREATE EXTERNAL TABLE IF NOT EXISTS staging_SERVICES_GROUP_CODE_CONFIG + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/SERVICES_GROUP_CODE_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn 
'4096');
+ CREATE TABLE IF NOT EXISTS SERVICES_GROUP_CODE_CONFIG
(
GROUP_ID INT NOT NULL,
CLIENT_REF_ID INT NOT NULL,
@@ -2606,14 +2606,14 @@ CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_GROUP_CODE_CONFIG
PRIMARY KEY (GROUP_ID,CLIENT_ID)
) USING row OPTIONS(partition_by 'GROUP_ID,CLIENT_ID', buckets '32',redundancy '1',EVICTION_BY 'LRUHEAPPERCENT', overflow 'true');
- INSERT INTO TMGSERVICES_GROUP_CODE_CONFIG SELECT * FROM staging_TMGSERVICES_GROUP_CODE_CONFIG;
+ INSERT INTO SERVICES_GROUP_CODE_CONFIG SELECT * FROM staging_SERVICES_GROUP_CODE_CONFIG;
-DROP TABLE IF EXISTS TMGSERVICES_DNIS_CONFIG;
-DROP TABLE IF EXISTS staging_TMGSERVICES_DNIS_CONFIG;
+DROP TABLE IF EXISTS SERVICES_DNIS_CONFIG;
+DROP TABLE IF EXISTS staging_SERVICES_DNIS_CONFIG;
----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA -----
-CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_DNIS_CONFIG
- USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/TMGSERVICES_DNIS_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
-CREATE TABLE IF NOT EXISTS TMGSERVICES_DNIS_CONFIG
+CREATE EXTERNAL TABLE IF NOT EXISTS staging_SERVICES_DNIS_CONFIG
+ USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/SERVICES_DNIS_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
+CREATE TABLE IF NOT EXISTS SERVICES_DNIS_CONFIG
(
DNIS_ID INT NOT NULL,
CLIENT_REF_ID INT NOT NULL,
@@ -2632,14 +2632,14 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_DNIS_CONFIG
SRC_SYS_REC_ID VARCHAR(15) NULL,
PRIMARY KEY (DNIS_ID,CLIENT_ID)
) USING row OPTIONS(partition_by 'DNIS_ID,CLIENT_ID', buckets '32',redundancy '1',EVICTION_BY 'LRUHEAPPERCENT', overflow 'true');
- INSERT INTO TMGSERVICES_DNIS_CONFIG SELECT * FROM staging_TMGSERVICES_DNIS_CONFIG;
+ INSERT INTO SERVICES_DNIS_CONFIG SELECT * FROM staging_SERVICES_DNIS_CONFIG;
-DROP TABLE IF EXISTS TMGSERVICES_DENIAL_REASON_CONFIG;
-DROP TABLE IF EXISTS staging_TMGSERVICES_DENIAL_REASON_CONFIG;
+DROP TABLE IF EXISTS SERVICES_DENIAL_REASON_CONFIG;
+DROP TABLE IF EXISTS staging_SERVICES_DENIAL_REASON_CONFIG;
----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA -----
-CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_DENIAL_REASON_CONFIG
- USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/TMGSERVICES_DENIAL_REASON_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
-CREATE TABLE IF NOT EXISTS TMGSERVICES_DENIAL_REASON_CONFIG
+CREATE EXTERNAL TABLE IF NOT EXISTS staging_SERVICES_DENIAL_REASON_CONFIG
+ USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/SERVICES_DENIAL_REASON_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
+CREATE TABLE IF NOT EXISTS SERVICES_DENIAL_REASON_CONFIG
(
DENIAL_REASON_ID INT NOT NULL,
CLIENT_REF_ID INT NOT NULL,
@@ -2656,14 +2656,14 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_DENIAL_REASON_CONFIG
SRC_SYS_REC_ID VARCHAR(15) NULL,
PRIMARY KEY (DENIAL_REASON_ID,CLIENT_ID)
) USING row OPTIONS(partition_by 'DENIAL_REASON_ID,CLIENT_ID', buckets '32',redundancy '1',EVICTION_BY 'LRUHEAPPERCENT', overflow 'true');
- INSERT INTO TMGSERVICES_DENIAL_REASON_CONFIG SELECT * FROM staging_TMGSERVICES_DENIAL_REASON_CONFIG;
+ INSERT INTO SERVICES_DENIAL_REASON_CONFIG SELECT * FROM staging_SERVICES_DENIAL_REASON_CONFIG;
-DROP TABLE IF EXISTS TMGSERVICES_CLIENT_MASTER;
-DROP TABLE IF EXISTS staging_TMGSERVICES_CLIENT_MASTER;
+DROP TABLE IF EXISTS SERVICES_CLIENT_MASTER;
+DROP TABLE IF EXISTS staging_SERVICES_CLIENT_MASTER;
----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA -----
-CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_CLIENT_MASTER
- USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/TMGSERVICES_CLIENT_MASTER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
-CREATE TABLE IF NOT EXISTS TMGSERVICES_CLIENT_MASTER
+CREATE EXTERNAL TABLE IF NOT EXISTS staging_SERVICES_CLIENT_MASTER
+ USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/SERVICES_CLIENT_MASTER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
+CREATE TABLE IF NOT EXISTS SERVICES_CLIENT_MASTER
(
CLIENT_REF_ID INT NOT NULL,
VER BIGINT NOT NULL,
@@ -2671,7 +2671,7 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_CLIENT_MASTER
CLIENT_LEGACY_CD VARCHAR(10) NULL,
CLIENT_NAME VARCHAR(10) NULL,
MEMBER_ID_FORMAT VARCHAR(15) NULL,
- TMG_CALL_CLIENT_CODE VARCHAR(10) NULL,
+ CALL_CLIENT_CODE VARCHAR(10) NULL,
CREATE_date date NULL,
UPDATED_date date NULL,
USER_NAME VARCHAR(15) NULL,
@@ -2681,14 +2681,14 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_CLIENT_MASTER
SRC_SYS_REC_ID VARCHAR(15) NULL,
PRIMARY KEY (CLIENT_REF_ID,CLIENT_ID)
) USING row OPTIONS(partition_by 'CLIENT_REF_ID,CLIENT_ID', buckets '32',redundancy '1',EVICTION_BY 'LRUHEAPPERCENT', overflow 'true');
- INSERT INTO TMGSERVICES_CLIENT_MASTER SELECT * FROM staging_TMGSERVICES_CLIENT_MASTER;
+ INSERT INTO SERVICES_CLIENT_MASTER SELECT * FROM staging_SERVICES_CLIENT_MASTER;
-DROP TABLE IF EXISTS TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS;
-DROP TABLE IF EXISTS staging_TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS;
+DROP TABLE IF EXISTS SERVICES_SUBJECT_CATEGORY_TRANSLATIONS;
+DROP TABLE IF EXISTS staging_SERVICES_SUBJECT_CATEGORY_TRANSLATIONS;
----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA -----
-CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS
- USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
-CREATE TABLE IF NOT EXISTS TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS
+CREATE EXTERNAL TABLE IF NOT EXISTS staging_SERVICES_SUBJECT_CATEGORY_TRANSLATIONS
+ USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/SERVICES_SUBJECT_CATEGORY_TRANSLATIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
+CREATE TABLE IF NOT EXISTS SERVICES_SUBJECT_CATEGORY_TRANSLATIONS
(
SUBJ_CAT_ID INT NOT NULL,
CLIENT_REF_ID INT NOT NULL,
@@ -2710,14 +2710,14 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS
SRC_SYS_REC_ID VARCHAR(15) NULL,
PRIMARY KEY (SUBJ_CAT_ID,CLIENT_ID)
) USING row OPTIONS(partition_by 'SUBJ_CAT_ID,CLIENT_ID', buckets '32',redundancy '1',EVICTION_BY 'LRUHEAPPERCENT', overflow 'true');
- INSERT INTO TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS SELECT * FROM staging_TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS;
+ INSERT INTO SERVICES_SUBJECT_CATEGORY_TRANSLATIONS SELECT * FROM staging_SERVICES_SUBJECT_CATEGORY_TRANSLATIONS;
-DROP TABLE IF EXISTS TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS;
-DROP TABLE IF EXISTS staging_TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS;
+DROP TABLE IF EXISTS SERVICES_PTMR_VARIABLE_TRANSLATIONS;
+DROP TABLE IF EXISTS staging_SERVICES_PTMR_VARIABLE_TRANSLATIONS;
----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA -----
-CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS
- USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
-CREATE TABLE IF NOT EXISTS TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS
+CREATE EXTERNAL TABLE IF NOT EXISTS staging_SERVICES_PTMR_VARIABLE_TRANSLATIONS
+ USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/SERVICES_PTMR_VARIABLE_TRANSLATIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
+CREATE TABLE IF NOT EXISTS SERVICES_PTMR_VARIABLE_TRANSLATIONS
(
VARIABLE_ID INT NOT NULL,
VER BIGINT NOT NULL,
@@ -2735,14 +2735,14 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS
PRIMARY KEY (VARIABLE_ID)
) USING row OPTIONS(partition_by 'VARIABLE_ID', buckets '32',redundancy '1',EVICTION_BY 'LRUHEAPPERCENT', overflow 'true');
- INSERT INTO TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS SELECT * FROM staging_TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS;
+ INSERT INTO SERVICES_PTMR_VARIABLE_TRANSLATIONS SELECT * FROM staging_SERVICES_PTMR_VARIABLE_TRANSLATIONS;
-DROP TABLE IF EXISTS TMGSERVICES_ACCOUNTING_CODES;
-DROP TABLE IF EXISTS staging_TMGSERVICES_ACCOUNTING_CODES;
+DROP TABLE IF EXISTS SERVICES_ACCOUNTING_CODES;
+DROP TABLE IF EXISTS staging_SERVICES_ACCOUNTING_CODES;
----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA -----
-CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_ACCOUNTING_CODES
- USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/TMGSERVICES_ACCOUNTING_CODES.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
-CREATE TABLE IF NOT EXISTS TMGSERVICES_ACCOUNTING_CODES
+CREATE EXTERNAL TABLE IF NOT EXISTS staging_SERVICES_ACCOUNTING_CODES
+ USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/SERVICES_ACCOUNTING_CODES.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
+CREATE TABLE IF NOT EXISTS SERVICES_ACCOUNTING_CODES
(
ACCOUNTING_CODE_ID INT NOT NULL,
CLIENT_REF_ID INT NOT NULL,
@@ -2765,13 +2765,13 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_ACCOUNTING_CODES
SRC_SYS_REC_ID VARCHAR(15) NULL,
PRIMARY KEY (ACCOUNTING_CODE_ID,CLIENT_ID)
) USING row OPTIONS(partition_by 'ACCOUNTING_CODE_ID,CLIENT_ID', buckets '32',redundancy '1',EVICTION_BY 'LRUHEAPPERCENT', overflow 'true');
- INSERT INTO TMGSERVICES_ACCOUNTING_CODES SELECT * FROM staging_TMGSERVICES_ACCOUNTING_CODES;
+ INSERT INTO SERVICES_ACCOUNTING_CODES SELECT * FROM staging_SERVICES_ACCOUNTING_CODES;
DROP TABLE IF EXISTS UNAPPLIED_CASH;
DROP TABLE IF EXISTS staging_UNAPPLIED_CASHE;
----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA -----
CREATE EXTERNAL TABLE IF NOT EXISTS staging_UNAPPLIED_CASH
- USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/UNAPPLIED_CASH.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
+ USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/UNAPPLIED_CASH.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
CREATE TABLE IF NOT EXISTS UNAPPLIED_CASH(
UNAPP_CSH_ID BIGINT NOT NULL,
VER BIGINT NOT NULL,
@@ -2794,7 +2794,7 @@ DROP TABLE IF EXISTS WORK_GENERATED_KEYS;
DROP TABLE IF EXISTS staging_WORK_GENERATED_KEYS;
----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA -----
CREATE EXTERNAL TABLE IF NOT EXISTS staging_WORK_GENERATED_KEYS
- USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/WORK_GENERATED_KEYS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
+ USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/USER2_Data_20G/WORK_GENERATED_KEYS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096');
CREATE TABLE IF NOT EXISTS WORK_GENERATED_KEYS(
CLIENT_ID BIGINT NOT NULL,
GEN_KEY_ID BIGINT NOT NULL,
diff --git a/dtests/src/resources/scripts/clusterRecovery/dropColocatedTablesInRightOrder.sql b/dtests/src/resources/scripts/clusterRecovery/dropColocatedTablesInRightOrder.sql
index 28ea11fc54..af18ec2b22 100644
--- a/dtests/src/resources/scripts/clusterRecovery/dropColocatedTablesInRightOrder.sql
+++ b/dtests/src/resources/scripts/clusterRecovery/dropColocatedTablesInRightOrder.sql
@@ -1,14 +1,14 @@
DROP TABLE IF EXISTS WORK_GENERATED_KEYS;
DROP TABLE IF EXISTS UNAPPLIED_CASH;
-DROP TABLE IF EXISTS TMGSERVICES_ACCOUNTING_CODES;
-DROP TABLE IF EXISTS TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS;
-DROP TABLE IF EXISTS TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS;
-DROP TABLE IF EXISTS TMGSERVICES_CLIENT_MASTER;
-DROP TABLE IF EXISTS TMGSERVICES_DENIAL_REASON_CONFIG;
-DROP TABLE IF EXISTS TMGSERVICES_DNIS_CONFIG;
-DROP TABLE IF EXISTS TMGSERVICES_GROUP_CODE_CONFIG;
-DROP TABLE IF EXISTS TMGSERVICES_KEY_GENERATOR;
-DROP TABLE IF EXISTS TMGSERVICES_PLAN_CODE_CONFIG;
+DROP TABLE IF EXISTS SERVICES_ACCOUNTING_CODES;
+DROP TABLE IF EXISTS SERVICES_PTMR_VARIABLE_TRANSLATIONS;
+DROP TABLE IF EXISTS SERVICES_SUBJECT_CATEGORY_TRANSLATIONS;
+DROP TABLE IF EXISTS SERVICES_CLIENT_MASTER;
+DROP TABLE IF EXISTS SERVICES_DENIAL_REASON_CONFIG;
+DROP TABLE IF EXISTS SERVICES_DNIS_CONFIG;
+DROP TABLE IF EXISTS SERVICES_GROUP_CODE_CONFIG;
+DROP TABLE IF EXISTS SERVICES_KEY_GENERATOR;
+DROP TABLE IF EXISTS SERVICES_PLAN_CODE_CONFIG;
DROP TABLE IF EXISTS RECEIPT;
DROP TABLE IF EXISTS PROCEDURE_PRICE;
DROP TABLE IF EXISTS PERSON_ACCUMULATOR;
diff --git a/dtests/src/resources/scripts/clusterRecovery/dropColocatedTablesInWrongOrder.sql b/dtests/src/resources/scripts/clusterRecovery/dropColocatedTablesInWrongOrder.sql
index af736bd372..4aeced3165 100644
--- a/dtests/src/resources/scripts/clusterRecovery/dropColocatedTablesInWrongOrder.sql
+++ b/dtests/src/resources/scripts/clusterRecovery/dropColocatedTablesInWrongOrder.sql
@@ -127,14 +127,14 @@ DROP TABLE IF EXISTS MOOP_ACCUMULATOR;
DROP TABLE IF EXISTS PERSON_ACCUMULATOR;
DROP TABLE IF EXISTS PROCEDURE_PRICE;
DROP TABLE IF EXISTS RECEIPT;
-DROP TABLE IF EXISTS TMGSERVICES_PLAN_CODE_CONFIG;
-DROP TABLE IF EXISTS TMGSERVICES_KEY_GENERATOR;
-DROP TABLE IF EXISTS TMGSERVICES_GROUP_CODE_CONFIG;
-DROP TABLE IF EXISTS TMGSERVICES_DNIS_CONFIG;
-DROP TABLE IF EXISTS TMGSERVICES_DENIAL_REASON_CONFIG;
-DROP TABLE IF EXISTS TMGSERVICES_CLIENT_MASTER;
-DROP TABLE IF EXISTS TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS;
-DROP TABLE IF EXISTS TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS;
-DROP TABLE IF EXISTS TMGSERVICES_ACCOUNTING_CODES;
+DROP TABLE IF EXISTS SERVICES_PLAN_CODE_CONFIG;
+DROP TABLE IF EXISTS SERVICES_KEY_GENERATOR;
+DROP TABLE IF EXISTS SERVICES_GROUP_CODE_CONFIG;
+DROP TABLE IF EXISTS SERVICES_DNIS_CONFIG;
+DROP TABLE IF EXISTS SERVICES_DENIAL_REASON_CONFIG;
+DROP TABLE IF EXISTS SERVICES_CLIENT_MASTER;
+DROP TABLE IF EXISTS SERVICES_SUBJECT_CATEGORY_TRANSLATIONS;
+DROP TABLE IF EXISTS SERVICES_PTMR_VARIABLE_TRANSLATIONS;
+DROP TABLE IF EXISTS SERVICES_ACCOUNTING_CODES;
DROP TABLE IF EXISTS UNAPPLIED_CASH;
DROP TABLE IF EXISTS WORK_GENERATED_KEYS;
diff --git a/dtests/src/test/java/io/snappydata/hydra/cdcConnector/cdcConnector.bt b/dtests/src/test/java/io/snappydata/hydra/cdcConnector/cdcConnector.bt
index c54a3ef962..5675c0daaf 100644
--- a/dtests/src/test/java/io/snappydata/hydra/cdcConnector/cdcConnector.bt
+++ b/dtests/src/test/java/io/snappydata/hydra/cdcConnector/cdcConnector.bt
@@ -4,7 +4,7 @@ io/snappydata/hydra/cdcConnector/cdcBasicValidationTest.conf
C=locator locatorHosts=1 locatorVMsPerHost=1 locatorThreadsPerVM=1
D=worker workerHosts=1 workerVMsPerHost=1 workerThreadsPerVM=1
testJar="$GEMFIRE/../../../dtests/build-artifacts/scala-2.11/libs/snappydata-store-scala-tests-0.1.0-SNAPSHOT-tests.jar"
- dataFilesLocation="/export/shared/QA_DATA/TMG_Data_20G"
+ dataFilesLocation="/export/shared/QA_DATA/USER2_Data_20G"
queryFilePath="$GEMFIRE/../../../dtests/src/resources/scripts/cdcConnector/scriptsForApp1/selectOps.sql"
deleteQFilePath="$GEMFIRE/../../../dtests/src/resources/scripts/cdcConnector/bulkDeleteTemp.sql"
insertQueryPath="$GEMFIRE/../../../dtests/src/resources/scripts/cdcConnector"
@@ -30,7 +30,7 @@ io/snappydata/hydra/cdcConnector/cdcBasicValidationTest.conf
C=locator locatorHosts=1 locatorVMsPerHost=1 locatorThreadsPerVM=1
D=worker workerHosts=1 workerVMsPerHost=1 workerThreadsPerVM=1
testJar="$GEMFIRE/../../../dtests/build-artifacts/scala-2.11/libs/snappydata-store-scala-tests-0.1.0-SNAPSHOT-tests.jar"
- dataFilesLocation="/export/shared/QA_DATA/TMG_Data_20G"
+ dataFilesLocation="/export/shared/QA_DATA/USER2_Data_20G"
queryFilePath="$GEMFIRE/../../../dtests/src/resources/scripts/cdcConnector/pointqueries.sql"
deleteQFilePath="$GEMFIRE/../../../dtests/src/resources/scripts/cdcConnector/bulkDeleteTemp.sql"
insertQueryPath="$GEMFIRE/../../../dtests/src/resources/scripts/cdcConnector"
@@ -53,8 +53,8 @@ io/snappydata/hydra/cdcConnector/cdcBasicValidationTest.conf
io/snappydata/hydra/cdcConnector/cdcConnectorMultiSqlServerInstanceTest.conf
A=snappy snappyHosts=1 snappyVMsPerHost=1 snappyThreadsPerVM=10
testJar="$GEMFIRE/../../../dtests/build-artifacts/scala-2.11/libs/snappydata-store-scala-tests-0.1.0-SNAPSHOT-tests.jar"
- dataFilesLocationCol="/export/shared/QA_DATA/TMG_Data/TMG_Cluster_Data"
- dataFilesLocationRow="/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data"
+ dataFilesLocationCol="/export/shared/QA_DATA/USER2_Data/Cluster_Data"
+ dataFilesLocationRow="/export/shared/QA_DATA/USER2_Data/RowTable_Data"
queryFilePath="$GEMFIRE/../../../dtests/src/resources/scripts/cdcConnector/pointLookUpQueries.sql"
deleteQFilePath="$GEMFIRE/../../../dtests/src/resources/scripts/cdcConnector/bulkDelete.sql"
insertQueryPath1="$GEMFIRE/../../../dtests/src/resources/scripts/cdcConnector/scriptsForApp1"
@@ -76,8 +76,8 @@ io/snappydata/hydra/cdcConnector/cdcBasicValidationTest.conf
io/snappydata/hydra/cdcConnector/cdcConnectorLongRunningHAWithMultiSqlServer.conf
A=snappy snappyHosts=1 snappyVMsPerHost=1 snappyThreadsPerVM=10
testJar="$GEMFIRE/../../../dtests/build-artifacts/scala-2.11/libs/snappydata-store-scala-tests-0.1.0-SNAPSHOT-tests.jar"
- dataFilesLocationCol="/export/shared/QA_DATA/TMG_Data/TMG_Cluster_Data"
- dataFilesLocationRow="/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data"
+ dataFilesLocationCol="/export/shared/QA_DATA/USER2_Data/Cluster_Data"
+ dataFilesLocationRow="/export/shared/QA_DATA/USER2_Data/RowTable_Data"
queryFilePath="$GEMFIRE/../../../dtests/src/resources/scripts/cdcConnector/pointLookUpQueries.sql"
deleteQFilePath="$GEMFIRE/../../../dtests/src/resources/scripts/cdcConnector/bulkDelete.sql"
updateQueryPath="$GEMFIRE/../../../dtests/src/resources/scripts/cdcConnector/update.sql"
diff --git a/dtests/src/test/java/io/snappydata/hydra/clusterRecovery/clusterRecovery.bt b/dtests/src/test/java/io/snappydata/hydra/clusterRecovery/clusterRecovery.bt
index 91aced2c3a..dbb1f98e7d 100644
--- a/dtests/src/test/java/io/snappydata/hydra/clusterRecovery/clusterRecovery.bt
+++ b/dtests/src/test/java/io/snappydata/hydra/clusterRecovery/clusterRecovery.bt
@@ -1,7 +1,7 @@
io/snappydata/hydra/clusterRecovery/newNodeStartingFirstTest.conf
A=snappy snappyHosts=1 snappyVMsPerHost=1 snappyThreadsPerVM=10
testJar="$GEMFIRE/../../../dtests/build-artifacts/scala-2.11/libs/snappydata-store-scala-tests-0.1.0-SNAPSHOT-tests.jar"
- dataFilesLocation="/export/shared/QA_DATA/TMG_Data_20G"
+ dataFilesLocation="/export/shared/QA_DATA/USER2_Data_20G"
logPath="/nfs/users/spillai"
queryPath="$GEMFIRE/../../../dtests/src/resources/scripts/clusterRecovery/selectQuery.sql"
snappyFileLoc="$GEMFIRE/../snappy"
@@ -11,7 +11,7 @@ io/snappydata/hydra/clusterRecovery/newNodeStartingFirstTest.conf
io/snappydata/hydra/clusterRecovery/allLocatorsDownRebalanceTest.conf
A=snappy snappyHosts=3 snappyVMsPerHost=1 snappyThreadsPerVM=10
testJar="$GEMFIRE/../../../dtests/build-artifacts/scala-2.11/libs/snappydata-store-scala-tests-0.1.0-SNAPSHOT-tests.jar"
- dataFilesLocation="/export/shared/QA_DATA/TMG_Data_20G"
+ dataFilesLocation="/export/shared/QA_DATA/USER2_Data_20G"
logPath="/nfs/users/spillai"
snappyPath="$GEMFIRE/../snappy"
queryPath="$GEMFIRE/../../../dtests/src/resources/scripts/clusterRecovery/selectQuery.sql"
@@ -21,7 +21,7 @@ io/snappydata/hydra/clusterRecovery/allLocatorsDownRebalanceTest.conf
io/snappydata/hydra/clusterRecovery/newNodeRebalanceMeanKill.conf
A=snappy snappyHosts=3 snappyVMsPerHost=1 snappyThreadsPerVM=10
testJar="$GEMFIRE/../../../dtests/build-artifacts/scala-2.11/libs/snappydata-store-scala-tests-0.1.0-SNAPSHOT-tests.jar"
- dataFilesLocation="/export/shared/QA_DATA/TMG_Data_20G"
+ dataFilesLocation="/export/shared/QA_DATA/USER2_Data_20G"
insertQueryPath1="$GEMFIRE/../../../dtests/src/resources/scripts/clusterRecovery"
logPath="/nfs/users/spillai"
queryPath="$GEMFIRE/../../../dtests/src/resources/scripts/clusterRecovery/selectQuery.sql"
@@ -31,7 +31,7 @@ io/snappydata/hydra/clusterRecovery/newNodeRebalanceMeanKill.conf
io/snappydata/hydra/clusterRecovery/clusterRestartWithMinMem.conf
A=snappy snappyHosts=3 snappyVMsPerHost=1 snappyThreadsPerVM=10
testJar="$GEMFIRE/../../../dtests/build-artifacts/scala-2.11/libs/snappydata-store-scala-tests-0.1.0-SNAPSHOT-tests.jar"
- dataFilesLocation="/export/shared/QA_DATA/TMG_Data_20G"
+ dataFilesLocation="/export/shared/QA_DATA/USER2_Data_20G"
logPath="/nfs/users/spillai"
queryPath="$GEMFIRE/../../../dtests/src/resources/scripts/clusterRecovery/selectQuery.sql"
newNode="dev1"
@@ -40,7 +40,7 @@ io/snappydata/hydra/clusterRecovery/clusterRestartWithMinMem.conf
io/snappydata/hydra/clusterRecovery/colocationChainDropTest.conf
A=snappy snappyHosts=1 snappyVMsPerHost=1 snappyThreadsPerVM=10
testJar="$GEMFIRE/../../../dtests/build-artifacts/scala-2.11/libs/snappydata-store-scala-tests-0.1.0-SNAPSHOT-tests.jar"
- dataFilesLocation="/export/shared/QA_DATA/TMG_Data_20G"
+ dataFilesLocation="/export/shared/QA_DATA/USER2_Data_20G"
logPath="/nfs/users/spillai"
queryPath="$GEMFIRE/../../../dtests/src/resources/scripts/clusterRecovery/selectQuery.sql"
snappyPath="$GEMFIRE/../snappy"
@@ -48,7 +48,7 @@ io/snappydata/hydra/clusterRecovery/colocationChainDropTest.conf
io/snappydata/hydra/clusterRecovery/missingDiskStoreRecoveryTest.conf
A=snappy snappyHosts=1 snappyVMsPerHost=1 snappyThreadsPerVM=10
testJar="$GEMFIRE/../../../dtests/build-artifacts/scala-2.11/libs/snappydata-store-scala-tests-0.1.0-SNAPSHOT-tests.jar"
- dataFilesLocation="/export/shared/QA_DATA/TMG_Data_20G"
+ dataFilesLocation="/export/shared/QA_DATA/USER2_Data_20G"
logPath="/nfs/users/spillai"
queryPath="$GEMFIRE/../../../dtests/src/resources/scripts/clusterRecovery/selectQuery.sql"
snappyPath="$GEMFIRE/../snappy"
diff --git a/dtests/src/test/java/io/snappydata/hydra/putInto/concPutInto.bt b/dtests/src/test/java/io/snappydata/hydra/putInto/concPutInto.bt
index 650b9e6a2a..555b14992b 100644
--- a/dtests/src/test/java/io/snappydata/hydra/putInto/concPutInto.bt
+++ b/dtests/src/test/java/io/snappydata/hydra/putInto/concPutInto.bt
@@ -17,7 +17,7 @@ io/snappydata/hydra/putInto/putIntoMemLeak.conf
A=snappyStore snappyStoreHosts=1 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
B=lead leadHosts=1 leadVMsPerHost=1 leadThreadsPerVM=2
C=locator locatorHosts=1 locatorVMsPerHost=1 locatorThreadsPerVM=1
- dataFilesLocation="/export/shared/QA_DATA/TMG_Data_20G"
+ dataFilesLocation="/export/shared/QA_DATA/USER2_Data_20G"
startRange=1000001
endRange=2000001
threadCnt=5
@@ -28,7 +28,7 @@ io/snappydata/hydra/putInto/putIntoWithOverlappingKeys.conf
A=snappyStore snappyStoreHosts=1 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
B=lead leadHosts=1 leadVMsPerHost=1 leadThreadsPerVM=2
C=locator locatorHosts=1 locatorVMsPerHost=1 locatorThreadsPerVM=1
- dataFilesLocation="/export/shared/QA_DATA/TMG_Data_20G"
+ dataFilesLocation="/export/shared/QA_DATA/USER2_Data_20G"
startRange=1000001
endRange=1001000
threadCnt=4
diff --git a/dtests/src/test/java/io/snappydata/hydra/rowStoreRegressionScript.sh b/dtests/src/test/java/io/snappydata/hydra/rowStoreRegressionScript.sh
index c9bdae9643..2594996e33 100644
--- a/dtests/src/test/java/io/snappydata/hydra/rowStoreRegressionScript.sh
+++ b/dtests/src/test/java/io/snappydata/hydra/rowStoreRegressionScript.sh
@@ -1,4 +1,4 @@
-export JTESTS=$SNAPPY_HOME/store/tests/sql/build-artifacts/linux/classes/main
+export JTESTS=$SNAPPY_HOME/store/tests/sql/build-artifacts/linux/classes/java/main
$SNAPPY_HOME/store/tests/core/src/main/java/bin/sample-runbt.sh $OUTPUT_DIR/masterLogs $SNAPPY_HOME -l $JTESTS/sql/snappy.local.conf sql/sqlTx/thinClient/thinClientTx.bt
sleep 30;
diff --git a/dtests/src/test/java/io/snappydata/hydra/smoke.sh b/dtests/src/test/java/io/snappydata/hydra/smoke.sh
index c7d502100e..5847d56c52 100755
--- a/dtests/src/test/java/io/snappydata/hydra/smoke.sh
+++ b/dtests/src/test/java/io/snappydata/hydra/smoke.sh
@@ -38,9 +38,7 @@ mkdir -p $resultDir
shift
$SNAPPYDATA_SOURCE_DIR/store/tests/core/src/main/java/bin/sample-runbt.sh $resultDir $SNAPPYDATA_SOURCE_DIR -r 1 -d false io/snappydata/hydra/cluster/startDualModeCluster_smoke.bt
-sleep 30;
$SNAPPYDATA_SOURCE_DIR/store/tests/core/src/main/java/bin/sample-runbt.sh $resultDir $SNAPPYDATA_SOURCE_DIR -r 1 -d false io/snappydata/hydra/smoke.bt
-sleep 30;
$SNAPPYDATA_SOURCE_DIR/store/tests/core/src/main/java/bin/sample-runbt.sh $resultDir $SNAPPYDATA_SOURCE_DIR -r 1 -d false io/snappydata/hydra/cluster/stopDualModeCluster.bt
diff --git a/dtests/src/test/java/io/snappydata/hydra/snappyRegressionScript.sh b/dtests/src/test/java/io/snappydata/hydra/snappyRegressionScript.sh
index 6d372eae87..86978e38da 100644
--- a/dtests/src/test/java/io/snappydata/hydra/snappyRegressionScript.sh
+++ b/dtests/src/test/java/io/snappydata/hydra/snappyRegressionScript.sh
@@ -1,4 +1,4 @@
-export JTESTS=$SNAPPY_HOME/store/tests/sql/build-artifacts/linux/classes/main
+export JTESTS=$SNAPPY_HOME/store/tests/sql/build-artifacts/linux/classes/java/main
$SNAPPY_HOME/store/tests/core/src/main/java/bin/sample-runbt.sh $OUTPUT_DIR/snappyHydraLogs $SNAPPY_HOME -l $JTESTS/io/snappydata/hydra/local.smartConnectorMode.conf -d false io/snappydata/hydra/northwind/northWind.bt
sleep 30;
diff --git a/dunit/build.gradle b/dunit/build.gradle
index b4ebc7e445..02dbdae414 100644
--- a/dunit/build.gradle
+++ b/dunit/build.gradle
@@ -23,7 +23,7 @@ version = '1.0.3.6'
compileJava.options.encoding = 'UTF-8'
dependencies {
- compile 'commons-io:commons-io:2.5'
+ compile "commons-io:commons-io:${commonsIoVersion}"
compile "junit:junit:${junitVersion}"
if (new File(rootDir, 'store/build.gradle').exists()) {
compile project(':snappy-store:snappydata-store-core')
diff --git a/gradle.properties b/gradle.properties
index 19ccf6b25a..e097267349 100644
--- a/gradle.properties
+++ b/gradle.properties
@@ -1,4 +1,12 @@
+# Gradle daemon has been disabled due to two reasons:
+# 1) It frequently fails after a few runs due to OOME.
+# 2) Messes up buildOutput.log by writing to it multiple
+# times, increasing by one in every run i.e. first run
+# will be good, then second run will write each line twice,
+# third run thrice and so on. Clearing the loggerService
+# explicitly makes no difference.
org.gradle.daemon=false
+org.gradle.warning.mode=none
#org.gradle.parallel=true
# Set this on the command line with -P or in ~/.gradle/gradle.properties
@@ -8,5 +16,3 @@ buildRoot=
# Empty credentials for maven publish on Sonatype
ossrhUsername=
ossrhPassword=
-archivaUsername=
-archivaPassword=
diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar
index f808147c25..457aad0d98 100644
Binary files a/gradle/wrapper/gradle-wrapper.jar and b/gradle/wrapper/gradle-wrapper.jar differ
diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties
index 7fe00909d0..ee671127ff 100644
--- a/gradle/wrapper/gradle-wrapper.properties
+++ b/gradle/wrapper/gradle-wrapper.properties
@@ -1,6 +1,5 @@
-#Wed Sep 13 23:36:26 IST 2017
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
+distributionUrl=https\://services.gradle.org/distributions/gradle-5.0-all.zip
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
-distributionUrl=https\://services.gradle.org/distributions/gradle-3.5.1-all.zip
diff --git a/gradlew b/gradlew
index 8f0616712b..0bad6a51d9 100755
--- a/gradlew
+++ b/gradlew
@@ -28,16 +28,16 @@ APP_NAME="Gradle"
APP_BASE_NAME=`basename "$0"`
# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
-DEFAULT_JVM_OPTS="-Xmx2g -XX:ReservedCodeCacheSize=512m -Djava.net.preferIPv4Stack=true"
+DEFAULT_JVM_OPTS="-XX:MaxMetaspaceSize=256m -XX:+HeapDumpOnOutOfMemoryError -Xmx1g -Xms1g -Djava.net.preferIPv4Stack=true"
# Use the maximum available, or set MAX_FD != -1 to use that value.
MAX_FD="maximum" -warn ( ) { +warn () { echo "$*" } -die ( ) { +die () { echo echo "$*" echo @@ -155,7 +155,7 @@ if $cygwin ; then fi # Escape application args -save ( ) { +save () { for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done echo " " } diff --git a/gradlew.bat b/gradlew.bat index 78bf604159..5ce1db48ec 100644 --- a/gradlew.bat +++ b/gradlew.bat @@ -14,7 +14,7 @@ set APP_BASE_NAME=%~n0 set APP_HOME=%DIRNAME% @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. -set DEFAULT_JVM_OPTS=-Xmx2g -XX:ReservedCodeCacheSize=512m -Djava.net.preferIPv4Stack=true +set DEFAULT_JVM_OPTS=-XX:MaxMetaspaceSize=256m -XX:+HeapDumpOnOutOfMemoryError -Xmx1g -Xms1g -Djava.net.preferIPv4Stack=true @rem Find java.exe if defined JAVA_HOME goto findJavaFromJavaHome diff --git a/jdbc/build.gradle b/jdbc/build.gradle index 799427484e..c18707bfd1 100644 --- a/jdbc/build.gradle +++ b/jdbc/build.gradle @@ -15,10 +15,6 @@ * LICENSE file. */ -plugins { - id 'com.github.johnrengelman.shadow' version '2.0.4' -} - apply plugin: 'scala' compileScala.options.encoding = 'UTF-8' diff --git a/settings.gradle b/settings.gradle index aeaa4dcc85..86d55110e2 100644 --- a/settings.gradle +++ b/settings.gradle @@ -122,7 +122,6 @@ if (new File(rootDir, 'store/build.gradle').exists()) { include ':snappy-store:gemfire-junit' include ':snappy-store:gemfire-shared' include ':snappy-store:gemfire-core' - // include ':snappy-store:gemfire-web' include ':snappy-store:gemfire-examples' include ':snappy-store:snappydata-store-shared' include ':snappy-store:snappydata-store-prebuild' @@ -141,7 +140,6 @@ if (new File(rootDir, 'store/build.gradle').exists()) { project(':snappy-store:gemfire-junit').projectDir = "$rootDir/store/gemfire-junit" as File project(':snappy-store:gemfire-shared').projectDir = "$rootDir/store/gemfire-shared" as File project(':snappy-store:gemfire-core').projectDir = "$rootDir/store/gemfire-core" as File - // project(':snappy-store:gemfire-web').projectDir = "$rootDir/store/gemfire-web" as File project(':snappy-store:gemfire-examples').projectDir = "$rootDir/store/gemfire-examples" as File project(':snappy-store:snappydata-store-shared').projectDir = "$rootDir/store/gemfirexd/shared" as File project(':snappy-store:snappydata-store-prebuild').projectDir = "$rootDir/store/gemfirexd/prebuild" as File diff --git a/spark b/spark index c93980c7dc..8bb9fd2e97 160000 --- a/spark +++ b/spark @@ -1 +1 @@ -Subproject commit c93980c7dcdb1769961b66ec525ed199fa96d85e +Subproject commit 8bb9fd2e977feb672cc4e2a08ac2bf1d226ae98f diff --git a/spark-jobserver b/spark-jobserver index 3c38b52031..48cc2267d6 160000 --- a/spark-jobserver +++ b/spark-jobserver @@ -1 +1 @@ -Subproject commit 3c38b5203189788342316e9f81641294c9dc6f3e +Subproject commit 48cc2267d66d20f9981ae5f83bbdf31217afabb4 diff --git a/store b/store index 7cab93e5f5..644e110187 160000 --- a/store +++ b/store @@ -1 +1 @@ -Subproject commit 7cab93e5f5e5f76e83b88298e7f15ef8fc17edea +Subproject commit 644e110187a00350a96ddb82d15d0968cc01ca52