From 4ae4192f534bd0f32f498138e63ba2e9f96ed1c2 Mon Sep 17 00:00:00 2001
From: Emilio
Date: Mon, 25 Mar 2024 19:39:13 +0000
Subject: [PATCH] Replace hadoop minicluster tests with testcontainers (#3082)

* Fix CI continue on test failure

* Increase timeout in postgis test to reduce CI failures
---
 .github/workflows/build-and-test-2.12.yml     |  7 ++-
 .github/workflows/build-and-test-2.13.yml     |  7 ++-
 build/cqs.tsv                                 |  5 +-
 .../geomesa-accumulo-jobs/pom.xml             | 14 -----
 geomesa-fs/geomesa-fs-datastore/pom.xml       | 26 ++++
 .../geomesa/fs/HadoopSharedCluster.scala      | 59 +++++++++++++++++++
 geomesa-fs/geomesa-fs-spark-runtime/pom.xml   | 17 +++---
 .../fs/spark/FileSystemRDDProviderTest.scala  | 22 +------
 geomesa-fs/geomesa-fs-tools/pom.xml           | 20 ++++---
 .../fs/tools/ingest/CompactCommandTest.scala  | 32 ++++------
 .../ingest/FsManageMetadataCommandTest.scala  | 22 +------
 .../PartitionedPostgisDataStoreTest.scala     |  4 +-
 .../utils/classpath/ClassPathUtils.scala      |  2 +-
 pom.xml                                       |  9 ++-
 14 files changed, 142 insertions(+), 104 deletions(-)
 create mode 100644 geomesa-fs/geomesa-fs-datastore/src/test/scala/org/locationtech/geomesa/fs/HadoopSharedCluster.scala

diff --git a/.github/workflows/build-and-test-2.12.yml b/.github/workflows/build-and-test-2.12.yml
index 7c3b3f6eada2..1c517c65f892 100644
--- a/.github/workflows/build-and-test-2.12.yml
+++ b/.github/workflows/build-and-test-2.12.yml
@@ -28,23 +28,24 @@ jobs:
         run: ./build/mvn clean install $MAVEN_CLI_OPTS -DskipTests -T4
       - name: Unit tests
         id: test
+        continue-on-error: true
         run: |
           set -o pipefail
           mvn surefire:test $MAVEN_CLI_OPTS $MAVEN_TEST_OPTS | tee -a test.log
-        continue-on-error: true
       - name: Unit tests (retry)
         id: test-retry
         if: steps.test.outcome=='failure'
+        continue-on-error: true
         run: |
           set -o pipefail
-          RESUME_FROM="$(tail -n2 test.log | grep 'rf' | sed 's/.*-rf/-rf/')"
+          RESUME_FROM="$(grep --text 'mvn -rf ' test.log | tail -n1 | sed 's/.*-rf/-rf/')"
           mvn surefire:test $MAVEN_CLI_OPTS $MAVEN_TEST_OPTS $RESUME_FROM | tee -a test.log
       - name: Unit tests (retry)
         id: test-retry-retry
         if: steps.test-retry.outcome=='failure'
         run: |
           set -o pipefail
-          RESUME_FROM="$(tail -n2 test.log | grep 'rf' | sed 's/.*-rf/-rf/')"
+          RESUME_FROM="$(grep --text 'mvn -rf ' test.log | tail -n1 | sed 's/.*-rf/-rf/')"
           mvn surefire:test $MAVEN_CLI_OPTS $MAVEN_TEST_OPTS $RESUME_FROM | tee -a test.log
       - name: Remove geomesa artifacts
         if: success() || failure()
diff --git a/.github/workflows/build-and-test-2.13.yml b/.github/workflows/build-and-test-2.13.yml
index f697bb9a7361..5da0d2f8f77c 100644
--- a/.github/workflows/build-and-test-2.13.yml
+++ b/.github/workflows/build-and-test-2.13.yml
@@ -30,23 +30,24 @@ jobs:
         run: ./build/mvn clean install $MAVEN_CLI_OPTS -DskipTests -T4
       - name: Unit tests
         id: test
+        continue-on-error: true
         run: |
           set -o pipefail
           mvn surefire:test $MAVEN_CLI_OPTS $MAVEN_TEST_OPTS | tee -a test.log
-        continue-on-error: true
      - name: Unit tests (retry)
         id: test-retry
         if: steps.test.outcome=='failure'
+        continue-on-error: true
         run: |
           set -o pipefail
-          RESUME_FROM="$(tail -n2 test.log | grep 'rf' | sed 's/.*-rf/-rf/')"
+          RESUME_FROM="$(grep --text 'mvn -rf ' test.log | tail -n1 | sed 's/.*-rf/-rf/')"
           mvn surefire:test $MAVEN_CLI_OPTS $MAVEN_TEST_OPTS $RESUME_FROM | tee -a test.log
       - name: Unit tests (retry)
         id: test-retry-retry
         if: steps.test-retry.outcome=='failure'
         run: |
           set -o pipefail
-          RESUME_FROM="$(tail -n2 test.log | grep 'rf' | sed 's/.*-rf/-rf/')"
+          RESUME_FROM="$(grep --text 'mvn -rf ' test.log | tail -n1 | sed 's/.*-rf/-rf/')"
           mvn surefire:test $MAVEN_CLI_OPTS $MAVEN_TEST_OPTS $RESUME_FROM | tee -a test.log
       - name: Remove geomesa artifacts
         if: success() || failure()
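Two things are going on in the workflow changes above. First, `continue-on-error: true` is added to the first retry step (and, cosmetically, moved above `run:` in the initial test step): without it, a failure in `test-retry` would abort the job before the second retry could run, which is the "fix CI continue on test failure" item from the commit message. Second, the `RESUME_FROM` extraction is hardened: `tail -n2 test.log | grep 'rf'` only worked when Maven's resume hint happened to sit in the last two lines of the log, while `grep --text 'mvn -rf ' test.log | tail -n1` scans the whole log (treating it as text even if stray binary bytes crept in) and keeps the most recent hint. The `sed 's/.*-rf/-rf/'` step then trims everything before the flag, so a failure hint such as `mvn <args> -rf :some-module` (module name hypothetical) becomes `-rf :some-module`, which resumes the next `mvn surefire:test` invocation from the first failed module.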
diff --git a/build/cqs.tsv b/build/cqs.tsv
index a1d103c0cb7b..75d426df05a6 100644
--- a/build/cqs.tsv
+++ b/build/cqs.tsv
@@ -275,8 +275,8 @@ org.apache.hadoop:hadoop-client 3.3.6 provided
 org.apache.hadoop:hadoop-common 3.3.6 provided
 org.apache.hadoop:hadoop-distcp 3.3.6 provided
 org.apache.hadoop:hadoop-hdfs 3.3.6 provided
-org.apache.hadoop:hadoop-mapreduce-client-common 3.3.6 provided
 org.apache.hadoop:hadoop-mapreduce-client-core 3.3.6 provided
+org.apache.hadoop:hadoop-mapreduce-client-jobclient 3.3.6 provided
 org.apache.hadoop:hadoop-yarn-api 3.3.6 provided
 org.apache.hadoop:hadoop-yarn-common 3.3.6 provided
 org.apache.hbase:hbase-server 2.5.7-hadoop3 provided
@@ -302,7 +302,6 @@ org.apache.arrow:arrow-vector tests:15.0.2 test
 org.apache.cassandra:cassandra-all 3.11.14 test
 org.apache.cassandra:cassandra-thrift 3.11.14 test
 org.apache.curator:curator-test 5.6.0 test
-org.apache.hadoop:hadoop-minicluster 3.3.6 test
 org.apache.hbase:hbase-testing-util 2.5.7-hadoop3 test
 org.apache.kafka:kafka-clients test:3.7.0 test
 org.apache.kafka:kafka-streams-test-utils 3.7.0 test
@@ -311,7 +310,7 @@ org.apache.logging.log4j:log4j-core 2.22.1 test
 org.apache.sedona:sedona-common 1.5.0 test
 org.cassandraunit:cassandra-unit 3.7.1.0 test
 org.codehaus.groovy:groovy-jsr223 3.0.20 test
-org.geomesa.testcontainers:testcontainers-accumulo 1.1.0 test
+org.geomesa.testcontainers:testcontainers-accumulo 1.3.0 test
 org.geotools:gt-epsg-hsql 30.2 test
 org.jruby:jruby 9.4.5.0 test
 org.mockito:mockito-core 2.28.2 test
diff --git a/geomesa-accumulo/geomesa-accumulo-jobs/pom.xml b/geomesa-accumulo/geomesa-accumulo-jobs/pom.xml
index 095cd26050b5..e8bd54427612 100644
--- a/geomesa-accumulo/geomesa-accumulo-jobs/pom.xml
+++ b/geomesa-accumulo/geomesa-accumulo-jobs/pom.xml
@@ -82,20 +82,6 @@
             <groupId>org.geomesa.testcontainers</groupId>
             <artifactId>testcontainers-accumulo</artifactId>
         </dependency>
-
-        <dependency>
-            <groupId>org.xerial.snappy</groupId>
-            <artifactId>snappy-java</artifactId>
-            <version>${snappy.java.version}</version>
-            <scope>test</scope>
-        </dependency>
-
-        <dependency>
-            <groupId>org.mockito</groupId>
-            <artifactId>mockito-core</artifactId>
-            <version>${hadoop.minicluster.mockito.version}</version>
-            <scope>test</scope>
-        </dependency>
     </dependencies>
 
 </project>
diff --git a/geomesa-fs/geomesa-fs-datastore/pom.xml b/geomesa-fs/geomesa-fs-datastore/pom.xml
index 8091521dbd9d..1e306c4fff5d 100644
--- a/geomesa-fs/geomesa-fs-datastore/pom.xml
+++ b/geomesa-fs/geomesa-fs-datastore/pom.xml
@@ -64,6 +64,7 @@
             <artifactId>hadoop-mapreduce-client-core</artifactId>
         </dependency>
+
         <dependency>
             <groupId>org.specs2</groupId>
             <artifactId>specs2-core_${scala.binary.version}</artifactId>
         </dependency>
@@ -82,6 +83,31 @@
             <artifactId>geomesa-fs-storage-orc_${scala.binary.version}</artifactId>
             <scope>test</scope>
         </dependency>
+
+        <dependency>
+            <groupId>org.testcontainers</groupId>
+            <artifactId>testcontainers</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.geomesa.testcontainers</groupId>
+            <artifactId>testcontainers-accumulo</artifactId>
+        </dependency>
     </dependencies>
 
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-jar-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <goals>
+                            <goal>test-jar</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
+
 </project>
diff --git a/geomesa-fs/geomesa-fs-datastore/src/test/scala/org/locationtech/geomesa/fs/HadoopSharedCluster.scala b/geomesa-fs/geomesa-fs-datastore/src/test/scala/org/locationtech/geomesa/fs/HadoopSharedCluster.scala
new file mode 100644
index 000000000000..5a262f50b489
--- /dev/null
+++ b/geomesa-fs/geomesa-fs-datastore/src/test/scala/org/locationtech/geomesa/fs/HadoopSharedCluster.scala
@@ -0,0 +1,59 @@
+/***********************************************************************
+ * Copyright (c) 2013-2024 Commonwealth Computer Research, Inc.
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Apache License, Version 2.0
+ * which accompanies this distribution and is available at
+ * http://www.opensource.org/licenses/apache2.0.php.
+ ***********************************************************************/
+
+package org.locationtech.geomesa.fs
+
+import com.typesafe.scalalogging.StrictLogging
+import org.apache.hadoop.conf.Configuration
+import org.geomesa.testcontainers.HadoopContainer
+import org.testcontainers.utility.DockerImageName
+
+import java.io.{ByteArrayInputStream, StringWriter}
+import java.nio.charset.StandardCharsets
+import java.util.concurrent.atomic.AtomicBoolean
+import scala.util.Try
+
+/**
+ * Hadoop cluster for testing. Singleton object that is shared between all test classes in the jvm.
+ */
+object HadoopSharedCluster extends StrictLogging {
+
+  val ImageName =
+    DockerImageName.parse("ghcr.io/geomesa/accumulo-uno")
+        .withTag(sys.props.getOrElse("accumulo.docker.tag", "2.1.2"))
+
+  lazy val Container: HadoopContainer = tryContainer.get
+
+  lazy val ContainerConfig: String = {
+    val conf = new Configuration(false)
+    conf.addResource(new ByteArrayInputStream(Container.getConfigurationXml.getBytes(StandardCharsets.UTF_8)), "")
+    conf.set("parquet.compression", "GZIP", "") // default is snappy which is not on our classpath
+    val writer = new StringWriter()
+    conf.writeXml(writer)
+    writer.toString
+  }
+
+  private lazy val tryContainer: Try[HadoopContainer] = Try {
+    logger.info("Starting Hadoop container")
+    val container = new HadoopContainer(ImageName)
+    initialized.getAndSet(true)
+    container.start()
+    logger.info("Started Hadoop container")
+    container
+  }
+
+  private val initialized = new AtomicBoolean(false)
+
+  sys.addShutdownHook({
+    if (initialized.get) {
+      logger.info("Stopping Hadoop container")
+      tryContainer.foreach(_.stop())
+      logger.info("Stopped Hadoop container")
+    }
+  })
+}
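HadoopSharedCluster above is the replacement for the per-suite MiniDFSCluster: the container starts lazily on first access, is shared by every test class in the JVM, and is torn down by a shutdown hook rather than per-test cleanup. The updated suites below consume it roughly as follows — a minimal sketch assuming the FileSystemDataStore parameter names used elsewhere in this patch (`MyTest` is a hypothetical suite name):

    import org.geotools.api.data.DataStoreFinder
    import org.locationtech.geomesa.fs.HadoopSharedCluster

    import scala.collection.JavaConverters._

    // a unique directory per suite keeps tests isolated on the shared HDFS
    val path = s"${HadoopSharedCluster.Container.getHdfsUrl}/MyTest/"
    val params = Map(
      "fs.path"       -> path,
      // the container's full Hadoop client config, plus the parquet.compression=GZIP override
      "fs.config.xml" -> HadoopSharedCluster.ContainerConfig
    )
    val ds = DataStoreFinder.getDataStore(params.asJava)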
diff --git a/geomesa-fs/geomesa-fs-spark-runtime/pom.xml b/geomesa-fs/geomesa-fs-spark-runtime/pom.xml
index 3bd1425449ae..ae09a6100856 100644
--- a/geomesa-fs/geomesa-fs-spark-runtime/pom.xml
+++ b/geomesa-fs/geomesa-fs-spark-runtime/pom.xml
@@ -93,16 +93,17 @@
             <scope>test</scope>
         </dependency>
         <dependency>
-            <groupId>org.apache.hadoop</groupId>
-            <artifactId>hadoop-minicluster</artifactId>
-            <scope>test</scope>
+            <groupId>org.locationtech.geomesa</groupId>
+            <artifactId>geomesa-fs-datastore_${scala.binary.version}</artifactId>
+            <classifier>tests</classifier>
         </dependency>
         <dependency>
-            <groupId>org.mockito</groupId>
-            <artifactId>mockito-core</artifactId>
-            <version>${hadoop.minicluster.mockito.version}</version>
-            <scope>test</scope>
+            <groupId>org.testcontainers</groupId>
+            <artifactId>testcontainers</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.geomesa.testcontainers</groupId>
+            <artifactId>testcontainers-accumulo</artifactId>
         </dependency>
     </dependencies>
 </project>
diff --git a/geomesa-fs/geomesa-fs-spark-runtime/src/test/scala/org/locationtech/geomesa/fs/spark/FileSystemRDDProviderTest.scala b/geomesa-fs/geomesa-fs-spark-runtime/src/test/scala/org/locationtech/geomesa/fs/spark/FileSystemRDDProviderTest.scala
index 66ebeb3d7525..57f011f229c5 100644
--- a/geomesa-fs/geomesa-fs-spark-runtime/src/test/scala/org/locationtech/geomesa/fs/spark/FileSystemRDDProviderTest.scala
+++ b/geomesa-fs/geomesa-fs-spark-runtime/src/test/scala/org/locationtech/geomesa/fs/spark/FileSystemRDDProviderTest.scala
@@ -9,13 +9,12 @@
 package org.locationtech.geomesa.fs.spark
 
 import com.typesafe.scalalogging.LazyLogging
-import org.apache.commons.io.FileUtils
-import org.apache.hadoop.hdfs.{HdfsConfiguration, MiniDFSCluster}
 import org.apache.spark.sql.{SQLContext, SparkSession}
 import org.geotools.api.data.{DataStore, DataStoreFinder, Transaction}
 import org.geotools.filter.text.ecql.ECQL
 import org.junit.runner.RunWith
 import org.locationtech.geomesa.features.ScalaSimpleFeature
+import org.locationtech.geomesa.fs.HadoopSharedCluster
 import org.locationtech.geomesa.spark.SparkSQLTestUtils
 import org.locationtech.geomesa.spark.sql.SQLTypes
 import org.locationtech.geomesa.utils.geotools.{FeatureUtils, SimpleFeatureTypes}
@@ -24,8 +23,6 @@ import org.locationtech.geomesa.utils.text.WKTUtils
 import org.specs2.mutable.Specification
 import org.specs2.runner.JUnitRunner
 
-import java.nio.file.{Files, Path}
-
 @RunWith(classOf[JUnitRunner])
 class FileSystemRDDProviderTest extends Specification with LazyLogging {
@@ -36,26 +33,16 @@ class FileSystemRDDProviderTest extends Specification with LazyLogging {
 
   sequential
 
-  val tempDir: Path = Files.createTempDirectory("fsSparkTest")
-
-  var cluster: MiniDFSCluster = _
-  var directory: String = _
-
   var spark: SparkSession = _
   var sc: SQLContext = _
 
-  lazy val params = Map("fs.path" -> directory)
+  lazy val path = s"${HadoopSharedCluster.Container.getHdfsUrl}/${getClass.getSimpleName}/"
+  lazy val params = Map("fs.path" -> path)
   lazy val ds: DataStore = DataStoreFinder.getDataStore(params.asJava)
 
   val formats = Seq("orc", "parquet")
 
   step {
-    // Start MiniCluster
-    val conf = new HdfsConfiguration()
-    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, tempDir.toFile.getAbsolutePath)
-    cluster = new MiniDFSCluster.Builder(conf).build()
-    directory = cluster.getURI + "/data/chicago"
-
     formats.foreach { format =>
       val sft = SimpleFeatureTypes.createType(format,
         "arrest:String,case_number:Int:index=full:cardinality=high,dtg:Date,*geom:Point:srid=4326")
@@ -216,8 +203,5 @@ class FileSystemRDDProviderTest extends Specification with LazyLogging {
 
   step {
     ds.dispose()
-    // Stop MiniCluster
-    cluster.shutdown()
-    FileUtils.deleteDirectory(tempDir.toFile)
   }
 }
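Beyond deleting the MiniDFSCluster lifecycle code, the only change the Spark test needed was pointing `fs.path` at the container's HDFS URL; the Spark SQL integration itself is untouched. For reference, a minimal sketch of reading such a store through Spark, assuming the `geomesa` data source registered by GeoMesa's Spark integration (the `orc` feature type name comes from this test; the session setup is illustrative):

    import org.apache.spark.sql.SparkSession

    import org.locationtech.geomesa.fs.HadoopSharedCluster

    val spark = SparkSession.builder().master("local[*]").appName("fs-sketch").getOrCreate()

    // read a GeoMesa FileSystem store on the containerized HDFS as a DataFrame
    val df = spark.read
      .format("geomesa")
      .option("fs.path", s"${HadoopSharedCluster.Container.getHdfsUrl}/FileSystemRDDProviderTest/")
      .option("geomesa.feature", "orc")
      .load()
    df.createOrReplaceTempView("orc")
    spark.sql("select count(*) from orc").show()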
diff --git a/geomesa-fs/geomesa-fs-tools/pom.xml b/geomesa-fs/geomesa-fs-tools/pom.xml
index 2d91d786fd79..ead41ffa1b5b 100644
--- a/geomesa-fs/geomesa-fs-tools/pom.xml
+++ b/geomesa-fs/geomesa-fs-tools/pom.xml
@@ -47,6 +47,10 @@
             <groupId>org.apache.hadoop</groupId>
             <artifactId>hadoop-mapreduce-client-core</artifactId>
         </dependency>
+        <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
+        </dependency>
         <dependency>
             <groupId>org.slf4j</groupId>
@@ -69,19 +73,17 @@
             <artifactId>specs2-junit_${scala.binary.version}</artifactId>
         </dependency>
         <dependency>
-            <groupId>org.apache.hadoop</groupId>
-            <artifactId>hadoop-mapreduce-client-common</artifactId>
+            <groupId>org.locationtech.geomesa</groupId>
+            <artifactId>geomesa-fs-datastore_${scala.binary.version}</artifactId>
+            <classifier>tests</classifier>
         </dependency>
         <dependency>
-            <groupId>org.apache.hadoop</groupId>
-            <artifactId>hadoop-minicluster</artifactId>
+            <groupId>org.testcontainers</groupId>
+            <artifactId>testcontainers</artifactId>
         </dependency>
         <dependency>
-            <groupId>org.mockito</groupId>
-            <artifactId>mockito-core</artifactId>
-            <version>${hadoop.minicluster.mockito.version}</version>
-            <scope>test</scope>
+            <groupId>org.geomesa.testcontainers</groupId>
+            <artifactId>testcontainers-accumulo</artifactId>
         </dependency>
     </dependencies>
diff --git a/geomesa-fs/geomesa-fs-tools/src/test/scala/org/locationtech/geomesa/fs/tools/ingest/CompactCommandTest.scala b/geomesa-fs/geomesa-fs-tools/src/test/scala/org/locationtech/geomesa/fs/tools/ingest/CompactCommandTest.scala
index 8127f1ada498..3ea889ef558b 100644
--- a/geomesa-fs/geomesa-fs-tools/src/test/scala/org/locationtech/geomesa/fs/tools/ingest/CompactCommandTest.scala
+++ b/geomesa-fs/geomesa-fs-tools/src/test/scala/org/locationtech/geomesa/fs/tools/ingest/CompactCommandTest.scala
@@ -8,12 +8,11 @@
 
 package org.locationtech.geomesa.fs.tools.ingest
 
-import org.apache.commons.io.FileUtils
-import org.apache.hadoop.hdfs.{HdfsConfiguration, MiniDFSCluster}
 import org.geotools.api.data.{DataStoreFinder, Query, Transaction}
 import org.geotools.api.feature.simple.{SimpleFeature, SimpleFeatureType}
 import org.junit.runner.RunWith
 import org.locationtech.geomesa.features.ScalaSimpleFeature
+import org.locationtech.geomesa.fs.HadoopSharedCluster
 import org.locationtech.geomesa.fs.data.FileSystemDataStore
 import org.locationtech.geomesa.fs.tools.compact.FsCompactCommand
 import org.locationtech.geomesa.tools.DistributedRunParam.RunModes
@@ -25,8 +24,6 @@ import org.specs2.matcher.MatchResult
 import org.specs2.mutable.Specification
 import org.specs2.runner.JUnitRunner
 
-import java.nio.file.{Files, Path}
-
 @RunWith(classOf[JUnitRunner])
 class CompactCommandTest extends Specification {
@@ -36,13 +33,8 @@ class CompactCommandTest extends Specification {
 
   sequential
 
-  val tempDir: Path = Files.createTempDirectory("compactCommand")
-
   val encodings = Seq("parquet", "orc")
 
-  var cluster: MiniDFSCluster = _
-  var directory: String = _
-
   val pt = WKTUtils.read("POINT(0 0)")
   val line = WKTUtils.read("LINESTRING(0 0, 1 1, 4 4)")
   val polygon = WKTUtils.read("POLYGON((10 10, 10 20, 20 20, 20 10, 10 10), (11 11, 19 11, 19 19, 11 19, 11 11))")
@@ -63,10 +55,13 @@ class CompactCommandTest extends Specification {
   val numFeatures = 10000
   val targetFileSize = 8000L // kind of a magic number, in that it divides up the features into files fairly evenly with no remainder
 
+  lazy val path = s"${HadoopSharedCluster.Container.getHdfsUrl}/${getClass.getSimpleName}/"
+
   lazy val ds = {
     val dsParams = Map(
-      "fs.path" -> directory,
-      "fs.config.xml" -> "<configuration><property><name>parquet.compression</name><value>GZIP</value></property></configuration>")
+      "fs.path" -> path,
+      "fs.config.xml" -> HadoopSharedCluster.ContainerConfig
+    )
     DataStoreFinder.getDataStore(dsParams.asJava).asInstanceOf[FileSystemDataStore]
   }
@@ -79,11 +74,6 @@ class CompactCommandTest extends Specification {
   }
 
   step {
-    // Start MiniCluster
-    val conf = new HdfsConfiguration()
-    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, tempDir.toFile.getAbsolutePath)
-    cluster = new MiniDFSCluster.Builder(conf).build()
-    directory = cluster.getDataDirectory + "_fs"
     sfts.foreach { sft =>
       ds.createSchema(sft)
       // create 2 files per partition
@@ -117,10 +107,11 @@ class CompactCommandTest extends Specification {
       foreach(sfts) { sft =>
         val command = new FsCompactCommand()
         command.params.featureName = sft.getTypeName
-        command.params.path = directory
+        command.params.path = path
         command.params.runMode = RunModes.Distributed.toString
         // invoke on our existing store so the cached metadata gets updated
-        command.compact(ds) must not(throwAn[Exception])
+        command.compact(ds)// must not(throwAn[Exception])
+        ok
       }
     }
@@ -145,7 +136,7 @@ class CompactCommandTest extends Specification {
       foreach(sfts) { sft =>
         val command = new FsCompactCommand()
         command.params.featureName = sft.getTypeName
-        command.params.path = directory
+        command.params.path = path
         command.params.runMode = RunModes.Distributed.toString
         command.params.targetFileSize = targetFileSize
         // invoke on our existing store so the cached metadata gets updated
@@ -188,8 +179,5 @@ class CompactCommandTest extends Specification {
 
   step {
     ds.dispose()
-    // Stop MiniCluster
-    cluster.shutdown()
-    FileUtils.deleteDirectory(tempDir.toFile)
   }
 }
diff --git a/geomesa-fs/geomesa-fs-tools/src/test/scala/org/locationtech/geomesa/fs/tools/ingest/FsManageMetadataCommandTest.scala b/geomesa-fs/geomesa-fs-tools/src/test/scala/org/locationtech/geomesa/fs/tools/ingest/FsManageMetadataCommandTest.scala
index a6b2ece1d131..b7f6dbde9928 100644
--- a/geomesa-fs/geomesa-fs-tools/src/test/scala/org/locationtech/geomesa/fs/tools/ingest/FsManageMetadataCommandTest.scala
+++ b/geomesa-fs/geomesa-fs-tools/src/test/scala/org/locationtech/geomesa/fs/tools/ingest/FsManageMetadataCommandTest.scala
@@ -8,12 +8,11 @@
 
 package org.locationtech.geomesa.fs.tools.ingest
 
-import org.apache.commons.io.FileUtils
 import org.apache.hadoop.fs.Path
-import org.apache.hadoop.hdfs.{HdfsConfiguration, MiniDFSCluster}
 import org.geotools.api.data.{DataStoreFinder, Query, Transaction}
 import org.junit.runner.RunWith
 import org.locationtech.geomesa.features.ScalaSimpleFeature
+import org.locationtech.geomesa.fs.HadoopSharedCluster
 import org.locationtech.geomesa.fs.data.FileSystemDataStore
 import org.locationtech.geomesa.utils.collection.SelfClosingIterator
 import org.locationtech.geomesa.utils.geotools.{FeatureUtils, SimpleFeatureTypes}
@@ -21,7 +20,6 @@ import org.locationtech.geomesa.utils.io.WithClose
 import org.specs2.mutable.Specification
 import org.specs2.runner.JUnitRunner
 
-import java.nio.file.Files
 import java.util.Collections
 import java.util.concurrent.atomic.AtomicInteger
 
@@ -32,8 +30,6 @@ class FsManageMetadataCommandTest extends Specification {
 
   import scala.collection.JavaConverters._
 
-  val dir = Files.createTempDirectory("gm-FsManageMetadataCommandTest").toFile
-
   val sft = SimpleFeatureTypes.createType("test", "name:String,dtg:Date,*geom:Point:srid=4326")
   sft.setEncoding("parquet")
   sft.setScheme("daily")
@@ -50,15 +46,8 @@ class FsManageMetadataCommandTest extends Specification {
 
   val counter = new AtomicInteger(0)
 
-  var cluster: MiniDFSCluster = _
-
-  def nextPath(): String = s"${cluster.getDataDirectory}_fs${counter.incrementAndGet()}"
-
-  step {
-    val conf = new HdfsConfiguration()
-    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dir.getAbsolutePath)
-    cluster = new MiniDFSCluster.Builder(conf).build()
-  }
+  def nextPath(): String =
+    s"${HadoopSharedCluster.Container.getHdfsUrl}/${getClass.getSimpleName}/${counter.incrementAndGet()}/"
 
   "ManageMetadata command" should {
     "find file inconsistencies" in {
@@ -139,9 +128,4 @@ class FsManageMetadataCommandTest extends Specification {
       }
     }
   }
-
-  step {
-    cluster.shutdown()
-    FileUtils.deleteDirectory(dir)
-  }
 }
diff --git a/geomesa-gt/geomesa-gt-partitioning/src/test/scala/org/locationtech/geomesa/gt/partition/postgis/PartitionedPostgisDataStoreTest.scala b/geomesa-gt/geomesa-gt-partitioning/src/test/scala/org/locationtech/geomesa/gt/partition/postgis/PartitionedPostgisDataStoreTest.scala
index cdc41a8e316f..3d46f485aa30 100644
--- a/geomesa-gt/geomesa-gt-partitioning/src/test/scala/org/locationtech/geomesa/gt/partition/postgis/PartitionedPostgisDataStoreTest.scala
+++ b/geomesa-gt/geomesa-gt-partitioning/src/test/scala/org/locationtech/geomesa/gt/partition/postgis/PartitionedPostgisDataStoreTest.scala
@@ -580,7 +580,7 @@ class PartitionedPostgisDataStoreTest extends Specification with BeforeAfterAll
 
     "support idle_in_transaction_session_timeout" in {
       val sft = SimpleFeatureTypes.renameSft(this.sft, "timeout")
-      val ds = DataStoreFinder.getDataStore((params ++ Map("idle_in_transaction_session_timeout" -> "100ms", "fetch size" -> 1)).asJava)
+      val ds = DataStoreFinder.getDataStore((params ++ Map("idle_in_transaction_session_timeout" -> "500ms", "fetch size" -> 1)).asJava)
       ds must not(beNull)
 
       try {
@@ -608,7 +608,7 @@ class PartitionedPostgisDataStoreTest extends Specification with BeforeAfterAll
         WithClose(ds.getFeatureReader(new Query(sft.getTypeName), Transaction.AUTO_COMMIT)) { reader =>
           reader.hasNext must beTrue
           reader.next must not(beNull)
-          Thread.sleep(120)
+          Thread.sleep(600)
           reader.hasNext must throwAn[Exception]
         }
       } finally {
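The postgis changes above are the commit's second bullet point. The test opens a cursor with `fetch size` 1, reads one feature, idles, and then expects the next fetch to fail once the server has reaped the idle-in-transaction session. That puts the sleep under two constraints: it must exceed `idle_in_transaction_session_timeout` with enough margin for the server-side reaper to actually fire (the old values left only 120 − 100 = 20 ms of slack, versus 600 − 500 = 100 ms now), while the timeout itself must be long enough that a slow CI runner doesn't lose the session before the first read completes — which is presumably why the timeout grew to 500 ms rather than only the sleep.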
diff --git a/geomesa-utils-parent/geomesa-utils/src/main/scala/org/locationtech/geomesa/utils/classpath/ClassPathUtils.scala b/geomesa-utils-parent/geomesa-utils/src/main/scala/org/locationtech/geomesa/utils/classpath/ClassPathUtils.scala
index e9bab03d4123..2d42a7c3ae32 100644
--- a/geomesa-utils-parent/geomesa-utils/src/main/scala/org/locationtech/geomesa/utils/classpath/ClassPathUtils.scala
+++ b/geomesa-utils-parent/geomesa-utils/src/main/scala/org/locationtech/geomesa/utils/classpath/ClassPathUtils.scala
@@ -43,7 +43,7 @@ object ClassPathUtils extends LazyLogging {
     }
 
     if (remaining.nonEmpty) {
-      logger.warn(s"Could not find requested jars: $remaining")
+      logger.warn(s"Could not find requested jars: ${remaining.mkString(", ")}")
     }
 
     foundJars.distinct.toSeq
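The one-line change above fixes the log message rather than behavior: `remaining` is a Scala collection, and interpolating it directly prints the collection's `toString`, wrapper class and all, whereas `mkString` prints only the elements. A quick illustration (jar names hypothetical):

    val remaining = Seq("foo.jar", "bar.jar")
    println(s"Could not find requested jars: $remaining")
    // prints: Could not find requested jars: List(foo.jar, bar.jar)
    println(s"Could not find requested jars: ${remaining.mkString(", ")}")
    // prints: Could not find requested jars: foo.jar, bar.jar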
diff --git a/pom.xml b/pom.xml
index 2070e7389012..586cc8dd16e6 100644
--- a/pom.xml
+++ b/pom.xml
@@ -119,7 +119,7 @@
         <junit.version>4.13.2</junit.version>
        <junit.jupiter.version>5.9.3</junit.jupiter.version>
         <testcontainers.version>1.19.7</testcontainers.version>
-        <testcontainers.accumulo.version>1.1.0</testcontainers.accumulo.version>
+        <testcontainers.accumulo.version>1.3.0</testcontainers.accumulo.version>
         <hadoop.minicluster.mockito.version>2.28.2</hadoop.minicluster.mockito.version>
@@ -867,6 +867,13 @@
                 <classifier>tests</classifier>
                 <scope>test</scope>
             </dependency>
+            <dependency>
+                <groupId>org.locationtech.geomesa</groupId>
+                <artifactId>geomesa-fs-datastore_${scala.binary.version}</artifactId>
+                <version>${project.version}</version>
+                <classifier>tests</classifier>
+                <scope>test</scope>
+            </dependency>
             <dependency>
                 <groupId>org.locationtech.geomesa</groupId>
                 <artifactId>geomesa-cqengine-datastore_${scala.binary.version}</artifactId>