Resolve all com.google.common.io.Files deprecation warnings
leveyja authored and findepi committed Aug 3, 2022
1 parent dfc59b6 commit 8d6b82e
Showing 27 changed files with 124 additions and 87 deletions.
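Every change in this commit is one of a handful of mechanical substitutions from the deprecated Guava `com.google.common.io.Files` helpers to `java.nio.file.Files`. A minimal, self-contained sketch of the patterns involved (file names here are illustrative, not from the diff):

```java
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;

import static java.nio.charset.StandardCharsets.UTF_8;

public class GuavaToNioSketch
{
    public static void main(String[] args)
            throws IOException // the NIO file methods throw checked IOException
    {
        // Guava: File dir = com.google.common.io.Files.createTempDir();
        File dir = Files.createTempDirectory(null).toFile();

        // Guava: Files.write(contents, file, UTF_8);
        File file = new File(dir, "example.txt");
        Files.writeString(file.toPath(), "contents", UTF_8);

        // Guava: Files.copy(file, target);
        File target = new File(dir, "copy.txt");
        Files.copy(file.toPath(), target.toPath());

        // Guava: Files.asByteSink(binary).write(bytes);
        Files.write(new File(dir, "data.bin").toPath(), new byte[] {1, 2, 3});
    }
}
```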
@@ -23,12 +23,12 @@
import org.testng.annotations.Test;

import java.io.File;
+import java.nio.file.Files;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.Random;

-import static com.google.common.io.Files.createTempDir;
import static io.trino.memory.context.AggregatedMemoryContext.newSimpleAggregatedMemoryContext;
import static io.trino.orc.OrcTester.writeOrcColumnTrino;
import static io.trino.orc.metadata.CompressionKind.NONE;
@@ -52,7 +52,7 @@ public void testDictionaryReaderUpdatesRetainedSize()
{
// create orc file
List<String> values = createValues();
-File temporaryDirectory = createTempDir();
+File temporaryDirectory = Files.createTempDirectory(null).toFile();
File orcFile = new File(temporaryDirectory, randomUUID().toString());
writeOrcColumnTrino(orcFile, NONE, VARCHAR, values.iterator(), new OrcWriterStats());
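For background: Guava deprecated `createTempDir()` in favor of `java.nio.file.Files.createTempDirectory`, which creates the directory with more restrictive permissions and throws a checked `IOException`. Neither variant deletes the directory automatically, so explicit cleanup remains the caller's job. A minimal sketch of the replacement plus cleanup, assuming Guava's non-deprecated `MoreFiles` is on the classpath (other tests in this commit use it):

```java
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;

import static com.google.common.io.MoreFiles.deleteRecursively;
import static com.google.common.io.RecursiveDeleteOption.ALLOW_INSECURE;

public class TempDirSketch
{
    public static void main(String[] args)
            throws IOException
    {
        // Replacement for Guava's deprecated createTempDir();
        // the null prefix matches what this commit passes.
        File temporaryDirectory = Files.createTempDirectory(null).toFile();
        try {
            System.out.println("working in " + temporaryDirectory);
        }
        finally {
            deleteRecursively(temporaryDirectory.toPath(), ALLOW_INSECURE);
        }
    }
}
```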

@@ -38,13 +38,13 @@
import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES;
import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.PROTOCOL_VERSION;
import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.REQUEST_TIMEOUT;
-import static com.google.common.io.Files.write;
import static com.google.common.io.Resources.getResource;
import static io.trino.plugin.cassandra.CassandraTestingUtils.CASSANDRA_TYPE_MANAGER;
import static java.lang.String.format;
import static java.nio.charset.StandardCharsets.UTF_8;
import static java.nio.file.Files.createDirectory;
import static java.nio.file.Files.createTempDirectory;
+import static java.nio.file.Files.writeString;
import static java.util.Objects.requireNonNull;
import static java.util.concurrent.TimeUnit.MINUTES;
import static java.util.concurrent.TimeUnit.NANOSECONDS;
@@ -124,7 +124,7 @@ private static String prepareCassandraYaml()

File yamlFile = tmpDirPath.resolve("cu-cassandra.yaml").toFile();
yamlFile.deleteOnExit();
-write(modified, yamlFile, UTF_8);
+writeString(yamlFile.toPath(), modified, UTF_8);

return yamlFile.getAbsolutePath();
}
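Guava's `Files.write(CharSequence, File, Charset)` maps one-for-one onto `Files.writeString` (Java 11+); the `Charset` argument can even be omitted when UTF-8 is intended, as later hunks in this commit do. A minimal sketch, with placeholder content:

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

import static java.nio.charset.StandardCharsets.UTF_8;

public class WriteStringSketch
{
    public static void main(String[] args)
            throws IOException
    {
        Path yaml = Files.createTempFile("cu-cassandra", ".yaml");
        // Before: com.google.common.io.Files.write(contents, yaml.toFile(), UTF_8);
        Files.writeString(yaml, "native_transport_port: 9042\n", UTF_8);
        System.out.println(Files.readString(yaml, UTF_8));
    }
}
```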
@@ -14,7 +14,6 @@
package io.trino.plugin.deltalake;

import com.google.common.collect.ImmutableList;
-import com.google.common.io.Files;
import io.airlift.json.JsonCodec;
import io.airlift.json.JsonCodecFactory;
import io.airlift.slice.Slice;
@@ -39,6 +38,7 @@
import org.testng.annotations.Test;

import java.io.File;
+import java.nio.file.Files;
import java.time.Instant;
import java.util.Collection;
import java.util.List;
@@ -72,7 +72,7 @@ public class TestDeltaLakePageSink
public void testPageSinkStats()
throws Exception
{
-File tempDir = Files.createTempDir();
+File tempDir = Files.createTempDirectory(null).toFile();
try {
DeltaLakeWriterStats stats = new DeltaLakeWriterStats();
String tablePath = tempDir.getAbsolutePath() + "/test_table";
@@ -16,7 +16,6 @@
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
-import com.google.common.io.Files;
import io.airlift.units.Duration;
import io.trino.plugin.deltalake.transactionlog.AddFileEntry;
import io.trino.plugin.deltalake.transactionlog.CommitInfoEntry;
@@ -48,6 +47,7 @@
import java.io.File;
import java.io.IOException;
import java.math.BigDecimal;
+import java.nio.file.Files;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.util.List;
@@ -410,7 +410,7 @@ public void testMetadataCacheUpdates()
{
String tableName = "person";
// setupTransactionLogAccess(tableName, new Path(getClass().getClassLoader().getResource("databricks/" + tableName).toURI()));
-File tempDir = Files.createTempDir();
+File tempDir = Files.createTempDirectory(null).toFile();
File tableDir = new File(tempDir, tableName);
File transactionLogDir = new File(tableDir, TRANSACTION_LOG_DIRECTORY);
transactionLogDir.mkdirs();
@@ -419,15 +419,15 @@ public void testMetadataCacheUpdates()
for (int i = 0; i < 12; i++) {
String extension = i == 10 ? ".checkpoint.parquet" : ".json";
String fileName = format("%020d%s", i, extension);
-Files.copy(resourceDir.resolve(fileName).toFile(), new File(transactionLogDir, fileName));
+Files.copy(resourceDir.resolve(fileName), new File(transactionLogDir, fileName).toPath());
}
-Files.copy(resourceDir.resolve(LAST_CHECKPOINT_FILENAME).toFile(), new File(transactionLogDir, LAST_CHECKPOINT_FILENAME));
+Files.copy(resourceDir.resolve(LAST_CHECKPOINT_FILENAME), new File(transactionLogDir, LAST_CHECKPOINT_FILENAME).toPath());

setupTransactionLogAccess(tableName, new Path(tableDir.toURI()));
assertEquals(tableSnapshot.getVersion(), 11L);

String lastTransactionName = format("%020d.json", 12);
-Files.copy(resourceDir.resolve(lastTransactionName).toFile(), new File(transactionLogDir, lastTransactionName));
+Files.copy(resourceDir.resolve(lastTransactionName), new File(transactionLogDir, lastTransactionName).toPath());
TableSnapshot updatedSnapshot = transactionLogAccess.loadSnapshot(new SchemaTableName("schema", tableName), new Path(tableDir.toURI()), SESSION);
assertEquals(updatedSnapshot.getVersion(), 12);
}
@@ -437,7 +437,7 @@ public void testUpdatingTailEntriesNoCheckpoint()
throws Exception
{
String tableName = "person";
-File tempDir = Files.createTempDir();
+File tempDir = Files.createTempDirectory(null).toFile();
File tableDir = new File(tempDir, tableName);
File transactionLogDir = new File(tableDir, TRANSACTION_LOG_DIRECTORY);
transactionLogDir.mkdirs();
@@ -479,7 +479,7 @@ public void testLoadingTailEntriesPastCheckpoint()
throws Exception
{
String tableName = "person";
-File tempDir = Files.createTempDir();
+File tempDir = Files.createTempDirectory(null).toFile();
File tableDir = new File(tempDir, tableName);
File transactionLogDir = new File(tableDir, TRANSACTION_LOG_DIRECTORY);
transactionLogDir.mkdirs();
@@ -501,7 +501,7 @@ public void testLoadingTailEntriesPastCheckpoint()
assertEqualsIgnoreOrder(activeDataFiles.stream().map(AddFileEntry::getPath).collect(Collectors.toSet()), dataFiles);

copyTransactionLogEntry(8, 12, resourceDir, transactionLogDir);
-Files.copy(new File(resourceDir, LAST_CHECKPOINT_FILENAME), new File(transactionLogDir, LAST_CHECKPOINT_FILENAME));
+Files.copy(new File(resourceDir, LAST_CHECKPOINT_FILENAME).toPath(), new File(transactionLogDir, LAST_CHECKPOINT_FILENAME).toPath());
TableSnapshot updatedSnapshot = transactionLogAccess.loadSnapshot(new SchemaTableName("schema", tableName), new Path(tableDir.toURI()), SESSION);
activeDataFiles = transactionLogAccess.getActiveFiles(updatedSnapshot, SESSION);

@@ -525,14 +525,14 @@ public void testIncrementalCacheUpdates()
throws Exception
{
String tableName = "person";
-File tempDir = Files.createTempDir();
+File tempDir = Files.createTempDirectory(null).toFile();
File tableDir = new File(tempDir, tableName);
File transactionLogDir = new File(tableDir, TRANSACTION_LOG_DIRECTORY);
transactionLogDir.mkdirs();

File resourceDir = new File(getClass().getClassLoader().getResource("databricks/person/_delta_log").toURI());
copyTransactionLogEntry(0, 12, resourceDir, transactionLogDir);
-Files.copy(new File(resourceDir, LAST_CHECKPOINT_FILENAME), new File(transactionLogDir, LAST_CHECKPOINT_FILENAME));
+Files.copy(new File(resourceDir, LAST_CHECKPOINT_FILENAME).toPath(), new File(transactionLogDir, LAST_CHECKPOINT_FILENAME).toPath());

setupTransactionLogAccess(tableName, new Path(tableDir.toURI()));
List<AddFileEntry> activeDataFiles = transactionLogAccess.getActiveFiles(tableSnapshot, SESSION);
@@ -582,14 +582,14 @@ public void testSnapshotsAreConsistent()
throws Exception
{
String tableName = "person";
-File tempDir = Files.createTempDir();
+File tempDir = Files.createTempDirectory(null).toFile();
File tableDir = new File(tempDir, tableName);
File transactionLogDir = new File(tableDir, TRANSACTION_LOG_DIRECTORY);
transactionLogDir.mkdirs();

File resourceDir = new File(getClass().getClassLoader().getResource("databricks/person/_delta_log").toURI());
copyTransactionLogEntry(0, 12, resourceDir, transactionLogDir);
-Files.copy(new File(resourceDir, LAST_CHECKPOINT_FILENAME), new File(transactionLogDir, LAST_CHECKPOINT_FILENAME));
+Files.copy(new File(resourceDir, LAST_CHECKPOINT_FILENAME).toPath(), new File(transactionLogDir, LAST_CHECKPOINT_FILENAME).toPath());

setupTransactionLogAccess(tableName, new Path(tableDir.toURI()));
List<AddFileEntry> expectedDataFiles = transactionLogAccess.getActiveFiles(tableSnapshot, SESSION);
@@ -637,7 +637,7 @@ public void testAddNewTransactionLogs()
throws Exception
{
String tableName = "person";
-File tempDir = Files.createTempDir();
+File tempDir = Files.createTempDirectory(null).toFile();
File tableDir = new File(tempDir, tableName);
File transactionLogDir = new File(tableDir, TRANSACTION_LOG_DIRECTORY);
transactionLogDir.mkdirs();
@@ -800,10 +800,10 @@ private void copyTransactionLogEntry(int startVersion, int endVersion, File sourceDir, File targetDir)
for (int i = startVersion; i < endVersion; i++) {
if (i % 10 == 0 && i != 0) {
String checkpointFileName = format("%020d.checkpoint.parquet", i);
-Files.copy(new File(sourceDir, checkpointFileName), new File(targetDir, checkpointFileName));
+Files.copy(new File(sourceDir, checkpointFileName).toPath(), new File(targetDir, checkpointFileName).toPath());
}
String lastTransactionName = format("%020d.json", i);
-Files.copy(new File(sourceDir, lastTransactionName), new File(targetDir, lastTransactionName));
+Files.copy(new File(sourceDir, lastTransactionName).toPath(), new File(targetDir, lastTransactionName).toPath());
}
}
}
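One behavioral difference is worth flagging: Guava's `Files.copy(File, File)` silently overwrites an existing target, while `java.nio.file.Files.copy(Path, Path)` throws `FileAlreadyExistsException` unless `REPLACE_EXISTING` is passed. That is harmless in these tests, which copy into freshly created directories, but a minimal sketch of the overwrite-tolerant form:

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

import static java.nio.file.StandardCopyOption.REPLACE_EXISTING;

public class CopySketch
{
    public static void main(String[] args)
            throws IOException
    {
        Path source = Files.createTempFile("source", ".json");
        Path target = Files.createTempFile("target", ".json");
        // Without REPLACE_EXISTING this would throw, since target already exists.
        Files.copy(source, target, REPLACE_EXISTING);
    }
}
```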
@@ -16,7 +16,6 @@
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
-import com.google.common.io.Files;
import com.google.common.io.Resources;
import io.airlift.json.JsonCodecFactory;
import io.trino.plugin.deltalake.DeltaLakeColumnHandle;
@@ -63,6 +62,7 @@
import org.testng.annotations.Test;

import java.io.File;
+import java.nio.file.Files;
import java.time.LocalDate;
import java.util.Map;
import java.util.Optional;
@@ -95,6 +95,7 @@ public class TestDeltaLakeMetastoreStatistics

@BeforeClass
public void setupMetastore()
+throws Exception
{
TestingConnectorContext context = new TestingConnectorContext();
TypeManager typeManager = context.getTypeManager();
@@ -113,7 +114,7 @@ public void setupMetastore()
hdfsEnvironment,
new ParquetReaderConfig());

-File tmpDir = Files.createTempDir();
+File tmpDir = Files.createTempDirectory(null).toFile();
File metastoreDir = new File(tmpDir, "metastore");
hiveMetastore = new FileHiveMetastore(
new NodeVersion("test_version"),
@@ -16,7 +16,6 @@
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableMultimap;
-import com.google.common.io.Files;
import com.google.common.io.Resources;
import com.google.inject.Injector;
import com.google.inject.Key;
@@ -56,6 +55,7 @@
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
+import java.nio.file.Files;
import java.util.List;
import java.util.Map;
import java.util.Optional;
@@ -97,8 +97,9 @@ public class TestDeltaLakeGlueMetastore

@BeforeClass
public void setUp()
+throws Exception
{
-tempDir = Files.createTempDir();
+tempDir = Files.createTempDirectory(null).toFile();
String temporaryLocation = tempDir.toURI().toString();

Map<String, String> config = ImmutableMap.<String, String>builder()
@@ -265,8 +266,7 @@ private void createTransactionLog(String deltaLakeTableLocation)
File deltaTableLogLocation = new File(new File(new URI(deltaLakeTableLocation)), "_delta_log");
verify(deltaTableLogLocation.mkdirs(), "mkdirs() on '%s' failed", deltaTableLogLocation);
byte[] entry = Resources.toByteArray(Resources.getResource("deltalake/person/_delta_log/00000000000000000000.json"));
-Files.asByteSink(new File(deltaTableLogLocation, "00000000000000000000.json"))
-        .write(entry);
+Files.write(new File(deltaTableLogLocation, "00000000000000000000.json").toPath(), entry);
}

private String tableLocation(SchemaTableName tableName)
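The byte-sink change above follows the same shape: `Files.asByteSink(file).write(bytes)` collapses into a single `java.nio.file.Files.write(Path, byte[])` call, which likewise creates the target or truncates an existing one. A minimal sketch (the file name mirrors the transaction-log entry above):

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

import static java.nio.charset.StandardCharsets.UTF_8;

public class ByteSinkSketch
{
    public static void main(String[] args)
            throws IOException
    {
        Path logEntry = Files.createTempFile("00000000000000000000", ".json");
        byte[] entry = "{}".getBytes(UTF_8);
        // Before: Files.asByteSink(logEntry.toFile()).write(entry);
        Files.write(logEntry, entry);
    }
}
```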
@@ -15,7 +15,6 @@

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
-import com.google.common.io.Files;
import com.google.common.io.RecursiveDeleteOption;
import com.google.common.reflect.ClassPath;
import io.airlift.log.Logger;
@@ -46,6 +45,7 @@
import java.io.InputStream;
import java.io.UncheckedIOException;
import java.net.URI;
+import java.nio.file.Files;
import java.util.List;
import java.util.Optional;
import java.util.OptionalInt;
@@ -87,8 +87,9 @@ protected AbstractTestHiveLocal(String testDbName)

@BeforeClass(alwaysRun = true)
public void initialize()
+throws Exception
{
-tempDir = Files.createTempDir();
+tempDir = Files.createTempDirectory(null).toFile();

HiveMetastore metastore = createMetastore(tempDir);

@@ -16,7 +16,6 @@
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
-import com.google.common.io.Files;
import io.airlift.json.JsonCodec;
import io.airlift.json.JsonCodecFactory;
import io.airlift.json.ObjectMapperProvider;
@@ -95,7 +94,6 @@
import static com.google.common.base.Verify.verify;
import static com.google.common.collect.Iterables.getOnlyElement;
import static com.google.common.collect.Sets.intersection;
-import static com.google.common.io.Files.asCharSink;
import static com.google.common.io.MoreFiles.deleteRecursively;
import static com.google.common.io.RecursiveDeleteOption.ALLOW_INSECURE;
import static io.trino.SystemSessionProperties.COLOCATED_JOIN;
@@ -151,6 +149,7 @@
import static java.lang.String.join;
import static java.nio.charset.StandardCharsets.UTF_8;
import static java.nio.file.Files.createTempDirectory;
+import static java.nio.file.Files.writeString;
import static java.util.Collections.nCopies;
import static java.util.Locale.ENGLISH;
import static java.util.Objects.requireNonNull;
@@ -3856,7 +3855,7 @@ private void testCreateExternalTable(
{
java.nio.file.Path tempDir = createTempDirectory(null);
File dataFile = tempDir.resolve("test.txt").toFile();
-Files.asCharSink(dataFile, UTF_8).write(fileContents);
+writeString(dataFile.toPath(), fileContents);

// Table properties
StringJoiner propertiesSql = new StringJoiner(",\n ");
@@ -7264,7 +7263,7 @@ private static File createAvroSchemaFile()
" \"fields\": [\n" +
" { \"name\":\"string_col\", \"type\":\"string\" }\n" +
"]}";
-asCharSink(schemaFile, UTF_8).write(schema);
+writeString(schemaFile.toPath(), schema);
return schemaFile;
}

@@ -14,7 +14,6 @@
package io.trino.plugin.hive;

import com.google.common.collect.ImmutableMap;
-import com.google.common.io.Files;
import io.trino.Session;
import io.trino.benchmark.BenchmarkSuite;
import io.trino.plugin.hive.metastore.Database;
@@ -25,6 +24,7 @@

import java.io.File;
import java.io.IOException;
+import java.nio.file.Files;
import java.util.Map;
import java.util.Optional;

@@ -42,7 +42,7 @@ public static void main(String[] args)
throws IOException
{
String outputDirectory = requireNonNull(System.getProperty("outputDirectory"), "Must specify -DoutputDirectory=...");
-File tempDir = Files.createTempDir();
+File tempDir = Files.createTempDirectory(null).toFile();
try (LocalQueryRunner localQueryRunner = createLocalQueryRunner(tempDir)) {
new BenchmarkSuite(localQueryRunner, outputDirectory).runAllBenchmarks();
}
@@ -16,7 +16,6 @@
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
-import com.google.common.io.Files;
import io.airlift.json.JsonCodec;
import io.airlift.slice.Slices;
import io.trino.operator.GroupByHashPageIndexerFactory;
@@ -48,6 +47,7 @@
import org.testng.annotations.Test;

import java.io.File;
+import java.nio.file.Files;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
@@ -106,7 +106,7 @@ public void testAllFormats()
throws Exception
{
HiveConfig config = new HiveConfig();
-File tempDir = Files.createTempDir();
+File tempDir = Files.createTempDirectory(null).toFile();
try {
HiveMetastore metastore = createTestingFileHiveMetastore(new File(tempDir, "metastore"));
for (HiveStorageFormat format : HiveStorageFormat.values()) {
@@ -200,6 +200,7 @@ protected AWSGlueAsync getGlueClient()
@BeforeClass(alwaysRun = true)
@Override
public void initialize()
+throws Exception
{
super.initialize();
// uncomment to get extra AWS debug information
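The scattered `throws Exception` additions in this and earlier hunks share one cause: Guava's `createTempDir()` declared no checked exceptions, while `Files.createTempDirectory` throws `IOException`, so each `@BeforeClass` setup method now has to declare it. A minimal sketch of the pattern (class and method names are illustrative):

```java
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;

public class SetupSketch
{
    // Previously compiled without a throws clause when using Guava's createTempDir().
    public void initialize()
            throws IOException // forced by Files.createTempDirectory
    {
        File tempDir = Files.createTempDirectory(null).toFile();
        System.out.println("created " + tempDir);
    }

    public static void main(String[] args)
            throws IOException
    {
        new SetupSketch().initialize();
    }
}
```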
